diff --git "a/dask__dask-8801/docstore.json" "b/dask__dask-8801/docstore.json"
new file mode 100644
--- /dev/null
+++ "b/dask__dask-8801/docstore.json"
@@ -0,0 +1 @@
+{"docstore/metadata": {"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/conftest.py_pytest_": {"doc_hash": "06c1266131fdc08030619e9f494e8cf9270810c1f1feed973c5de8aa00929546"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py__This_file_helps_to_comp_register_vcs_handler.return.decorate": {"doc_hash": "a9f6a9280080bbbf1afad704657c0f07a93a80f5d7662248cace4d37da6a4895"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_run_command_run_command.return.stdout": {"doc_hash": "b157b933d06e25048e7c010f8205e9ca015bfa46dec43fa470be689123c9c7c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_versions_from_parentdir_versions_from_parentdir.return._": {"doc_hash": "7b907cfcb72231e1177b03b38d56920da01861d319db6838294b786846021aed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_get_keywords_git_get_keywords.return.keywords": {"doc_hash": "0e2d4894bde7f31b60c35fcc899f3c920720c802e3bcd56f114da1fbdc9a6e7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_versions_from_keywords_git_versions_from_keywords.return._": {"doc_hash": "16c168340a85e808da24455c3cd0a0f51a638d282735d3abfeddee949bbaf9bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"doc_hash": "d6b5faddac73562d512fd5f71f0ef8c0acd98529239b0eabd68f2607ca761cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_plus_or_dot_render_pep440.return.rendered": {"doc_hash": "e1361e9e7f31292c8fad2b4c78dd5d7ddab63fe3edf6b2d21efd350057f9605b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_pre_render_pep440_post.return.rendered": {"doc_hash": "f736ff4dfea0243dfac2ff609c0191dbec7db316734bbc4bdf6b9efb3a5a7b2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_old_render_pep440_old.return.rendered": {"doc_hash": "af126064d117fdaf3a0c47991d6e68ef56304322e9b41c39523dd2abe0d2e44c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_render_git_describe.return.rendered": {"doc_hash": "4f8656d406540ec09d958a32cc7ecc81f7f5de84721313016b0e228499a1f49c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_long_render_git_describe_long.return.rendered": {"doc_hash": "9d44c78548b4e9d91f54839e47594fa0bdc73dc7197d4bf6e7085ba0ddf78294"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_render.return._": {"doc_hash": "804fc381aff372ee4cc53a4951345e67223e3259693c49bd34fef69e048787ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_get_versions_": {"doc_hash": "81d079ebbe56db63a6cc14fca7ecf6573363f7b663d1906af58a0aa7cfa01a3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/__init__.py_try__": {"doc_hash": 
"6f7f3beb6878526decd40241c6752a5320ce2b6cda5f741637c5da0a3c25a55f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_numbers_blockwise._Tensor_operation_Gene": {"doc_hash": "03c6cfd9897930ad9ff6586a9698b4b9914580d6af9eb67877121ce737a66374"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.out_blockwise.chunks._chunkss_i_for_i_in_out_": {"doc_hash": "674b18130415e38cad51d1614aca960d6c1110d86ab0a559a9dc2958dc3eaafc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.if_adjust_chunks__": {"doc_hash": "430f8bf19bba79ed0d96312bd8fa17e110226e7565a6eac955b977e82b8549a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__A_set_of_NumPy_functi_keepdims_wrapper.return.keepdims_wrapped_callable": {"doc_hash": "a7e7f78b43a447b7d3928083702e6d60cee75d30bb27053981398bdf0a8a9587"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__Wrap_NumPy_functions_to_None_2.nanstd.np_nanstd": {"doc_hash": "1b0ba8b0fbc91605ae4bc489193098c153dde7b623b39503397f13a6dc735b81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_coarsen_coarsen.return.reduction_x_reshape_newsh": {"doc_hash": "23916e8106631d93edcaf26f1460e0af90847ab9397cfe5cfa39cd72b5444b59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_trim_trim.return.x_tuple_slice_ax_ax_if_": {"doc_hash": "84535f708ca9434cbf0cd87b3b498e25a7e4936fc2946d0ffd5d81dc937f217b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_topk.return.a_tuple_k_slice_if_i_a": {"doc_hash": "71afaa6839afc79a5051bd7f081c355d321a0ac0c5ead5c400435787117752a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_aggregate_argtopk_preprocess.return.a_idx": {"doc_hash": "aa30a20d30cc3a044a3dc089547b61afd7c21887439e31b18138b2496da488b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_aggregate_argtopk_aggregate.return.idx_": {"doc_hash": "2369a2d083f4dc582d9e8cb9ddae666b9d21d30c334eb84bc8af5b813243d42a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_arange_view.if_order_C_.else_.return.x_T_view_dtype_T": {"doc_hash": "76d64c305fe9c2b982184e8ba91029ba062e573e1cca1971018d2e13dac356c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_idx_if_i_axis_": {"doc_hash": "ee4e0481650e62bb852d096c799bdb70bd51abc59d86fbdb28d7b89fcefb9d39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_try__": {"doc_hash": "1dd2774efe8c9e7d70d68ca7f4cfc7ebfcc0eda2b7281f018f7f5a07af2a9f70"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_slices_from_chunks_slices_from_chunks.return.list_product_slices_": {"doc_hash": "18afbf357bc75cd9f280170142b2655557936c8f27692ec888b638434ac21fc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_dotmany_dotmany.return.sum_map_partial_np_dot_": {"doc_hash": 
"1b11393f1fc976c7fabff4fa5fb3e956cbb496694fd4b6e09a15cc4655c84b4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_apply_infer_dtype_apply_infer_dtype.return.o_dtype_if_nout_is_None_e": {"doc_hash": "559ed6ba45cc9a5d0dbd66b59b06339ea7a0dc28979d09d702098a5e0dc1afda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_extra_argpairs__map_blocks.return.out": {"doc_hash": "4fa4969bc26460a93b279b7b708a9824ab5a60be97ca25b79769fd81f1406e74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_chunks_broadcast_chunks.return.tuple_result_": {"doc_hash": "9b48897b89882baf297c720626a5398d74caa4f6fa35a00ef6efbe9bc9e38c4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_blockdims_from_blockshape_blockdims_from_blockshape.return.tuple_": {"doc_hash": "46adcc3b2b73f7050e0e2836fb3ea893c13aac0d4b13bac0ba9d254951ff32ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_finalize_CHUNKS_NONE_ERROR_MESSAGE._": {"doc_hash": "c21ed13b65bd2158af53778c83af62be0fbf0380f9a12d78f7122cbc30fbf82a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__new___Array.__new__.return.self": {"doc_hash": "8b1183cd19d625b7450d88affa7845dcf1ae8aa691af6991edb8374ae34dd5ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__reduce___Array.__dask_keys__.return.result": {"doc_hash": "69e932fd86068971612192f0bf0871d8dd6158cacdb41ba9b46a1f3092e24335"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__dask_tokenize___Array.npartitions.return.reduce_mul_self_numblock": {"doc_hash": "31cf42ef9224541ff775642064ad823e33a44bfa03fb910f3d1fc93740bd1ada"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.compute_chunk_sizes_Array.compute_chunk_sizes.return.x": {"doc_hash": "e174c71413a1bbbb7822b1815c4ca61ca4bca0a14fdf4ebd490a929f015aecb3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_ufunc___Array.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"doc_hash": "697e4c23ea68e9f86712dffd00af0fe3bd09463eb9af871dca0fc4618de5dc3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.ndim_Array.__array__.return.x": {"doc_hash": "0e7d580dfa1d1309b5f91cd136b5d722995014ffb90e709aa2f74b0e1aa6ac69"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function___Array.__array_function__.handle_nonmatching_names.return._HANDLED_FUNCTIONS_func_": {"doc_hash": "cbfdf4355d89bc71bccf8979bb7530fcbd73c8f977f28383dcbf464813b4fab7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function__._First_verify_that_all__Array.__array_function__.return.da_func_args_kwargs_": {"doc_hash": "6b6f16ffa0c7540feb2fd7403622f9ac90c44c443365966e8e78ec51186e855d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._elemwise_Array.to_svg.return.svg_self_chunks_size_siz": {"doc_hash": "5f9047fc96a4fd7a4173d4030a04e0cae87158752eac73bc29663f770130287f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_hdf5_Array.to_hdf5.return.to_hdf5_filename_datapat": {"doc_hash": "7a4b1f6f1dca4965f42c0943e1e41d58cabcc3327ae6856eedee7f9d2a7ad178"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_dask_dataframe_Array.to_dask_dataframe.return.from_dask_array_self_col": {"doc_hash": "eb41ea798375e7649dd5b4b1a703744197109a6d88af956785e4857ee8e0690f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__getitem___Array.__getitem__.return.Array_graph_out_chunks_": {"doc_hash": "43ae4f84af25464b64f707b30d2bd4ce577b6f4389d0ec766c24ad45b39edd8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._vindex_Array._vindex.return._vindex_self_key_": {"doc_hash": "becb90a50266ea6383e3132a507bf19b51f41deaee54aa92e525cb6eb35cecb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.vindex_Array.vindex.return.IndexCallable_self__vinde": {"doc_hash": "3d8f85e1aea07f5f36e186ef8f848cac68ff176a1b43c2ffaf6243b163a58d0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.partitions_Array.partitions.return.self_blocks": {"doc_hash": "da6776b686a413f8665c1c115389eda61881330d0e7875380c33152c3bc05ed7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.dot_Array.argtopk.return.argtopk_self_k_axis_axi": {"doc_hash": "ba290bc562bc6f77645ef3e2c7373ba8e472ad76f25796e29e52378ff33ecee6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.astype_Array.astype.return.self_map_blocks_chunk_ast": {"doc_hash": "5304e8f9951fa84dffdf45be7c6a6380e6ed18ad3d7ed6d58b05b80da6e8fc78"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__abs___Array.__sub__.return.elemwise_operator_sub_se": {"doc_hash": "b7ae628ef4abf6d37c1838c74476974d7e757fa95ae6c90c895bc18ebd1975e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__rsub___Array.sum.return.sum_": {"doc_hash": "e65e83a5bd120314dbe554f05fdb3e7566bbc66901eeef1e42816ca593b8c1bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.trace_Array.var.return.var_": {"doc_hash": "25bada816b1ee2c4c88f7b07df4e857926240efd2412081fc50119438736a90f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.moment_Array.map_blocks.return.map_blocks_func_self_a": {"doc_hash": "2c7595af891dbccf4cb83ae113a854aaa76f3a79ea6b899dc11528624d1a0c2a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.map_overlap_Array.map_overlap.return.map_overlap_": {"doc_hash": "0ee97c4b64c11dfbbd10299f03a0fe9b92e0f03c3ab54ebfef24170fd8629460"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.view_Array.view.return.self_map_blocks_": {"doc_hash": "5aa2687e75b00cd93c3a52afe86ce0402492335dd844369c5d270f12b76f1349"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.swapaxes_Array.__deepcopy__.return.c": {"doc_hash": 
"a56294942f1efd0fbf1b0cf0e424d76db487ab2c009f3e8ee84b2e383bd691a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_delayed_Array.to_delayed.return.np_array_L_dtype_object_": {"doc_hash": "ef5bb99fe4c792751c03a8f28c67bcef6c14f731e31b65df4dbc9c2481bfe66c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.repeat_ensure_int.return.i": {"doc_hash": "4f0700d45ac0ef2ec21ead7677d99b47160b00008957a031b6f6197731ec79dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks_normalize_chunks.if_isinstance_chunks_lis.chunks.tuple_chunks_": {"doc_hash": "0ea87a17a460c85521e087383211ae89eaa011f9c5046e4bf740ca07177bae56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks.if_isinstance_chunks_Nu_normalize_chunks.return.tuple_tuple_int_x_if_not": {"doc_hash": "e5ffc23fc30076b0660e3cb447523a8f3ba9d6a63642b48387931c5b27e5d246"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__compute_multiplier_auto_chunks.largest_block.np_prod_": {"doc_hash": "7f039f6a0bc20e7a9344e315a8f05edfa179b2b136053e108c66d4bf1c585ed6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_auto_chunks.if_previous_chunks__auto_chunks.if_previous_chunks_.else_.return.tuple_chunks_": {"doc_hash": "1005b2bfd9620564268d787afe125008250fc52bc792544b88282ea3c7fd5504"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array.if_isinstance_x_Array__from_array.return.Array_dsk_name_chunks_": {"doc_hash": "901921b71e13c52de8ebbf29672776decdd0673228556df433298b58cbcd0b90"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_zarr_from_zarr.return.from_array_z_chunks_nam": {"doc_hash": "6a4993fdb9ade6d46edb2e559e68136c3e3750856f7affaf1179d3af7c1987f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_zarr_to_zarr.return.arr_store_z_lock_False_": {"doc_hash": "34088a0df05de3ac072be8e8e0150c3579ecb0def61a0a27e64f35b09ba4c14a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__check_regular_chunks__check_regular_chunks.return.True": {"doc_hash": "62e09a0117a1209e5c4f2feb69b55cd4f82c0d9bfcfd36bc0a8bd1afe3b9902e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_delayed_from_delayed.return.Array_graph_name_chunks": {"doc_hash": "3a8aeb62dda65117d3dae67d5768c47f2565844bdcf416da6647957dadd46fd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_func_from_func.return.Array_dsk_name_chunks_": {"doc_hash": "51ec22c7c6a1859e7b202dbc6717c0e0c8ecf6985df46bc6a8980e8de3143c20"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_common_blockdim_common_blockdim.return.tuple_out_": {"doc_hash": "460c7bdf1221fdc41009874e37dc71bdab41d47f752992ebf7359244de7d40d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block_block._": {"doc_hash": "91289085cb4d44868b2a0c0999760f5455c617c5712b01dfa534b3cb02e57272"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block._This_was_copied_almost__block.return.rec_map_reduce_": {"doc_hash": "15b501147ecfbe9d5e927d43bae73d6c20f68c0356743620c4f69cf5064ab359"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_load_store_chunk_load_chunk.return.load_store_chunk_None_ou": {"doc_hash": "bbf5936156121fbd93d4347e8d1fd4bf1a45e140b5447268522f3a4317d5c47b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_insert_to_ooc_insert_to_ooc.return.dsk": {"doc_hash": "41451a40f546e24358029a57d392def73befbb14137249a0e963b171860851a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_retrieve_from_ooc_retrieve_from_ooc.return.load_dsk": {"doc_hash": "4f525743f2ad71c448c21896cf18b6236369065cf5ad3be99eb294472a11a2e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_is_scalar_for_elemwise_is_scalar_for_elemwise.return._": {"doc_hash": "ec02f31babf9b764705384928aff761c9f8f4646a26190a8b7c96ec3ece211b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_shapes_broadcast_shapes.return.tuple_reversed_out_": {"doc_hash": "2609355c41c8213c946bcf5fd265723c5a4ae61d7f89d29c3081a3a2ddb756d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_handle_out_handle_out.if_isinstance_out_Array_.else_.return.result": {"doc_hash": "5b60b92ec4f803fa3611fb6f1c755d7d27ee3f4e1cb0f09fe2bb90633ce21599"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__enforce_dtype__enforce_dtype.return.result": {"doc_hash": "69be4a9567201ba6151024d8de494d0eeb5195f64459455450e0591a12d4a8cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_to_broadcast_to.return.Array_graph_name_chunks": {"doc_hash": "32d095d7edcf8bd07d03de527c23ebd7fe88af22667809bc322519523415c81b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_arrays_broadcast_arrays.return.result": {"doc_hash": "70219fbea4552323155d66626b6989864612d07e02b90a7819ab205a4ccd12a7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_offset_func_offset_func.return._offset": {"doc_hash": "d0a443b598efcb78e09afd4a3a4f2e053e7716bd6e0f7f014ac6a9bb4978cf17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_chunks_from_arrays_chunks_from_arrays.return.tuple_result_": {"doc_hash": "af0b55adc669ffee2634882267a15fbb5e313ec30ead0c0adb53e7a3f03de27b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_transposelist_transposelist.return.reshapelist_newshape_res": {"doc_hash": "88350e3382500f29b8081db2f1dfc9660f5b297fd0d63230250d114e88c5c11b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack_stack.keys.list_product_name_ra": {"doc_hash": "7d78412948faef6e346945baf410c8a3ddd1476e702d56c927b510c9edbb7585"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack.inputs_stack.return.Array_graph_name_chunks": {"doc_hash": "a2fc20ccade92de61012a9e79e72bafc37ebbc3dc7c3e59671ef7c512db2d242"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate3_concatenate3.return.result": {"doc_hash": "8c91ee026ecf18a95519d8922f23d9f546b97b974f57adbb53cd990fe143121f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_axes_to_hdf5.with_h5py_File_filename_.store_list_data_values_": {"doc_hash": "84db0f39a4e92c4802a39428724eed3b22bde26f7cbae4a8f855bae27c49ce13"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_interleave_none_keyname.return._name_i_tuple_k_for_k": {"doc_hash": "e222a54e46cb41333517d313287b1b95c148946fb732102cfc19cc0664f5b2df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex__vindex.return.x": {"doc_hash": "71cddccbc557f4f4468f2b0c39e7de41073ed31d90f579194d37ca401b092089"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_array__vindex_array.return.result_1d_reshape_broadca": {"doc_hash": "9d4f8e7e256852c79878866c59a152e815fea4905967aa19912013edf590d4c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__get_axis__vindex_transpose.return.block_transpose_axes_": {"doc_hash": "ea22ca673235966cf26abdf3a1d5cea9bf76a22fe30035c0310ef048f24df410"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_merge__vindex_merge.return.x": {"doc_hash": "d982ce71f9c0eb3a80dba144d6edd49c0c91801b16351e52d8794e5786e43f48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_npy_stack_to_npy_stack.compute_as_if_collection_": {"doc_hash": "f47c4134bb5b4721e67b6c8d308a2c38df4e5ec6cc7feb2840d605fdba0b8814"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_full_like__get_like_function_shapes_chunks.return.shape_chunks": {"doc_hash": "1e61ccae8215c97887dd67eadc1997a8ea575182d8e1b2633886bd9271510fc1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linspace_linspace.if_retstep_.else_.return.Array_dsk_name_chunks_": {"doc_hash": "4c127382d4bd26a0a2e703a2432e3078e0c4d665ed0d3fd7d3b6b442ba1949f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_arange_arange.return.Array_dsk_name_chunks_": {"doc_hash": "7e953fdb8a0f99a2df15f620eff72b57040dffa3336203a2730203cf747e0f3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_meshgrid_meshgrid.return.grid": {"doc_hash": "24b193083d696a96e58db00d7231952d6d000b68829931d76f9fa472e3566b30"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_indices_indices.return.grid": {"doc_hash": "d6d45a6e39744ad5b138d3dcd8c0987e34b8b8afe86e7aeb7dcbb7d6cd8ed922"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_eye_eye.return.Array_eye_name_eye_shap": {"doc_hash": "b1f4a97432a68f6a235b078343a539b55ea02c2850fdb2e991077bb76900465a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_repeat_repeat.return.concatenate_out_axis_axi": {"doc_hash": "a3f80e3a43e12a5df28cee0122038f2bedb01eb9f505a4de10b3c07db74ddeda"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tile_tile.return.empty_shape_shape_out_dt": {"doc_hash": "1cba7ecaa196c602993fae546e1764a12057516228862414925a1bc5e3167879"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_expand_pad_value_expand_pad_value.return.pad_value": {"doc_hash": "44bb4ed537076573a268d1c8bf850ccde603fcc5b01cf10a5ba5a41696d0fd4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_get_pad_shapes_chunks_get_pad_shapes_chunks.return.pad_shapes_pad_chunks": {"doc_hash": "688c2f17169cf1421eb12ca6a6ae92845e43da52e1e1d05e2f96753ea309c5e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linear_ramp_chunk_linear_ramp_chunk.return.result": {"doc_hash": "2351c45a545d50801c602d9ce3673f5129d71d7809ae2fffa2891691bae80fd8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_edge_pad_edge.return.result": {"doc_hash": "9e51db4cc0166034006610d8275af823cd6f76ed9804d843b5aded4eddd9bc54"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_reuse_pad_reuse.return.result": {"doc_hash": "65aee02fd8fc1cd3cb44a71386947f373fe6f11861852045bd70690ff76197bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_stats_pad_stats.return.result": {"doc_hash": "9d0a2b55695c34ff20feb8b288076b6baffaffbacbac2c1114825719770a8f8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_wrapped_pad_func_pad_udf.return.result": {"doc_hash": "82d26e729e23eed211c63a008f3f22a1b89592d81ccd1c5256be63c80d4e44da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_": {"doc_hash": "d483d8acb6f95905c6fbf1c9cf9eced8e40761c62e6755cb796c88f2769b1d0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_np_chunk_einsum.return.chunk_reshape_chunk_shape": {"doc_hash": "b8251e755cf557d26a61cc5b1f9bc3f8068a4350be748ebd4e5609284d7cf5a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py__This_function_duplicate_parse_einsum_input._Parse_ellipses": {"doc_hash": "b095f157a7a5b966b7d37116832675b7443f11018cc29997c230bc0c0bb06a67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_parse_einsum_input.if_in_subscripts__parse_einsum_input.return._input_subscripts_output": {"doc_hash": "42068a064a2dac2460eb50d5adf2f0b4ed792cbe5f2ea88a2c40dcb362ed9edb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_einsum_": {"doc_hash": "08b527872c59417ee1897e0a36d4c6d08d0587deeccd860ec261936e82e60241"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_inspect__hfft_out_chunks.return.chunks": {"doc_hash": "b78546131ed537a1bc9f977d8465a20edc38be0c76297f169521e47085d0bcd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__ihfft_out_chunks__out_chunk_fns._": {"doc_hash": "3cabb6a98edf8851f793a3ab292e0b49280562e8a3e8d53bea286ef5c225496a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap_fft_wrap.try_.except_KeyError_.raise_ValueError_Given_u": {"doc_hash": "b684f1a42ce86baa06271855f67d876405948f0ecbed9adeaed46b0fc6bfd04e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.func_fft_wrap.func.return.a_map_blocks_fft_func_a": {"doc_hash": "b5058979c84ce548343c9cd322b80c7c334f3a78fcfc8bd4b95487aaeb91b94a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.if_kind_endswith_fft__fft_wrap.return.func": {"doc_hash": "0c91bb22150426ad31a53e18a2b957b528e59dbe9aea0b0ae4ebf3e5a0d5c5c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_rfftfreq.return.r": {"doc_hash": "05c5e884839e760cbfb490e2f19a5bb43b7ee63078298f840fbeefc8047c6fc3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__fftshift_helper_": {"doc_hash": "7275be591d823941f88c8bea39390b12b92eb61d8acd77dc6c5afb46b563df19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__parse_gufunc_signature__parse_gufunc_signature.return.ins_outs": {"doc_hash": "060e5adefc62247832bcec349e064e3846853fc4b347f14fb79f85aa41901a4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes__validate_normalize_axes._Assert_we_have_as_many_": {"doc_hash": "e959df10eb023926a92b75a8cf7a5abb2a8af76bda5e64b4cd695dc3ab60b3e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes.for_idx_iax_icd_in_en__validate_normalize_axes.return.input_axes_output_axes": {"doc_hash": "261db20631488665c7fadd7e551c249d33912b503e2f473180af76e1be4a7b63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc_apply_gufunc._": {"doc_hash": "99b77a84d7a8b60ac05a76f161e6bbd565c96ed337ed2d7c1db6d8180ea678ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc_gufunc._": {"doc_hash": "5c204e5f1219199970f96286f835711c46e3e9cc988590294e4ba850fcab1b3f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc.__init___gufunc.__call__.return.apply_gufunc_": {"doc_hash": "4b439d90654c78014efc4eb9b78288715d8b62553ea13899c653bfbcffd1ed38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc_as_gufunc._": {"doc_hash": "98536c3b13d52fbb1f0758477a625782917bab25c5d6335d9daf8384d2d29914"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc._allowedkeys_": {"doc_hash": "978982205b4bd494fede0dc8fdee92d0febc54c5935a9c1f86b8b68ba62e662f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_operator__nanmin.return.k_1_if_np_isnan_k_0_else": {"doc_hash": "0e00e098c88ebd869690bb4232ad7527e6952acaaf4e7753bcf9920f6a4b8189"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__wrapped_qr__wrapped_qr.if_a_shape_0_0_.else_.return.np_linalg_qr_a_": {"doc_hash": "66767e45a752400e6f7e0d9ff7ce4b720a873f9a7f99d48c9e72567de8de74d9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr_tsqr.layers.data___dask_graph___lay": {"doc_hash": "0f71b61dcf0c16c461769c1448d2e3af13e88cc582a2906f6883a19b80c974c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.dependencies_tsqr.can_distribute.chunks_well_defined_and_i": {"doc_hash": "e70c1c41291186f0c652ec44f9393547300bd26f7a8638ca3ef431c311e52432"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an_tsqr.if_chunks_well_defined_an.dependencies_name_q_st3_": {"doc_hash": "fe4ff8264193450eef2196455dd98afaf8022307338fb031ee16a1285ba85150"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an.else__tsqr.if_chunks_well_defined_an.else_.dependencies_name_r_st2_": {"doc_hash": "36ce786291ad6a0eb6ac9fc8299eaa255548d17ca0938d4a46b92de92793d014"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_not_compute_svd__tsqr.if_not_compute_svd_.else_.return.u_s_vh": {"doc_hash": "e06859b7c3af91cb855226fd7b6d604ed7a8fa4a831f577d28e0e15956097273"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr_sfqr.name_R_1.prefix_R_1_": {"doc_hash": "1bb319eb1a19287783eb276a2721124eda02f4559feded56e0b5bec493155047"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr.layers_name_Q_R1_nam_sfqr.return.Q_R": {"doc_hash": "788b23b9aec0cd083f945fe3e7bab72833be465645222dd8734e944be30cccbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_level_compression_level.return.min_max_min_subspace_size": {"doc_hash": "ee1b761e710f8f771cedf20db5c9aee772e76638521dba32bdb8cf16efc7c952"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_matrix_compression_matrix.return.q_T": {"doc_hash": "9d8cc9229845fa8b60dd1ca2859e9e945058fabc60bc17c7b6650b203efcd03d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_compressed_svd_compressed.return.u_s_v": {"doc_hash": "fafa8c8d1960b78f97d0d61d7a9cfac6b79debf753ec63f59da8a836cc3d78fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_qr_qr.if_len_a_chunks_1_1_.else_.raise_NotImplementedError": {"doc_hash": "ca6d7768a8bde88e3a1d41e545a258ee7de224cc99dc820360c281938634b378"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_svd._Single_chunk_case": {"doc_hash": "1d9cb1992f16e84a5d714e2b34903f2b59aa71a5ff489ca934e3128cf8e8fb8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu_lu.dsk._": {"doc_hash": "fd65d8e30773526ea65b831e0190f044246b2c10caf3047a93c52000b60d5661"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.for_i_in_range_min_vdim__lu.for_i_in_range_min_vdim_.for_k_in_range_i_1_vdi.dsk_name_lu_k_i_": {"doc_hash": "381d34c91b0a7c5344df478730c2a757590cefe907905b867086afcc9f256789"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.None_4_lu.return.p_l_u": {"doc_hash": 
"e0667e5516ab6a537b4b1aacc123f966ee3e3180285a4710b4cc9f6628f95e3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_triangular_solve_triangular.return.Array_graph_name_shape_": {"doc_hash": "11b001b7c14fa17ceed8d0e768f8dcb7810f5618127b13b4806da11a01f3d049"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_solve.return.solve_triangular_u_uy_": {"doc_hash": "fdf0c4c1eaa1a711b6462508f1b4892ba35fd5df7c901a067f5898e53af139cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__cholesky__cholesky.return.lower_upper": {"doc_hash": "66ed60407236404d6cf4300bdd48514dea278a59e1fd13655effef9f2e8cfb29"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_norm_": {"doc_hash": "0b3ea2f48799a1dbe6ea799175dcf1e43b2dfddb8c9b82a86460734083ccd004"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_where_masked_where.return.blockwise_": {"doc_hash": "78cf43f154acc5082dbe99f792dc6ac02fe57f8b9a6c4842f3011537e1dabf56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_values__masked_array.return.np_ma_masked_array_data_": {"doc_hash": "20d2019b37306542905f761957107a7e7da45fce0561767d151f260db184b962"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_array_masked_array.return.blockwise__masked_array_": {"doc_hash": "6eaa9e2b0385cfbdabe6eaaa9bd9d58087968d2f0b7668d1617f71b657a2fca9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__set_fill_value_": {"doc_hash": "8f5999f6a5c1d60530b0dff7daaf2756df9a094656bba0067b789a0159153097"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser__Recurser.map_reduce.return.f_x_kwargs_": {"doc_hash": "f61f48d5232624880b433e6fb770da0fa1c468c1141e076c2f1b3697ad4fb7e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_taken_di_moveaxis.return.result": {"doc_hash": "5cd02b093a23df15d738013998fd1b03ec7b3ac8e043611d21d19f783c66ffbb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_from_itertools_import_zip_GETNOREMOVE._getter_getter_nofancy_": {"doc_hash": "bbabbdd6853bd40d99502639b4f636eee8f0e2501772596ea847cdbfbf358a67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_hold_keys_hold_keys.return.hold_keys": {"doc_hash": "cfa2971852f7b932684b2f00f811a16f1669e7812d136ade3aba1d4ebc5a0ab0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_slices_optimize_slices.return.dsk": {"doc_hash": "99df1a6d6242c0eb66dc536907331d396221dacb6cba11e7f27f4dea09c0d643"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_normalize_slice_check_for_nonfusible_fancy_indexing.for_f_n_in_zip_longest_f.if_type_f_is_not_list_an.raise_NotImplementedError": {"doc_hash": "6650f3f814bd3e5702b028566bc574b21e556ea1ed194fb9a301f60354582d00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice_fuse_slice._and_newaxes": {"doc_hash": 
"0ebe26b90504be091a55a5bc528455cbc55e8ce757f9071f8c602b309c33d4a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice.None_8_": {"doc_hash": "4a7c8739a0263cfa7cfb8168ce6d2f18dec7f00c28d10bbe1e7c2d96f42fc7b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py__trim__trim.return.x_ind_": {"doc_hash": "5f67b416d011267cebbcb439ccc1a48c7a125e452702dba107e6d9a6dfb643a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_periodic_periodic.return.concatenate_r_x_l_ax": {"doc_hash": "4c7894cdaf701b7de8f47f2ae92df4f43b1cdc274f9ba99b00d406ace22e35a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_reflect_reflect.return.concatenate_l_x_r_ax": {"doc_hash": "eb59e04f9e63091d05b162216497bcf91340ca59958fb8a8835b852f8391a1e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_nearest_nearest.return.concatenate_l_x_r_ax": {"doc_hash": "4c43ba64cf299316e3786d2414c9d092d03a55b3277fcd6abf5a3eae2f0fbef6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_constant__remove_overlap_boundaries.return.l_r": {"doc_hash": "c1494e55a8e3d0547a52b5381ad23e69db4f0b2177e956cb8c1a14a1a95198d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_boundaries_boundaries.return.x": {"doc_hash": "e951d9504d4e042ac294e78174ffed00de266cc6f27f41b0e4f15753596a9e9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_overlap._Share_boundaries_betwe": {"doc_hash": "924403a710dbad1baaae0c6509f5c8e8c0e20eb72af848e38ad45376016356a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap.depth2_overlap.return.x4": {"doc_hash": "ccd11a9174da8c0c56cf378ba369333c43f2c203eb02dc5104201cdfe1416898"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile_percentile._Allow_using_t_digest_if": {"doc_hash": "4d6e83aed532026ff737b018930de4bd667fa00faa101c078c845735bc7ae725"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile.if__percentile.return.Array_graph_name2_chunk": {"doc_hash": "eba5cd902920d43768434895e1b10428cb4e2842900f9f71785bc91d49aa47fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._derived_from_np_random_RandomState.multinomial.return.self__wrap_": {"doc_hash": "f792d37644bc29d05fbee9b56fd3ddab5da73631bec103824320e882b0e8323a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.negative_binomial_RandomState.rayleigh.return.self__wrap_rayleigh_sc": {"doc_hash": "554831d6512f900fa3af10433b9e7463c0c6ebc9192ce75e387fc3dc06772c82"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.standard_cauchy_RandomState.zipf.return.self__wrap_zipf_a_siz": {"doc_hash": "911e5765b7eb8b502b510d04cae286813bde202bbc82d16fdd623995a0ee543c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py__choice_": {"doc_hash": 
"d8f5ad828565596aec9b513ad94dc11c6af7f3b7d16761bb62ab484549c2d39a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_cumdims_label_cumdims_label.return._": {"doc_hash": "f889d58b1de136d1a222328934d259b62cf622d9749c8e9b6d16c16b8996a2fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__breakpoints__breakpoints.return.tuple_sorted_cumold_cum": {"doc_hash": "e6a9503d31d480dd619c069b5e65ac502192d9a5e3069c04694d4eb285c65ab5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__old_to_new__old_to_new.return.old_to_new": {"doc_hash": "aa674b1c25f216f11a72114a6e9cf584d586880efc3ce196e328ca907d4da7f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_intersect_chunks_intersect_chunks.return.cross": {"doc_hash": "825ef9e652323d0fe498d70de354808569953a9804f554dc69e7c17486aeadbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_rechunk_rechunk.return.x": {"doc_hash": "003f5646cb36c5b4d8011955ffcef51937853b048a565deff1ecee496216583d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__number_of_blocks_divide_to_width.return.tuple_chunks_": {"doc_hash": "178b18f76dfae2dbb770cf0ec23437484a3eb40f7611cbe909f20785d8561133"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_merge_to_number_merge_to_number.return.tuple_filter_None_chunks": {"doc_hash": "fc82712635db2a3998679336f7909c98e07ab00d6db39dcafe9bafd5432a8f76"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_merge_rechunk_find_merge_rechunk.return.tuple_chunks_memory_lim": {"doc_hash": "d23e3e8950d874ac648b213214cbaf5c7334eb548351eb9a30f560c45a3258eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_split_rechunk_find_split_rechunk.return.tuple_chunks_": {"doc_hash": "eec737ad37d33000372fd1e9baa1a2b2b937ff8bbdfc4a607181f672cf6f1844"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_plan_rechunk_plan_rechunk.return.steps_new_chunks_": {"doc_hash": "7250fdc29fc483ff6988ab9cfb39731ee823b2e338442afec33fb0feb900f407"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__compute_rechunk__compute_rechunk.return.Array_graph_merge_name_": {"doc_hash": "a7703de2efd69af820afdd2d33f109a1cff3c1004bf383fd75d95494d104c1a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__PrettyBlocks__PrettyBlocks.__repr__.__str__": {"doc_hash": "8a6e3fae04d3446f32bd9d36ff7e04912031c29b546f8a2a69ca7082da0e7dae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_blocks_format_blocks.return._PrettyBlocks_blocks_": {"doc_hash": "619c36af4b482a1cb62a26975b01972d2ac401bcfe8c047f663251b30a90b966"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_builtins_divide.return.f_a_b_dtype_dtype_": {"doc_hash": "067376d079b849ce86a81f6ef1ccf8aafb59b9b58cfd67d4738f52f55aa7db02"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction_reduction._General_version_of_red": {"doc_hash": "f8f69996dff788eb92f598eeec534823220980578f9e016c912d4b57baec598f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction.if_axis_is_None__reduction.return.handle_out_out_result_": {"doc_hash": "383770ba1164e870f5a9175f483b2609d147784a2c0ee90c607e91ee14b6f3e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__tree_reduce__tree_reduce.return.partial_reduce_": {"doc_hash": "209d5e4a32a5f1430e6c754d36bffe54ba916231dea7e25c6a5df4524cdbe235"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_partial_reduce_partial_reduce.if_np_isscalar_meta_.else_.return.Array_graph_name_out_ch": {"doc_hash": "261026916f1056f39eaab974630ae04edc3c0e68764c3519fa7546e86bd4748f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_sum_prod.return.reduction_": {"doc_hash": "c6b56ec8d61581dd41ac36f1b6c27092822114b0a009f3f49ed509f3e14950a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_min_all.return.reduction_": {"doc_hash": "b5568de5b8af54179040b7cf9ac24a5918976772eacb1edbd28488a5739d142d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nansum_nansum.return.reduction_": {"doc_hash": "3f10e7bfc5c67b589aa854f94da031c25a6db535b193123ec087610c5957cc4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nannumel_mean_chunk.return._n_n_total_total_": {"doc_hash": "ad70fa0e9ecbd8fae222925b1cc4e8cb526bc5dddf0cd08606211a703f639e0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_combine_mean_combine.return._n_n_total_total_": {"doc_hash": "86ca998c4b7973542ab2e419530837ca54e2d96f4030dde468554292d9f05ddc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_mean.return.reduction_": {"doc_hash": "a5985e9bf1487420fff1bc4cc683218e0cab69a4286b1d9a3ba414df88efa0f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_chunk_moment_chunk.return._total_total_n_n_": {"doc_hash": "841eae85021dafca7f4b1306c9063b5fa7f806367a2cb145b94298f4d26d84b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__moment_helper__moment_helper.return.M": {"doc_hash": "f78d4dfa5eebb08aee93b3c9c5f5e4150f3ff2d8b28802693441b4f480cad91b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_combine_moment_combine.return._total_total_n_n_": {"doc_hash": "c3fd6b182cf51e24db252dc6ba551f93840f24ef4db32f34301ad4d5caed50eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_agg_moment_agg.return.divide_M_denominator_dt": {"doc_hash": "e0f51c72bbf7ea335e40ec6eee2c87dc9b2480e03115d731583bb130f88e66b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_moment.return.reduction_": {"doc_hash": "6c87afc87c56b77fbdb7c16b94b517c5476ff9071f4b577893b4e19954b4f846"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_var_var.return.reduction_": {"doc_hash": "e6b7ae8504bdf9031679f1217eee9401ef4ec0ca4c66d42cbaa11c3cc9ed5948"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanvar_nanvar.return.reduction_": {"doc_hash": "10c6122aab6f7378b30bdd0ed38717044d1d35eb972f29b14704c84bca6673e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_std_nanstd.return.result": {"doc_hash": "f55d80afe28b3ce506374139e97f9e318673914d9f1d89b60768278b767ed926"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_chunk_arg_chunk.return.result": {"doc_hash": "76a86499ed41521367d58609fcf3c8c6d485873fe7cd01c1112aaf803b345cf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_combine_nanarg_agg.return.arg": {"doc_hash": "78f88afa96997122ed85b1e1eb41d0bd7e63b693fdbc6e99a7459bbc06f5eb28"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_reduction_arg_reduction.return.handle_out_out_result_": {"doc_hash": "311a4f3108405c216afa4d9e89d32c6b278d1744204b7bcf094d5bb868442f81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_make_arg_reduction_make_arg_reduction.return.derived_from_np_wrapped_": {"doc_hash": "4921f7d9a8692d08e9700e22b013b7d026762f666a1370efc832a48fda45b0af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanargmin_nanargmax.make_arg_reduction_chunk_": {"doc_hash": "e2b1299db517755bc00c5e2199ebe719e5a28377b49a603a5a49c56338e50761"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_topk_topk.return.reduction_": {"doc_hash": "90e6028e4cc6d9c0385734376315ef7036b534be48fce086cc6415938200948e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_argtopk_argtopk.return.reduction_": {"doc_hash": "ede2858ef5a6ee92afe3a322a78956eb01697d4431367535d4c985a144740b20"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_trace_median.return.result": {"doc_hash": "0654757e242c2ac79575309a9f5fc368ef805cb536b5f3d7c3d40a78d7a4fb2f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmedian_": {"doc_hash": "0cf3c275a0031020914ccc6ffc1bd60762f9672b874f244003f052f552b644f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_expand_tuple_expand_tuple.return.tuple_out_": {"doc_hash": "0db96f19e443afe6d780724bac7043c6277d872226defe9ea4a5666823c66500"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_contract_tuple_contract_tuple.return.tuple_out_": {"doc_hash": "9f62f7737f5638b153f7c64ba7c902043e105bd688ea0612423ea6c07b946129"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_3d_atleast_3d.if_len_new_arys_1_.else_.return.new_arys": {"doc_hash": "d8bfaa731ee75d83a55148d8427a4064a462b70c6e69152a97389939b4a9a9eb"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_swapaxes_transpose.return.blockwise_": {"doc_hash": "00541c58b676ed2d4c916377989a5b00866ca40b4858faf5148ef08b1aa0cae8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_matmul_matmul.return.out": {"doc_hash": "b840e0da0237c971dd7251951482142ec6b85b2a35c81d1863ec07e0697de226"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_outer__inner_apply_along_axis.return.np_apply_along_axis_func1": {"doc_hash": "7e68f7251d68f46521d16cccc2eb152a0e532ea11df6aff0fc0ee765a1af101d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_along_axis_apply_along_axis.return.result": {"doc_hash": "452dd6676cf158e7de4761e3e5c9bbf638949ec18874e4781609b2950cbdb228"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ediff1d__gradient_kernel.return.grad": {"doc_hash": "0d83f395b02b2b7dc28bbb7a3dbff0491b38c1e4a32d16a8ef847b7614394747"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_gradient_gradient.return.results": {"doc_hash": "a8ccf18fc25e9ad82765bb3edb45c8b03660568a3a86b4696e29987e04a29a45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram_histogram._": {"doc_hash": "442e258e7184fddb67b5490d8feb430521fb896758fa899490ed8513065c3267"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_isinstance_bins_Array_histogram._Map_the_histogram_to_al": {"doc_hash": "893f1a4d74f798c0800c8a297f2520e69a3aa40d114eb07badcaa1a7aecfeb9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_weights_is_None__histogram.if_density_is_not_None_.else_.return.n_bins": {"doc_hash": "1d4742fe9740f6d42a610e5b742264bd787e4d0781e669cc984d0b00c7109fed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_cov_cov.if_not_rowvar_.else_.return._dot_X_X_T_conj_fac": {"doc_hash": "de33c4713834c38f05bfb93b902f89c623d2f254a55ac43893fbaf8449cc3f5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_corrcoef_iscomplexobj.return.issubclass_x_dtype_type_": {"doc_hash": "a4fdb53cd46d47ae79de093da30b0a4f398cdad91f3c4dfc8da6d90034e7eb5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__unique_internal__unique_internal.return.r": {"doc_hash": "9d756ba60b55fb34b209b86ec2bf510ffb69bb9c7a197fc521968064d9485da1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__isin_kernel_isin.return.result": {"doc_hash": "568f31af0e36d5794686aceb9e8d65b266ba34154cc12bb4cd885dafd371fa53"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_roll_roll.return.result": {"doc_hash": "d98061bcf0a7aecb9732d194a92d65637260f591528f97340f0010d585074904"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_compress_compress.return.a": {"doc_hash": "567cd16e028859ee602099e7698ad0b5d98c32106caefcede9d6c3d66d5471d7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_extract__isnonzero_vec.np_vectorize__isnonzero_v": {"doc_hash": "bf85ff50048b1e3a761838c5061a2ffd89f743d9c2db7580cf96b72313bb010f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_isnonzero_isnonzero.try_.else_.return.a_astype_bool_": {"doc_hash": "4809b5b5cb7fcd7fa8cb7374f749f9841d5fe340eaa97b48404d0d8a4767e972"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_argwhere_where.if_np_isscalar_condition_.else_.return.elemwise_np_where_condit": {"doc_hash": "dc60cae82fa51b236505781333d6c8a1aebb18702cbe71abeca9b29ef2400570"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_count_nonzero__unravel_index_kernel.return.np_stack_np_unravel_index": {"doc_hash": "f70bd74edbc14c1036de473eef3f2734ee5149576615c924c430d77ac0731bb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_coarsen_coarsen.return.Array_graph_name_chunks": {"doc_hash": "9a329ebae8bc98e4fb127ce79bb1da3889b7dd1efe02be9be8d0344227c27ba3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_split_at_breaks_split_at_breaks.return.split_array": {"doc_hash": "16fe33a7012f00edd2e100ec776d30c241923783bceb41c7c82c482f9f34a1e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_insert_insert.return.concatenate_interleaved_": {"doc_hash": "3d331e06bec048400b6af505af4a05e743bbd3cd42408d3b8e8f9c935376d217"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_sanitize_index_sanitize_index.if_index_array_dtype_b.else_.raise_TypeError_Invalid_": {"doc_hash": "0a8d4baf6664c7961d4b78bd6f3e4f794ab2db7726337e40b3cd82f6f446d77b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_array_slice_array.return.dsk_out_bd_out": {"doc_hash": "17993497cd23a476872db8600c69ac561fb05b279a8d8ee776b9a340fecf8c86"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_newaxes_slice_with_newaxes.if_where_none_.else_.return.dsk_blockdims2": {"doc_hash": "dd9037a9e0175f75435bc1141e5c649a34b73aa350e84941c22f6e7d3448fcfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_wrap_lists_slice_wrap_lists.return.dsk3_blockdims2": {"doc_hash": "e2b7ec538fe1bda96b636a3276d6dbb07a777e7141e9212b6c39990b6b8ae8f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_slices_and_integers_slice_slices_and_integers.return.dsk_out_new_blockdims": {"doc_hash": "9fd5e0d32f8f45ed2ce3b0a8edb1e4ba97105d7a3fec8e6c22d14e49170c6f3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d__slice_1d._Returns_a_dict_of_blo": {"doc_hash": "7bbcd0fdb34830ae0d3c936259ca585c4a3c39953dbf00eb68dccf4e5bea31e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d.chunk_boundaries__slice_1d.return.d": {"doc_hash": "95a181199ed9a7d2e06919b03f3979ec02f1176d0257c896caf9e75cfed42a92"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_partition_by_size_issorted.return.np_all_seq_1_seq_1_": {"doc_hash": "efcbdc5871ce376131f570f3cc08d0087a433a6102bd33b07d3dffd78da0d5eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slicing_plan_slicing_plan.return.out": {"doc_hash": "e26f4141a2d68a45f3d92a0315e4bed2844ae992664a89e2033318e9aa68dd90"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_posify_index_posify_index.return.ind": {"doc_hash": "1a8ecc737310020569a457adbf089696953ec3167b5f2cd22391df10d0c44f2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__expander_expander.return._expander_tuple_where_": {"doc_hash": "0e79f9db3ef819894b1d515f616d822bbbdab4fa80ca7cdd09b697d208eb2035"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_new_blockdim_new_blockdim.return._int_math_ceil_1_0_slc": {"doc_hash": "23d9adaf194856e87bd2a16d30631747032092542ed63076a1a12b92dfe703c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_replace_ellipsis_replace_ellipsis.return._": {"doc_hash": "a7d917d689d0d98174eb823505c95bd929104c34a6e20b7369d9bb3a6a80c01d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_slice_normalize_slice.return.idx": {"doc_hash": "9a0678db8b41bc54c0950da775ac4b69a28cc4f4d5111782b0ae35a43d41938f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_index_normalize_index.return.idx": {"doc_hash": "64239dd9e8f60cbcfc4deb1bf68521ce911f07c75b2e09776d22dc542e15d9cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_out_index_": {"doc_hash": "6b59ac04ce82c08eadce8d5e63b9bdc0e58fdd519fd5ee07c408033fb62464b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_on_axis_slice_with_int_dask_array_on_axis.return.y": {"doc_hash": "8f7529c4c4c164784a87c3bf83e9d2525ed5e3a01c93232751268c22cc373843"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_bool_dask_array_slice_with_bool_dask_array.return.out_tuple_out_index_": {"doc_hash": "b2c500452ea6f13f4ea09d2a9b795abb62185311c2fd4bfa13e4a9fec00db539"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_getitem_variadic_make_block_sorted_slices.return.index2_index3": {"doc_hash": "70591c77dc36e8c49cb829c55d917babf58765ef4c226c0d0583734e87204cf2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_shuffle_slice_shuffle_slice.with_warnings_catch_warni.return.x_index2_rechunk_chunks2": {"doc_hash": "929db5cfd80ddb6aa6a9b38c45d060851fc85a88ed4a11fd6285daf297fcd539"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_____all__._": {"doc_hash": "c41751c2f48e2b1b43ea602150e99caa4158a74a6e4d7ee132757d4ad85a0ab8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_1samp_ttest_1samp.return.delayed_Ttest_1sampResult": {"doc_hash": 
"d4e8361beef68569557b3aa1be59f80a9de9e4bcc0e122d51aa1a62de9d1c18e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_rel_chisquare.return.power_divergence_f_obs_f": {"doc_hash": "b77d275fa500050aa6cdb6684f323bffff180758cfc94caa0ba65cf2523ae195"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_power_divergence_power_divergence.return.delayed_Power_divergenceR": {"doc_hash": "bc51e13bd619ca4f589a910fd2971fe2076ee5f7192f7c81e033329c35a243eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skew_skew.return.vals": {"doc_hash": "ceb320c954d6dc46f0311afc334a05729ad9a966f1f20f33b7a37fdc9b45b8a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skewtest_skewtest.return.delayed_SkewtestResult_n": {"doc_hash": "8d3b4bfd51c58efc2ffa0d53f74e2c9e166a26bdd50036ddd1b00a6f64bf3f1e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosistest_kurtosistest.return.delayed_KurtosistestResul": {"doc_hash": "716d1dd25c9a56f0f0e87279e9aecc3499bd176d0dd9f6a4c61d3fb157b9fe07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_normaltest_normaltest.return.delayed_NormaltestResult_": {"doc_hash": "af267ce6482e3919de99b4314f86e6202e0db8ec4f292cdcbf67c81df19d2d80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_f_oneway_f_oneway.return.delayed_F_onewayResult_n": {"doc_hash": "df625c0515093a0476e747d82a1b7e0d9798d3a66b392bfe66c8b7fdf4607dfe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_moment__equal_var_ttest_denom.return.df_denom": {"doc_hash": "1a08ab23e4084eb934739b3fda7b40553ac2f7cd73f499c53f5f887dbb4b868f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__unequal_var_ttest_denom__unequal_var_ttest_denom.return.df_denom": {"doc_hash": "ecbe53fe846a0d849e43b0d4b6cc2c038d1593768c27343095a5b7041e8db563"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__ttest_ind_from_stats__count.if_axis_is_None_.else_.return.x_shape_axis_": {"doc_hash": "fbcffe68fad2e72f090cdbf505c3a2109180958dea1eb3555a70628fad3344f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__sum_of_squares__sum_of_squares.return.da_sum_a_a_axis_": {"doc_hash": "c3897ac60c2619436f60a2545427f0c0c01b2a4b361d49c017290775fa390d6c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__square_of_sums_": {"doc_hash": "969dd22840e2a94c8e305de702549d69687e669af6a110b8cab9623be9ded6e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_math_text_style._font_size_1_0rem_font_": {"doc_hash": "568abfa65255b948ff400d51c1d3d08533b5ecd27d938e74a490df4a7a0ac453"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_2d_svg_2d.return.header_n_join_lines_": {"doc_hash": "92751f7293674586a471f49e1c070413390466bba33acc966a4ce9ab53ded1d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_3d_svg_3d.return.header_n_join_xy_z": {"doc_hash": 
"16d6359666890362e69525b3c6e02c85c46abf8b24703440c440557996afbd6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_nd_svg_nd.return.header_n_n_join_out_": {"doc_hash": "dd5a4d00c7b6c834f800d2956bcd7e968f08cfa0f52a4322e4060b6a3f648c59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_lines_svg_lines.return.lines": {"doc_hash": "78fadd4c453ed18dc3366ca1ece3aef3f470aab71a3f7430071f9cf6ed31372c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_grid_svg_grid.return.h_lines_v_lines_rect_": {"doc_hash": "73db723f9082024545f451b8ef9ed0d23d2197b12be41b0a954e3d51c20c0bc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_1d_draw_sizes.return.tuple_size_r_for_r_in_r": {"doc_hash": "4ca7b6fe3f086def80c933a4ec63335c0aa0e87cbd0ec3c8b93687b598c2894d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_ratio_response_": {"doc_hash": "42310fc68f08be8b95e3387bc67318d65b1f80a4c22e9d06ebe93be6af59f566"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_tensordot_test_array_function_sparse_tensordot.assert_eq_": {"doc_hash": "d671c0525bf69d284dc347c78a2fa18309fcf140bc77fda72afae610ae04a27a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_cupy_svd_test_array_function_cupy_svd.assert_eq_v_v_base_": {"doc_hash": "2ac27893277863ceea8af9e1366ec16f5d2ef66e385769ed005f7358314962c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_unregistered_func_test_unregistered_func.assert_eq_xx_yy_check_m": {"doc_hash": "b35b9fbc30ed96559e3b1bbcf6c5463de881852e1b89c94d367ac94b6555c1db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_np_test_meta_from_array.assert_meta_from_array_np": {"doc_hash": "aca712366d29b834daec39a2a1971bc6c980bd196fc5b9c8f82626258a79652b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_collections_test_optimize_blockwise.assert_": {"doc_hash": "ec0df6136b8658e05f432f87b6043e3947977a59691c027eb6afac057eeee15f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_diamond_fusion_test_blockwise_diamond_fusion.assert_": {"doc_hash": "dc69f2a7faf664c717e86d007ba9d30f09406d9949e4b3fd21f8c19d102d9dc6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_non_blockwise_output_test_blockwise_non_blockwise_output.None_5": {"doc_hash": "f900256775d8c13964a9e43c43148a603db5da37942790b948e54850d14c4ede"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_top_len_test_blockwise_names.assert_y_name_startswith_": {"doc_hash": "b5f4eb2dba55be4cfbb47c112276703a3974af47c681f30d9de10299228f863e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_test_blockwise_new_axes.assert_eq_y_np_ones_4_": {"doc_hash": 
"02017fddf6506c9791b999bac6e5bf96f97ba47ae969c71e210b6294215141f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_2_test_blockwise_stacked_new_axes.assert_eq_z_np_ones_5_": {"doc_hash": "e7f01aeb88158b134842f6106783619c6267e46d88961b334d3d06497b7d0cdc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_front_test_blockwise_stacked_new_axes_front.assert_eq_w_np_ones_7_": {"doc_hash": "c7016de02aae1d0c3b6f17f3aca436572755fe459827a996a92782b385b9736d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_same_dim_test_blockwise_stacked_new_axes_same_dim.assert_eq_c_np_ones_5_": {"doc_hash": "0eb88272f42da59269c45042d1cf9a24308515103a3ea51264760a119f800953"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_chunked_test_blockwise_new_axes_chunked.assert_eq_y_np_array_0": {"doc_hash": "3af5c59603976c3ce4ab037c6eba5da975311c6d52792310052fdbb195a94ff4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_no_args_test_blockwise_kwargs.assert_eq_y_np_ones_5_": {"doc_hash": "b1f7790b8348d68363cd9cd0f3921bdf3a33243fbabd16fcecb8ea42b61196cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_chunks_test_blockwise_chunks.None_3": {"doc_hash": "2568626fcd46846d7f2eab7252af935a2785280d8c9cb96d7358d1ae411f1f43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_bag_array_conversion_test_svd.assert_eq_z_z_": {"doc_hash": "53fe8370688bc0b69c30b0c2b2ecd709da796b1b431b9ee8b03b572903e48568"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_args_delayed_test_args_delayed.None_1": {"doc_hash": "49426b737fc61084621bbebd8f87932b9559dde8298346ed09b4d44b56f52ddc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_namedtuple_test_namedtuple.assert_eq_A_B_": {"doc_hash": "c92bca1d087411cd1c77d8ddf5fa5c750cee6c1416699c0003fb904d49bc7fb3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_validate_top_inputs_test_validate_top_inputs.assert_i_in_str_info_va": {"doc_hash": "db1bb33b856a544dc2966ffa248ba4847fb1c056a26a1efdc1dee4d862519ee2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_dont_merge_before_reductions_test_dont_merge_before_reductions.z_compute_": {"doc_hash": "44dfae0e234f427b5f3cec6847f676ca9b7dad07f15c4f642626c136be5d21fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_atop_legacy_": {"doc_hash": "8c25ef7a3ebfa9264c00a290c560b6657e094a42e682ec5604aeaf7bde146005"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_pytest_test_keepdims_wrapper_no_axis.assert_rwf_276": {"doc_hash": "e9f3b882c4d58cfef7ba827beda53cfb9aeea588577ccadd2018df732cecf5cd"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_one_axis_test_keepdims_wrapper_one_axis.assert_rwf_np_array_": {"doc_hash": "8a50e425262987521552746010093f47f77ff46391ad9a96417701fb4f3e4072"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_two_axes_test_keepdims_wrapper_two_axes.assert_rwf_np_array_": {"doc_hash": "98fa4391977191c2093130de14fbe586a666e9dc397834832693e19ca62088b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_coarsen_": {"doc_hash": "3666cc9430dccb7d2b5769e95e1956b9f72ca079aef44f2c0417203034ea8edd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_pytest_test_arr_like.if_order_F_.else_.assert_not_np_isfortran_d": {"doc_hash": "bfab53616f2d7b322dec4ee52a05cb7df9ece5f7b84fe393bea1f86059b13038"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arr_like_shape_test_arr_like_shape.if_empty_not_in_funcnam.assert_eq_np_r_da_r_": {"doc_hash": "b3bf6841bd9c2b859e35e8788041f016201587b847d2d8a4a76fdad937ca56fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_test_arange.assert_da_arange_10_chun": {"doc_hash": "23405af146e4b6cfc67277b6eb23e3c6f367643f0a7e35ae5b0d0afd79d611f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_dtypes_test_arange_dtypes.assert_eq_a_np_a_da_": {"doc_hash": "afdecf7f8721dc4d25e071fb2d104fd4861bb2d649b3c1700f68ccbdf4b58e46"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_cast_float_int_step_test_arange_cast_float_int_step.assert_eq_darr_nparr_": {"doc_hash": "55948725bfb8d101ea96f22736544ce4a079efd189053ed287cf40185676a1b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_float_step_test_arange_float_step.None_3": {"doc_hash": "86bb49850afa8b3e4020a7efab24529cafc858149648da1be61e6df935009a0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indices_wrong_chunks_test_indices_dimensions_chunks.with_dask_config_set_ar.assert_expected_actual": {"doc_hash": "d95a4265c303aab48021f3bce2e3b40a7bdcb738e98d91eba11e69c4f2d70533"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_empty_indicies_test_empty_indicies.None_3": {"doc_hash": "dd97c18e5375ab634c753694e3788d74309ea17a14ddc65b37bd4cbda1a9d4e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indicies_test_indicies.None_3": {"doc_hash": "5b656fe233100332c53f67fe3affe45bee0cbbc47f4e526b830630ce6a6650aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_test_meshgrid.for_e_r_a_e_r_d_i_in_zi.if_sparse_.else_.assert_e_r_d_chunks_xi": {"doc_hash": "296e8dfa1097cc7a5ddf5700bbfa681185c427e5531847ea677da53cf347551d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_inputcoercion_test_meshgrid_inputcoercion.assert_eq_z_z_d_": {"doc_hash": "be2ccf119c275f2a4164abe57dad2e1f859fca9990829fbffad9610f6a8ffc6b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_eye_test_eye.with_dask_config_set_ar.assert_4_x_npartitions_": {"doc_hash": "e5d25e0311d44ce069fc866ae585adfc9d96e874347d5bb5bd1cca7c5a8512c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal_test_diagonal.None_14": {"doc_hash": "5889650b655d149bcb9d964791c15d5318e47c1cd316db3b816947e19cbe0a43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal.None_15_test_diagonal.None_22": {"doc_hash": "55fbb135f085eaff37465c9be1ba525cf2e6c99e99744d1c3e34cf482f6564bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_fromfunction_test_fromfunction.assert_same_keys_d_d2_": {"doc_hash": "4828de766802643eee6c33cbf993ab0ccb1979dd8cd89e11502d2c675128b6a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_repeat_test_repeat.for_r_in_1_2_3_4_.assert_all_concat_d_repea": {"doc_hash": "6cc76abf371b7476047e0b0df3ab995a2b31115459e19621a919095754c42118"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_basic_test_tile_basic.assert_eq_np_tile_b_reps": {"doc_hash": "2c75fed05dcfbecd6534f2688557594bb4937f2a583c69b5d67d85b640a93650"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_chunks_test_tile_neg_reps.with_pytest_raises_ValueE.da_tile_d_reps_": {"doc_hash": "4032670416c797a5d781b1457eaaf82d892f257da3dc21da73a103fe94689b60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_zero_reps_test_tile_zero_reps.assert_eq_np_tile_x_reps": {"doc_hash": "2c155efc2594f4bd1482082ba8d0724120fa34fdb042bb14eca1d3ba6a2fd816"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_empty_array_test_tile_empty_array.assert_eq_np_tile_x_reps": {"doc_hash": "b0bf2a0babbcdab08e10c3598afb56a523e1c8d5c78849ca45aad1e794ec11ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_0_width_test_pad_0_width.assert_eq_np_r_da_r_": {"doc_hash": "0a1959c6c2eccac6970ebdf62acbd9766414193b98647d9957edc08b0430d2c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_": {"doc_hash": "9f1735749ef42f5421e7a21ed0bfe0ac513458e2b61307bc5fe900a91a3aa288"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_3d_data_test_pad_3d_data.assert_eq_np_r_da_r_": {"doc_hash": "b574546fce9d0463237e644ee652960a4b2e9841e1697b3ae3a6b503fac36a58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_operator_dispatch_property.return.wrapped": {"doc_hash": 
"296f1c30b9fa716abaaf5599be753e4cb948d50427a863affc913f61f6523093"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray_EncapsulateNDArray.__array__.return.np_asarray_self_arr_arg": {"doc_hash": "9da32cc53c09562d5834621380ed7ac08f4990389d7fd13fc80035e58e357e3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_function___EncapsulateNDArray.__setitem__.wrap___setitem___": {"doc_hash": "3f49a12391631e18bc4bf27ab23871022cfdfb4f678c6d5cb2c6537375aa1461"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_ufunc___da_register_chunk_type_En": {"doc_hash": "3b85694a59f0ad807c39e8ad8e8f968e957ebfc8ce4971b999cd4b4dfbc1584a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_WrappedArray_WrappedArray.__setitem__.self_arr_key_value": {"doc_hash": "c071ac80e8702996b4cb000960977d2b8ec5713a1e766e70771200e07196c7bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_binary_operation_type_precedence_test_binary_operation_type_precedence.assert_": {"doc_hash": "a6b8135c7cc5a14eee9383143b7c0bcc604b1f23b366232d7710d1d8b0d061b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_array_chunk_test_is_valid_array_chunk.assert_is_valid_array_chu": {"doc_hash": "35d41a25028472bb4a285933fc82b1c7576774b284409e41550ced3eda7f012f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_from_itertools_import_com_test_fft.assert_eq_da_fft_darr_n": {"doc_hash": "c5d02bca4dad98dc36d564d567119e1c2de3701a977015581678482c575fcf3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft2n_shapes_test_fft2n_shapes.assert_eq_": {"doc_hash": "632f12d366ddc2d5a41f576f98646e88bafdd9624a849d67107b55099f026222"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_n_kwarg_test_fft_n_kwarg.None_5": {"doc_hash": "1117f5baea2fac14e141885a1d0765ff6700549f23c9b9f454e7829316e8cd05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_consistent_names_test_wrap_bad_kind.with_pytest_raises_ValueE.fft_wrap_np_ones_": {"doc_hash": "c3f6e727cddaa01e2853d30978f216338f44a87f6a3d7e33907cc85af2f6743b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_nd_ffts_axes_test_nd_ffts_axes.for_num_axes_in_range_1_.for_axes_in_combinations_.if_len_set_axes_len_a.else_.assert_eq_r_er_": {"doc_hash": "f5bd41db059f7a5545bc576b1c70c677a1e9647196cdc784f9f4e30e2fbb7be7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_ffts_test_wrap_ffts.if_modname_scipy_fftp.else_.None_5": {"doc_hash": "530bd615afd19fcd23494b19d0dd147d0ea6537f9335f08769170aafe7d303d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_fftns_test_wrap_fftns.assert_eq_": {"doc_hash": "2f0451373ad2387f9619cd5461d51309c638eb6db4e479cc1337003d69b1e4d6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftfreq_test_fftfreq.assert_eq_r1_r2_": {"doc_hash": "0101e8d82e97518ca26df3d4773a8fdb42253e71b291cb8dfac480f88d12c0e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_rfftfreq_test_rfftfreq.assert_eq_r1_r2_": {"doc_hash": "94005c36f4ebad7528317e3b15069970d914a24311c3a4267351c73b37be61a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_test_fftshift.assert_eq_d_r_a_r_": {"doc_hash": "281386e558d82975081ed86791390423694f3eba21201dd4550cd4db7d9836d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_identity_": {"doc_hash": "e97bd10d31521f993760023fc0ecdd7242f475de66e0726259da3837b69369ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_input_validation_01_test_apply_gufunc_axes_input_validation_01.None_2.apply_gufunc_foo_i_": {"doc_hash": "fbeda353c636daca6d437d432f8c4651aa6eacfbecc3b9a0f22b5b5e3d934f32"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_02_test__validate_normalize_axes_02.None_2._validate_normalize_axes_": {"doc_hash": "e6cde94ee6bb668f5338930e70a260885f56b55f8d1efa1ddcea6e426b519d23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_03_test__validate_normalize_axes_03.None_2._validate_normalize_axes_": {"doc_hash": "2fb04460ca164c2f0de8b8f705643b0833e000e928a7099ca7b0ea969ac06396"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01_test_apply_gufunc_01.assert_std_compute_shap": {"doc_hash": "2600c72bec68c1dd91729297d43c7c1204033347f9e4e901bf7ce859a581ddef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01b_test_apply_gufunc_01b.assert_std_compute_shap": {"doc_hash": "35415ee65556e42787f37995d7c1773e29bffd8d3258742971d50fd035327b1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_test_apply_gufunc_output_dtypes_string.assert_mean_compute_sha": {"doc_hash": "091ea3d1a834cd1285e86ab1cb3fdd41646a34d837718bea4f5f505d0da95654"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_pass_additional_kwargs_test_apply_gufunc_02.assert_c_compute_shape_": {"doc_hash": "81917c6b411a0861fc24fbded2ebdb145e95d452acf1dcb827361ac6b2d5648e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_scalar_output_test_apply_gufunc_elemwise_01b.with_pytest_raises_ValueE.apply_gufunc_add_": {"doc_hash": "c6ad27f35d74c7ce962634f10b65382ec9aa83c60ef65f9056cc10dc5a8646d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_elemwise_02_test_apply_gufunc_elemwise_02.assert_eq_z2_np_array_1": {"doc_hash": "bc1277464ca1c2f5eab165c38e1d0927efc7ddfae8a93e335c901cc9608d6ec3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vector_output_test_apply_gufunc_two_scalar_output.assert_y_compute_2": {"doc_hash": "d8e497532feef657dfd76f514429300b382ee672831144d34832506a938428aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_two_mixed_outputs_test_apply_gufunc_output_dtypes.assert_eq_y_dy_": {"doc_hash": "0cdedd70119571165ccb76e698e468f34f5d80ceca14822056fff0249fb31674"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_two_inputs_test_gufunc_two_inputs.assert_eq_x_3_np_ones_": {"doc_hash": "4665e4ec2c67b16b80892aaf06db4a1dd359579f9c20d84917f4f755e2b339ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_same_dimsizes_test_apply_gufunc_check_coredim_chunksize.assert_consists_of_multi": {"doc_hash": "cf65b1c5b7099841169827156281c95c6f454eab2b8f6d2b0f1ebdab9884768f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_inhomogeneous_chunksize_test_apply_gufunc_check_inhomogeneous_chunksize.assert_with_different_ch": {"doc_hash": "2f70d2267db4a76bb927bfd32aedc882bea9a97d01e2ea7de488d83b0be70d00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_infer_dtype_test_apply_gufunc_infer_dtype.assert_eq_z1_dx_dy_": {"doc_hash": "6b90ebfdb1e24c98273a8a09b131bf385bf62e61891896d501c653eb1b19c2fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_01_test_apply_gufunc_axis_02.assert_eq_m_dm_": {"doc_hash": "c9fa2b12a816bcd46ba395f8baed4d24c9a7ae9447161c2c3db2895f8d4c458c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_02b_test_apply_gufunc_axis_02b.assert_eq_m_dm_": {"doc_hash": "97910ed9f1ad2b2c8fa1e11012066544ad6e18e919ae53c169eb20443b7cc0df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_03_test_apply_gufunc_axis_03.assert_eq_m_dm_": {"doc_hash": "f1337b281b074fc6552d841716892cdacb1b3d8f6641efdf4c02e85839ad473d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_keepdims_test_apply_gufunc_axis_keepdims.assert_eq_m_dm_": {"doc_hash": "8aa5433c365f831ebd5a2636fe0fc517db4f6c52ca5904fa8e98d3f8bfd46153"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_01_test_apply_gufunc_axes_01.assert_eq_m_dm_": {"doc_hash": "4fee468b197621a0c813b6e6284d57e6765be9c1fc2b901fb7c666d8f6f530e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_02_test_apply_gufunc_axes_02.assert_eq_m_dm_": {"doc_hash": "bfec28111607f2a98c0e75aea6272f77f1be79686dd1e81144cb7962fd7594d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_two_kept_coredims_test_apply_gufunc_axes_two_kept_coredims.assert_c_compute_shape_": 
{"doc_hash": "a0e9d6e34515be9dc0a37c198f585b8cb1f735b47c6a4c830e6bcd9c6406f95d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_01_test_apply_gufunc_via_numba_01.assert_eq_x_y_": {"doc_hash": "c043dfe3c4a72c73af0bf402c0e282cf7e4fc95f5d8cff54454577d68c89ff40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_02_test_apply_gufunc_via_numba_02.assert_eq_x_y_": {"doc_hash": "44e263695887a301dee59a953a31d3dae3a2dae9dd29cac0b858e53e6266cdbe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_preserve_meta_type_test_preserve_meta_type.assert_eq_mean_mean_": {"doc_hash": "0b65dc0225e3f9283c9e7ed10a5393740e01b767ad8b26515d62cea6f20e0905"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_test_imread.with_random_images_4_5_.assert_im_compute_dtype": {"doc_hash": "1de2325b19fb0d81ab2d3ead4f81afc557a6c7a7c4229c4eaf487bcd1d1b8ece"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_with_custom_function_": {"doc_hash": "2599cd47d29cfaa9e97c754a44278a1e780d3fb3da9e4288f206fd3e7d29228e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_": {"doc_hash": "9e977e5b4aeb57716923fe627fe008c5fd287fc79f10103000a9f397451e8f99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned": {"doc_hash": "65a04bedb67fe6472daec8b94121b8e0aebdd852b263b81a5ba987ff7b53238b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"doc_hash": "56e27bcdda8341df9e0abc0bf3deef570f1c5962f4169f002027714197f8c11d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_zero_height_chunks_test_tsqr_zero_height_chunks.None_13": {"doc_hash": "0ee246afab18e541055ac35cf8974d5212ac760fa11895d794f44f363eb535dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_sfqr_data_": {"doc_hash": "20817ea12c96f43ff18fb94977e93a2a3257ceba16654a1a2e7f663829199475"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_qr_test_qr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_qr_data_": {"doc_hash": "b6a81803841024b544a781ec1f9aed609b18e42e4379f41778bdf3f10c4b88ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_linalg_consistent_names_test_linalg_consistent_names.assert_same_keys_v1_v2_": {"doc_hash": "d46fd9f8dc94ae1e36c0cca700b144fc34d2bea627659343aeb0f257d7d7566e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_dask_svd_self_consistent_test_dask_svd_self_consistent.for_d_e_e_in_zip_d_u_d.assert_d_e_dtype_e_dty": {"doc_hash": "8aedec5a7933f1a11ac7218c675c3e504492fa30280fd2a949b1e350fb74cc84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_1_test_lu_1.for_A_chunk_in_zip_A3_._check_lu_result_dp_dl_": {"doc_hash": "a30312252a7af98748f02e752e943dbd7fd015e9362c67c5588ed88d57d113ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_2_test_lu_3._check_lu_result_dp_dl_": {"doc_hash": "6b5788244d3725ad78202607ea9c7aae069126bb509eb3d644308268b61cfe03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_errors_test_lu_errors.None_2": {"doc_hash": "4d9218cc77a3dd7e77b76694311d4c377d1f9ed5e81dcd92aef5e633196de3fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_vector_test_solve_triangular_vector.assert_eq_dAl_dot_res_b": {"doc_hash": "8bd870cecfd823b1d57679e1a6cf7a7a66ef425d7931a7c45859633d932d27ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix_test_solve_triangular_matrix.assert_eq_dAl_dot_res_b": {"doc_hash": "daf957161ba4d6f624483d7825f265e31f40a04f9b2bf4e18d4cfaeeffcafcfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix2_test_solve_triangular_matrix2.assert_eq_dAl_dot_res_b": {"doc_hash": "f31e6d66533a4f6ceff1629d021f0b1120203bf47d1a0ca89b8bbba2a9382d35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_errors_test_solve_triangular_errors.None_1": {"doc_hash": "6b1a630125be491cc9243808dd3e61a9b626d2985c07e414b328c32a63317f89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_test_solve.None_6": {"doc_hash": "2969c1c5632150f0dedfe3dc953c98e99145e33e664f5f75699b77d1ab4b24ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_inv__get_symmat.return.lA_dot_lA_T_": {"doc_hash": "5dc075a1feee1df456f31475092ccc2d25639f37d656984d1b999c7369e6688d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_sym_pos_test_solve_sym_pos.None_6": {"doc_hash": "535caab04d7f269295f0483df0e60743b34ee7af18802f3737f4244e1a56da6f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_no_chunks_svd_test_no_chunks_svd.for_chunks_in_np_nan_.assert_eq_abs_u_abs_du_": {"doc_hash": "284e1c7fef49408375d830168938560b7f751ea739bfb233902f61235776ee91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_supported_array_shapes_test_svd_supported_array_shapes.assert_eq_dv_nv_": {"doc_hash": "945c7744291fc1ed57815f3df54dfee5100d8c41e23283bcc37d33f41ee5e7a8"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_incompatible_chunking_test_svd_incompatible_dimensions.with_pytest_raises_ValueE.da_linalg_svd_x_": {"doc_hash": "bad1218e900cf50e5f082dde6e7ad14851db59e81047c913d265238dfa97e7ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_ndim_test_norm_any_ndim.assert_eq_a_r_d_r_": {"doc_hash": "f13f5b52929a966e2c92c06cdbc0d8c533e1033c3d1e4157c58117ddfa5ed43a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_slice_test_norm_any_slice.for_firstaxis_in_range_le.for_secondaxis_in_range_l.assert_eq_a_r_d_r_": {"doc_hash": "83ce533239f14be257ecb287c14918812ebae081f1979c2b233c1c13bdeb1110"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_1dim_test_norm_1dim.assert_eq_a_r_d_r_": {"doc_hash": "74544d4bc5b429c35973ddeec64500eb4fbda8e13fb4bea1e42c1caee3501626"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_2dim_test_norm_2dim.assert_eq_a_r_d_r_": {"doc_hash": "d3d57d0c333e31c410f4313c2a9d526b6f7e02ca1712fae1aa73e431e9bb3f9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_implemented_errors_": {"doc_hash": "f857fffa6c1789929c7b8ca5bef1ea3eae088fb273f46755e2ade295bf1eecea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_random_test_tokenize_masked_array.None_4": {"doc_hash": "d36ca5bde135d007e904f3e172cb702ada8c8cbb487eb85d66d900b865d27e3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_from_array_masked_array_test_copy_deepcopy.assert_isinstance_y2_comp": {"doc_hash": "635258393bc186c5f22cb0791ec18ca23ec8758ad1234a2094704303f621894c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_functions_functions._": {"doc_hash": "1f11481f530bebd95a9ae3a8a559bec76e4d495b5635a264294d646f7dcab989"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_basic_test_basic.if_yy_shape_.assert_isinstance_zz_np_": {"doc_hash": "4cd9c9bc8072a0ba3dadb274698bcef8d64e3964c6bcaa100df3fe3044f1f0d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_tensordot_test_tensordot.None_2": {"doc_hash": "873559e2f682789b392baad6418bf3bcc01a5c3a0ab7e4416a549cf6e6fdef27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_check_m": {"doc_hash": "dd1b146a674fe3a8f40068b3017598bd3fb2e92885a16908d2a7cdba9570e7d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_check_m": {"doc_hash": "e0919cae04db58b4b5160f6df7ce3551b0b34d1283ccf252666dc141a13c5422"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_output_type_test_mixed_output_type.assert_isinstance_zz_np_": {"doc_hash": 
"324064cb8967bce12aabb1ef19888dd7d633adfc49f281583909437f7a764c4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_creation_functions_test_creation_functions.assert_eq_da_ma_fix_inval": {"doc_hash": "899b75a42ea21e8b945b160057aadaac59b901957f3bf007fa7c4c0b4f7076d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_filled_assert_eq_ma.if_res_is_np_ma_masked_.else_.assert_eq_a_b_equal_nan": {"doc_hash": "f94702fdfef877dbb3519aeb6083f662a050fcc949cd2e927e9954c8e0efe35c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_test_reductions.None_6": {"doc_hash": "c2d168d12d21612bc44e91946239578d858812721de9e861e10b1a8f1e4fc350"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_allmasked_test_reductions_allmasked.assert_eq_ma_dfunc_dx_f": {"doc_hash": "7aa74c6e3663e26fdfb6d5910e2f46c4f0fa4ace2c9cb66d531ee3c363f4031b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_arg_reductions_test_arg_reductions.assert_eq_ma_dfunc_dmx_2": {"doc_hash": "c7db251706be26e9869717ecdbde239c65100b8f16be9a317a4aea3405e4b9e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_cumulative_test_cumulative.for_axis_in_0_1_2_.assert_eq_ma_dmx_cumprod_": {"doc_hash": "a798b75c0557404e4ce4942f6c40743e2dc59d1a6eb0870e05e694f0d2ca160e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_accessors_test_accessors.None_3": {"doc_hash": "7f0e34eab362cfea5c4d56b953708d12f2484b4dfe73f401193c244e53e143b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_masked_array_test_masked_array.with_pytest_raises_np_ma_.da_ma_masked_array_dx_ma": {"doc_hash": "60641999cf87448bb0113bab0cb4a5c823c02df778db4925f8dc347dc95dd8cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_set_fill_value_test_set_fill_value.with_pytest_raises_ValueE.da_ma_set_fill_value_dmx_": {"doc_hash": "378aa1646ab6817390b7ff4e897803ca745c28f45f5ea972dd0b86d207719343"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_average_weights_with_masked_array_": {"doc_hash": "229df04d64aac4198e1ff48398b6fee2ad00d730a9146502a800e8d435d9f9e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_test_min_max_round_funcs_": {"doc_hash": "7439a4f00fc089c9d33c9d8153fd75ed6fa556b7b62604a6442eb03077553c23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_pytest_test_fuse_getitem.for_inp_expected_in_pair.assert_result_y_ex": {"doc_hash": "85710d46c92723b6a55627c8da450a506179ef36822440cd45be7761283874d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getitem_lock_test_fuse_getitem_lock.for_inp_expected_in_pair.assert_result_y_ex": {"doc_hash": "6c06fa2e39e710244ac85582d307ac1c50d7ae262b886dc35b688a703bec25fc"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_with_getitem_fusion_test_optimize_with_getitem_fusion.assert_len_result_len_": {"doc_hash": "8ad1c49fde51b8beb84f53db922dcf8f8c7dce1904d826e647445071a479d843"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_slicing_test_optimize_slicing.None_1": {"doc_hash": "b61c1d631ee355ed4cce0fac0d17d31488d31a2e755b4bc8293b11e63be334a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_test_fuse_slice.None_1.fuse_slice_None_np_array": {"doc_hash": "64914ccc058e57a25dc5e1c310a455a9c514f55f009f3aa65b2ee1d171e1e7e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_with_lists_test_fuse_slice_with_lists.None_6": {"doc_hash": "59ac7f7a5331a8ddee8ffa72ddabdef178d01008d44aacc2d868429c49cc2ae1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_nonfusible_fancy_indexing_test_nonfusible_fancy_indexing.for_a_b_in_cases_.with_pytest_raises_NotImp.fuse_slice_a_b_": {"doc_hash": "79d3e6a5ecfaacad1545e3e00a0da7dfaafb77cb3f93bf2cccf0e8044ac21e0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_hard_fuse_slice_cases_test_dont_fuse_numpy_arrays.for_chunks_in_5_10_.assert_sum_isinstance_v_": {"doc_hash": "a60dbd7839aeb0d0c1c763c4335375f939d06c60f4c9e860f874e57ac71fcdee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slices_with_alias_test_fuse_slices_with_alias.assert_dsk2_fused_key_": {"doc_hash": "4677fd16529231b4d3c05e96db0649c8e6d499e5e1ac8d8cd8aaa24f8816cc8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_dont_fuse_fancy_indexing_in_getter_nofancy_test_dont_fuse_fancy_indexing_in_getter_nofancy.None_1": {"doc_hash": "3cf9971f95fb73ca6a221f5cfbfd0d1faccb90194ec6213a69f8605f91d88600"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getter_with_asarray_test_fuse_getter_with_asarray.assert_eq_z_x_1_": {"doc_hash": "0b17308e743e932b547ade9ab37fa3ca524db62254c02bbffe3a59e77f3c926b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy.for_orig_final_in_opts_.assert_optimize_slices_": {"doc_hash": "c1fdccbf49405dfeda38fb0f22bce4d008d55bf8a480672e76577d03f6695a0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_turn_off_fusion_test_turn_off_fusion.assert_len_a_len_b_": {"doc_hash": "3219a92b4bcc946e363f8d712a7669b5f1a8d7876abda4517ea7a81acfb7d736"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_gh3937_test_gh3937.y_compute_": {"doc_hash": "29de2d34d19a8bc75e83f0d37aa989c63463e6d267ce3eb78b3acfca7a4def8f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_test_overlap_internal_asymmetric.assert_same_keys_overlap_": {"doc_hash": "4e9ab70969b790b6ec2c60d7f48e04a8c84a3ecafb44f0359a1def915e8e0452"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_small_test_overlap_internal_asymmetric_small.assert_same_keys_overlap_": {"doc_hash": "ce5d051590780cb1fd8b300ad962d1ae56b8267967d5109d7b611cde52558a96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"doc_hash": "2507ddff4dc14a30d67797a28773b3d1cb0352008490ec94d241f3b92f35504a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_reflect_test_reflect.None_1": {"doc_hash": "1deab0dcf8adc7ea964319bf210ec0cd68152eab82524ae8c5cf61cde9986bc4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_test_nearest.None_1": {"doc_hash": "5989f0b4b73d30b4ac1d05408ebbe3b18eca23684fd113109ace627ea8e037bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on": {"doc_hash": "d92839efee8e7bac502440e73b820e952ff668a570886f4b40ee5699d7c4f220"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_boundaries_test_boundaries.assert_eq_e_expected_": {"doc_hash": "15bb7bd9513cd01da3f71c4abfe5ab6256787fbc9e052ee97f3f5c172a04897c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_asymmetric_overlap_boundary_exception_test_map_overlap.assert_eq_": {"doc_hash": "9a2c3697a48c77e041425b189f746079d9ad91606db954a65aaacbedd9a18780"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_variadic_test_map_overlap_multiarray_variadic.assert_all_x_compute_": {"doc_hash": "d57ad0bce102a9131e17483629f5b8595c09b7cf025fc9125850b8be05ef0f51"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_overlap_test_nearest_overlap.assert_array_almost_equal": {"doc_hash": "e98ffa66f9c2e124038a9fcfef6ee1a36aaca4077644df5881a6750708a611fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_one_chunk_along_axis_test_constant_boundaries.assert_b_chunks_darr_c": {"doc_hash": "759045858be2c64cbb65d8060f5cf8da0303bbcc57e06582ec485ad3e878adda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_small_test_no_shared_keys_with_different_depths.da_compute_r_scheduler_": {"doc_hash": "12ca298029ec4352bb7661064026730ae821a6f16c519cd783798aa526370361"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_small_test_overlap_few_dimensions_small.None_5": {"doc_hash": "6b01ddfd4bfbd336e3a770c0c927573680d280d64fa27b22de304cab2fb9a8c9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_test_overlap_few_dimensions.assert_len_c_dask_10_": {"doc_hash": "1c40d5f078cbf362213c6e1ff7da044829e8c1a76490e9f1cd3f929b84396e68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentile_with_categoricals_test_percentile_with_categoricals.assert_same_keys_da_perce": {"doc_hash": "eb2d7918f7601d4de1a34e0ade60ac1d3fe3e46d35e05c5a459bc9fe50b7a805"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_unknown_chunk_sizes_": {"doc_hash": "59df943a5b79629b05bacca84509e29ce07b301e4b7c1bded2e0aaefa590a43e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_pytest_test_determinisim_through_dask_values.assert_eq_samples_1_samp": {"doc_hash": "56ec3cc84f282bacc8f5d9fa6ee739aef9b5b58b4748a7a0ca9bd2b3a47d0a3b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_randomstate_consistent_names_test_randomstate_consistent_names.assert_sorted_": {"doc_hash": "eba20ed8c3814a4a49de6a3c0ddd80157fcb1993e908fa74cada5f5dcab32370"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_test_parametrized_random_function.assert_len_y_90": {"doc_hash": "629d14c379335ae79e3b1689484af1bded7dcf088546cff43d967c70b892bb5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_kwargs_test_consistent_across_sizes.assert_eq_x1_x3_": {"doc_hash": "f7a42cd19efb71de758dcf83f2239121394acadd1dc6e4ee81714fc26b9b466b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_all_test_random_all.da_random_standard_t_2_s": {"doc_hash": "0fbb40073ad4864c514dc0f6f3b85814c57955ed8985a0218b90d6a0f0043d8b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_array_broadcasting_test_multinomial.for_size_chunks_in_5_.assert_x_shape_y_shape": {"doc_hash": "95fbc5c05f7539e7c84fd405765fb02a5adbb8c5601a73b46fe3fa45c9e268fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_choice_test_choice.assert_len_res_len_np": {"doc_hash": "e1c9324f5f23655640fc7e00f9060a60888e229804aaf9b36c72bfedb40a813e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_create_with_auto_dimensions_test_permutation.assert_x_shape_100_": {"doc_hash": "8c9d7d3285260b0880cf9eea901268c96fdd2807b1d04ca74a0df10533d9ce27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_external_randomstate_class_test_external_randomstate_class.assert_eq_a_b_": {"doc_hash": "3cd6cb2613138028f21916f404a9a9d5e0210fcea4e6843a8834629d2a72e157"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_auto_chunks_": {"doc_hash": "8ef2ff71dafc0cf4db05fbff10825f6e5437a0a4c65de1011c21895ea0581f4e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_1_test_intersect_1.assert_answer_cross": {"doc_hash": "a0bb97630b2e27457fd46324b712c9d0002bda3f1c560a5fb4c56296e4bc4a1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_2_test_intersect_2.assert_answer_cross": {"doc_hash": "63b6d783dd7fb37af673eed1cb7714f1c801108c80c793ed7e15d1758c4bce74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_1d_test_rechunk_2d.assert_np_all_x2_compute_": {"doc_hash": "d9f6dce63b1b531dea2d9d7f17d443c0c6e0483b68ba6c2ead5c31d3aee8039b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_4d_test_rechunk_expand.assert_np_all_y_compute_": {"doc_hash": "5ffcf9448d382f060fcd43087c0207281576dfa57e07ca303ed0f334d1ff55a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_expand2_test_rechunk_expand2.for_off_off2_in_product_.if_a_off_off2_0_.assert_np_all_y_orig_": {"doc_hash": "d6040705ccd46cd1641121e0e21af37f0b5f30fdb98702ade914c9879297c248"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_method_test_rechunk_method.assert_np_all_x2_compute_": {"doc_hash": "a6917326842ab85ac395af95104fe45c73b914d5ece85976cc3269d2b187d379"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_blockshape_test_dtype.assert_x_rechunk_chunks_": {"doc_hash": "f9ed10e7bee51fef10156d6839d9676bc1a0903b18d027061a62d322c7c7b6cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_dict_test_rechunk_with_dict.assert_y_chunks_24_": {"doc_hash": "aa8f7d876f815fc5ebef489b8cc2fd70bf917a25f1fb26de0b1839b2f97f3b5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_empty_input_test_rechunk_intermediates.assert_len_y_dask_30": {"doc_hash": "f8a5cd6e97f95f10e2d075caebefba8bef8864ff8beb3ca60cf1e5e38644cee7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_divide_to_width_test_divide_to_width.assert_chunks_4_4_2": {"doc_hash": "ca080b1e3e26a0ff34f7eafe8b2c03e83fa12ff2437cf471fb315c74c26a5b43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_merge_to_number__assert_steps.assert_steps_expected": {"doc_hash": "af3f0d61c64499a953c462fe8937ae6fc47566581746a688a65b415bbfbba9a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_test_plan_rechunk.for_i_in_range_len_steps_.assert_len_succ_1_le": {"doc_hash": "6f81e7032cb5875b68538e542c423d4e91eef818a133ba4178f2aabe1d0e89ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_5d_test_plan_rechunk_5d.None_2": {"doc_hash": "19a5b2acd6aef52246eb3bd0919cd71df75ac73abd1a2159308f026f66ceab1d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_asymmetric_test_rechunk_warning.assert_not_w": {"doc_hash": "5818cef12ea6c6e99ad2ced8baef2f25afa4ab2adb2e1f9e595c4b54ae793f88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_dont_concatenate_single_chunks_test_dont_concatenate_single_chunks.assert_not_any_": {"doc_hash": "17ed6ad7c51ed4953bc04a61a364392b2066b64978cdb502d183de6cee88fc1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_test_intersect_nan.assert_result_expected": {"doc_hash": "bfdd901e97978864ede2e28092ba81455829776d3c593745a3c3ed45e2ddf8cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_single_test_intersect_nan_single.assert_result_expected": {"doc_hash": "fd62873d939574e887b7da8dbe39ea86fea22506a1519450a8809707e46c0a84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_long_test_intersect_nan_long.assert_result_expected": {"doc_hash": "e9336afdf0993b691bc0b153329f7bddb09c0561abfeefe04d61ac033198d817"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_pandas_test_rechunk_unknown_from_pandas.assert_eq_result_expecte": {"doc_hash": "b2e23227e462271e80f380d077a8bdc4119099115589a536070ceb4f8295fefe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_array_test_rechunk_unknown_from_array.assert_eq_x_result_": {"doc_hash": "b6164b93e8037eeba5a4b136c4c000b026b01bc40ba6b9bbdc3baf35d9c31f3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_test_rechunk_unknown.assert_eq_result_expecte": {"doc_hash": "aac008192767784eb86b4bc0d271a2861779bd250819dd3b5d72052f719b5124"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_explicit_test_rechunk_unknown_raises.with_pytest_raises_ValueE.x_rechunk_None_5_5_5": {"doc_hash": "90f72328a3ed5b7bf44264dd3a85f17705b4541f6b17d5b92298ae5ffecec1f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_single_test_old_to_new.assert_result_expected": {"doc_hash": "11b6dd1c80d345d1f268950077ae915419780f459b71c869be7207a80f34f212"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_large_test_old_to_new_large.assert_result_expected": {"doc_hash": "ea92b62374b6ee8011fc9148798024e246e28acb38cd43e83126a9c8a985621f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_changing_raises_test_old_to_new_known.assert_result_expected": {"doc_hash": "014e5c5b3b966fbad5bd418cf47df1eb4e60583802bf830f6ec540d0bf79c947"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_dim_test_rechunk_avoid_needless_chunking.assert_len_dsk_8_2": {"doc_hash": "b25f43428233af22b397dfbc9ec93f94137ec52dc37f3fe99e16182956dc40e0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_1d_test_rechunk_auto_1d.assert_y_chunks_expec": {"doc_hash": "9e97ad4d9ec821262f88f5af6287100c111ccbdded77ad24f6caa07f2dbd2e03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_2d_test_rechunk_auto_2d._limited_by_largest": {"doc_hash": "2d1617605f6476fc707773ec58afb91c69d1eac445cd86279e2144dcc22b8d1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_3d_test_rechunk_auto_3d._even_split": {"doc_hash": "137ae4e03a26298458dea33cd344269afe3ae6314cab631e853dd3bcee0c349c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_image_stack_test_rechunk_auto_image_stack.None_2.assert_z_chunks_1_": {"doc_hash": "0fd0e4290affdd89150eb9c884f8ed5346c366686f7e0cd3303677a6989c4b74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_down_test_rechunk_down.None_2.assert_z_chunks_10_": {"doc_hash": "6e15253e9b98e8b7540d17ea6675a640d9eddff7dd708de14073c4101e249e48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_1d_test_reduction_1d_test.if_split_every_.assert_eq_": {"doc_hash": "6a42e8e597ed78f33ffc283ef7842861e0eace14187ccdfdc067b22951254a2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_1D_test_reductions_1D.None_15": {"doc_hash": "48aea01596966fb4d3a58536599c07b74184e33921b731e6fbcec94a198a6f24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reduction_errors_test_reductions_2D.None_15": {"doc_hash": "010b5685cb5f4513873f0638db53de90a95d802f4a230eee7eee50395394d790"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_test_arg_reductions.assert_eq_dfunc_a2_0_sp": {"doc_hash": "10e47ce7447e2d8b1c535950e08249c97dbda9979404f99202876a79adee760c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_unknown_chunksize_test_arg_reductions_unknown_single_chunksize.None_1": {"doc_hash": "8f2a166bb301ad0e0cbb84e3102545aa5b55f18974d3397eddbae6bc48d92a68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_moment_test_moment.None_7": {"doc_hash": "8fe3d6e73906859b89d9580ab4eb29da978c6c271a7bc39047d8774199103b9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_negative_axes_test_reductions_with_negative_axes.assert_eq_a_sum_axis_0_": {"doc_hash": "30ced6dfd7c65bc02a31e0439d65b7b6af0e57414f42419e21fac324f36a3d25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_test_nan.assert_eq_np_nanprod_x_": {"doc_hash": "1f3939d55824268ab985366c8e8cfd82429623a0532e2d933038c4e526e607ec"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_0d_array_test_reduction_on_scalar.assert_x_x_all_": {"doc_hash": "ff3b34887ec3aad2c1ad361a5de9ea9491c0bf4a0e9c6d536f9e87cbb1839f4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_empty_array_assert_max_deps.if_eq_.else_.assert_max_map_len_depen": {"doc_hash": "6728a53a209f4b9373f451f150b1a90a47fe4621bf03e64054078e21906e1fc6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_depth_test_tree_reduce_depth.None_26": {"doc_hash": "8ef0527b00790c4c58b8f99fd56619220f2774d11b695a7ef522782ef5390121"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_set_options_test_array_reduction_out.assert_eq_x_func_np_ones": {"doc_hash": "48cdb35d4af87a9cf8782ce95302b72576c226985170daedb3a6272a7ed40a4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_array_cumreduction_axis_test_array_cumreduction_out.assert_eq_x_func_np_ones": {"doc_hash": "25cd41c87e7f80c2d9d8d767a3833167f09a406275a7a3dbad77a532cecbc230"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk1_test_topk_argtopk1.None_1.daskfunc_b_k_axis_3_s": {"doc_hash": "1fbf579c889ce86e0bc4e8a39967b00b770e744cc5f3a40d767d96b852b9c85b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk2_test_topk_argtopk2.None_1": {"doc_hash": "9c87f5102d38d5aa13881ccb6b069ed7d14d5b8f896502e3aea59092948bce83"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk3_test_topk_argtopk3.assert_eq_": {"doc_hash": "f2f88084139648e21b78a5c9592b1c7a7bc08ef757dab22ef6d9a9a3a291e1a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_regres_3940_test_regres_3940.if_func_not_in_da_cumsum.None_1": {"doc_hash": "9057faaeebd4ed3c6208bd9a7db534d2a7006802e02e0a53affbff29fc414676"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_trace_test_trace.None_13": {"doc_hash": "99e2c39bc9c0dccb09e79af22390621fc071d8a3dd1e0d342c566af9e0aa0d7e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_expand_tuple_test_expand_tuple.assert_expand_tuple_7_4": {"doc_hash": "ba0c55faf016f38ba39d407876842a8e398d48581d1cdc6725b0fc9c323d483e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_contract_tuple_test_contract_tuple.None_3": {"doc_hash": "e8e6eec6fa53b866b400d0fc13951161da8076b90593e14f28af7d784537fd5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_array_return_type_test_atleast_nd_no_args.assert_np_r_n_da_r_n": {"doc_hash": "f5302e12e56eac249da0b750a7ced31ee03361268135afc3c4adf081f70e418d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_one_arg_test_atleast_nd_one_arg.assert_eq_np_r_da_r_": {"doc_hash": "f95a7b42534189df7313d292c303cacb8d6789a89f071090cc29225ede4d7d16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_two_args_test_atleast_nd_two_args.for_np_r_da_r_in_zip_np_.assert_eq_np_r_da_r_": {"doc_hash": "94b1b17e2d80d5880f7472ba65e443d77010163bbba680f1f091d1039313396f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_test_transpose.None_1.d_transpose_1_2_": {"doc_hash": "2cdec967647dd6c8df9d68c1502a0fbb04ae6f6f15a6426b0c570c9aa8be9dcb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_negative_axes_test_transpose_skip_when_possible.assert_x_transpose_3_": {"doc_hash": "b6489591e974a6d6626b7e312504161483c3db07ba491905601b12a0d70aae01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_swapaxes_test_swapaxes.None_1": {"doc_hash": "4f4f900a3d54a7a59de1b4a408b7bc9c6456b294f8dfd5b59d4596eba53db7ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_test_moveaxis_rollaxis.for_axis1_in_range_x_ndi.for_axis2_in_range_x_ndi.assert_eq_np_func_x_axis": {"doc_hash": "14ec6df936e4e0b57c264c8f2d46fc131af111473d416441e84a2b9248da808e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_keyword_test_moveaxis_rollaxis_numpy_api.assert_eq_result_np_roll": {"doc_hash": "44586488d36359f7d7c071c198a49ed83a1947303555111608b6dd35efd35972"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_flip_test_flip.try_.else_.assert_eq_np_r_da_r_": {"doc_hash": "a4bb9daa7aa13048fa1dadb313329e11289ee7e32028f75a54b41924da480243"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_matmul_test_matmul.for_d1_d2_in_itertools_p.if_x_ndim_0_or_y_ndim_.else_.assert_eq_expected_da_ma": {"doc_hash": "c61a9f7ce18f052b8a87dab83b7b13856f74aa5afc0a2a3eff7ddcbfb7704ac8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_test_tensordot.with_pytest_warns_da_Perf.assert_not_same_keys_da_t": {"doc_hash": "b84cc15f622d7b6e45b7d967f3fafb1c53466ff667e47c56ebef4dd020f0f33f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_2_test_tensordot_2.assert_eq_da_tensordot_y_": {"doc_hash": "6660beabe967e580a1e09452348173eee95ba389b3272b9ec20d2d45fac9c8f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_neq2_test_tensordot_double_contraction_neq2.assert_eq_da_tensordot_y_": {"doc_hash": "0fa656ccef42f00e30e296a36c58fff56f0688bdcecd51d696e7deb236e03f17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_ngt2_test_tensordot_double_contraction_ngt2.None_1": {"doc_hash": 
"21bddf8f96cc1feb7fe2496edbd22f27bfc4c92d17f490621b3206aae0775505"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_more_than_26_dims_test_dot_method.assert_eq_a_dot_b_x_dot": {"doc_hash": "b467ecc4046b2bd235da36959de381ba957e06b55f1a1e5b51ff273e9eda8169"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vdot_test_vdot.assert_eq_da_vdot_a_b_": {"doc_hash": "fda3815d5e4300597812014708c76ee5499ae60d4201feaee3df3552c759f0c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_outer_test_outer.assert_eq_np_outer_y_x_": {"doc_hash": "c00a44bd5ec3adb61e3af521bed02c37dc4f4791e0f1b96420bf001ba35eb212"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_along_axis_test_apply_along_axis.assert_eq_": {"doc_hash": "6e623970cf5af48b458379f2781464879af45a7c7875a1ab1f12631100f8d466"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_over_axes_test_apply_over_axes.assert_eq_da_apply_over_a": {"doc_hash": "2c778889886b66f0bcbb8eb55c79ab05ec356c2cf93177808a5b71643876b5f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ptp_test_ptp.assert_eq_da_ptp_d_axis_": {"doc_hash": "338c8c04f811b4a8cff7a31eb2457f6093cf5da91be4bc21ee172228735aec14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a": {"doc_hash": "cc63cb213b2e5192966e77c364db82265230830373d3cc28f195125d1fc24b5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_gradient_test_gradient.if_isinstance_axis_Numbe.else_.assert_eq_": {"doc_hash": "8ad980f388adfa1a6c3fa1a955e8d96529b53998283e14dcf371d8c98af7ebda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_with_weights_test_bincount_unspecified_minlength._shape_is_nan_so_must": {"doc_hash": "7f239f8b53d11d9270136a4ad3c82e4ee2c110cf7b306768ebe504ebbe3a0559"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_": {"doc_hash": "b5abaa280b7230d3c1291edeecaaa881237021e9e11f34e17133646914954a9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_alternative_bins_range_test_histogram_return_type.assert_eq_da_histogram_v_": {"doc_hash": "633ad26522eb80517fb259fa1a3f313c312eafde65a9113c8790e3c85328ac84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_extra_args_and_shapes_test_histogram_extra_args_and_shapes.for_v_bins_w_in_data_.None_2": {"doc_hash": "e1832d0b0da34e81a9f2dfad780e2a6ed3357225f439c84323e9a88c34c00494"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_normed_deprecation_test_histogram_bin_range_raises.assert_bins_in_err_msg_": {"doc_hash": "0810e66728741354b844f282d672123160b6f1b382693ee3986dbc48ea721110"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_range_test_histogram_delayed_range.assert_eq_bins_d_bins_": {"doc_hash": "f92ca49ef74b800bc7640f73d58e87ffb45e3bf5cae6c76b8196b9d930642698"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_corrcoef_test_round.assert_eq_d_round_2_da_": {"doc_hash": "c245c313fcd1f6d0135c194cf3eb4d3e25741ae19642d6edc028c038638998c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_kwargs_test_unique_kwargs.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"doc_hash": "2e99360f19aa61e17638fdae111e799e2dd5fa4c589292551b8e3a3d7ac30715"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_rand_test_unique_rand.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"doc_hash": "b8375a0258c7e5cc2f30d568b3bb7af060f33f38dea1ced6b063602b65331032"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_rand_test_isin_rand.assert_eq_r_a_r_d_": {"doc_hash": "9dfb7d9f5dd389a97e26019dfb0ac060f970ae6c13ffadf0a2367d5051cdde75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_assume_unique__maybe_len.try_.except_TypeError_.return.0": {"doc_hash": "8d8180dea55f0b804536c55434e52c47ebf37e18f3543930684f5f4ff88ebc64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_squeeze_test_squeeze.assert_d_s_chunks_exp_": {"doc_hash": "3e0643927c7ef95f915ce03bb3a7decc0b21863e4666ae58a9bdda8ea501acfb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vstack_test_hstack.assert_eq_np_hstack_x_y": {"doc_hash": "df334cad7986502494c75501f7247ef7677d8bc70e10a09113ef04b0ac8d939e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_dstack_test_dstack.assert_eq_np_dstack_x_y": {"doc_hash": "39d35a5d277efba219f80299a3e62c3fc195597ede553e5112d93ee205308534"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_stack_unknown_chunk_sizes_test_stack_unknown_chunk_sizes.assert_eq_np_stacked_dsk": {"doc_hash": "28be35f76de26c6ccae181fd62e44b4007a8511b6b69cb35c1636e98c61f93b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_test_take.assert_same_keys_da_take_": {"doc_hash": "2da290f9036e73c20a023451be41d8a5fcd7908fe11dd0b7b54225b960c0b8a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_dask_from_numpy_test_take_dask_from_numpy.assert_eq_z_np_array_2_": {"doc_hash": "89e522d72d4dc63d5f6641e803e46c8bb81ec2e18ea196212d946405cbe2bbcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_compress_test_compress.None_2.da_compress_True_Fal": {"doc_hash": "99c2aa5fbe2c25e75c9a3ff87efca6a84aa5750e37f0578bd94fd1c7632a7115"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_extract_test_extract.for_c_dc_in_c1_c1_.if_isinstance_dc_da_Arra.assert_np_isnan_res_chunk": {"doc_hash": "4acaa4dacbff074ad0485879eff1b7b69cab4dfdfdc64c53ce801bd436913f8b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isnull_test_isclose.assert_eq_da_isclose_a_b": {"doc_hash": "72e521c8776ad8b7a2fee6a94c8086cfa9871bada9e2b4a25c0f8a673eeba523"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_allclose_test_allclose.assert_eq_np_array_n_r_": {"doc_hash": "78b94c0d16a58ba90dda863c4fbb1511a05b70175e5a33e2ced2c0f8078260a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_choose_test_choose.None_3": {"doc_hash": "137f4f9aea513c0369c5702947b5636747464a071d1f5c65a344021869115d7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_test_piecewise.assert_eq_": {"doc_hash": "a528dd24f0281743d6178294708e100f05e4c39fbe61b720308fdd89436802f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_otherwise_test_piecewise_otherwise.assert_eq_": {"doc_hash": "9b4f78b81259d861bd4a8faa16b7a53db9102f2347a3ad37fccf3a0604a31275"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_argwhere_test_argwhere_str.assert_eq_d_nz_x_nz_": {"doc_hash": "b2d0ffd7040dd9047c73d3afd94a227d088117bbf42377ebaf2be470ff5b0c37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_test_where.for_c1_c2_in_.for_b1_b2_in_0_0_.assert_eq_w1_w2_": {"doc_hash": "5a2303828733af5c3dde03344987a1af3b7971abbad2c5811bd8f702d8e1a420"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_scalar_dtype_test_where_scalar_dtype.assert_eq_w3_w4_": {"doc_hash": "6e0132e1b2e3d686dd207a4cb10a439c0e7210ae2941f97f8787b24dbeaf9b00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_bool_optimization_test_where_bool_optimization.for_c_in_True_False_np.assert_w1_is_ex_w1": {"doc_hash": "0a356516c0047e9e322f4d4969e659bd013524c5e0868de92dd3747a1315fc64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_nonzero_test_where_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_w_.assert_eq_d_w_i_x_w_i_": {"doc_hash": "ea3083acc2ed229dd8d635c5f2d682bd62c2e9b39702e16086b639d3d2ccc882"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_incorrect_args_test_count_nonzero.for_shape_chunks_in_0_.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"doc_hash": "19331d3752b316f71d3e26f18167417b16f1e72eda15bb89e604418425042c6a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_axis_test_count_nonzero_obj.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"doc_hash": "5d956f167ebf2335d01fe1ae718fcfb175c643bf7854a494ed3c35b9d1726ca1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_obj_axis_test_count_nonzero_obj_axis.if_d_c_shape_tuple_.else_.assert_eq_x_c_astype_np_i": {"doc_hash": "98649f8c480c4f4fa585d1ad391a99c6cf17dadb18dc86cab795c7dd963cc283"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_str_test_flatnonzero.for_shape_chunks_in_0_.assert_eq_d_fnz_x_fnz_": {"doc_hash": "9d25753dd800f8c85963f44a167ad88eed0f33bc99b963d3484fb524260d3f5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_test_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i": {"doc_hash": "e08068857f7bbb40aa7bc5343f6adf681155a61da5b62aeb4223d39043046fca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_test_unravel_index.for_nindices_shape_orde.assert_eq_darr_vindex_d_i": {"doc_hash": "8233fd5c265acbd762db5b13d5f3a6366f9cb52333502a14e2eb42969e0f62de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_test_coarsen.None_2": {"doc_hash": "d3ed8a61d8121c07616d9a001a6b76c6e11459fe4e7b53494abbd1aec17f5c8c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_insert_test_insert.None_2.da_insert_a_3_1_axi": {"doc_hash": "598580c40bdbfbc8bc49fcda760e19a09ca7b4abae771a0a8f7421834b8fb3bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py__numpy_and_dask_inputs__numpy_and_dask_inputs.return.np_inputs_da_inputs": {"doc_hash": "9e33dc7ee545e061e0684c6c62759e6b460f975c34edc58b79b1071e8a85a2cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_optimize_test_einsum_optimize.None_1": {"doc_hash": "3a85d5a307f734e56c7a9b17252a19382ba5266fec79cf8287cc1524803145c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_order_test_einsum_order.assert_eq_": {"doc_hash": "69444844d10697a61e69d73378169b9a79538a5885eb3257b5bdebd9266ead50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_casting_test_einsum_casting.assert_eq_": {"doc_hash": "546464071d8fd1bcc99bde4af639f70c78d37018c5914854d876e5045e2dd1a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_split_every_test_einsum_invalid_args.with_pytest_raises_TypeEr.da_einsum_a_da_inputs": {"doc_hash": "317e53317b1e0792b5df2338fe086f832f53549c25244ee0e3d9a663cfa1c0e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction_test_einsum_broadcasting_contraction.assert_eq_np_res_mul_res": {"doc_hash": "dd5064baa72948dd4d899873b330a8c7ba1ef65a1eaad53cd187e8209a7d8764"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction2_test_einsum_broadcasting_contraction2.assert_eq_np_res_mul_res": {"doc_hash": 
"c73352a71c6a5be85b9a2c3386adc26c75f4a9c441a7bdfe41a094710cf50774"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction3_test_einsum_broadcasting_contraction3.assert_eq_np_res_da_res_": {"doc_hash": "dc919cb7146c625adfe31a412293242949d28a184bcb2ad98416a4f02cb173ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d_test_slice_1d._x_1_8_1_": {"doc_hash": "45a736b2c46ae225cf43794670689eeba83f0eac3685b0fff91bd20e16dba31c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d.expected_14_test_slice_1d.None_14": {"doc_hash": "67ed2e3a4defa877b60c73ba423b3d33a42fef8b2b737806dcfab57803cd2c9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_singleton_value_on_boundary_test_slice_array_1d.None_7": {"doc_hash": "b36723ac891eba7cd18d1dae465d7b71a5c3db7f9c582cbaae3095b1a33fd2ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_2d_test_slice_array_2d.None_3": {"doc_hash": "1f42c329b15f4763f7d44756fd9949ae3c2b3190cd39876e68bf26b9498845c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_optimizations_test_slicing_with_singleton_indices.assert_expected_result": {"doc_hash": "8497d4bda28609ba854263cdab1c558dfe27016904b9f93d7915139baf8fe3a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_newaxis_test_slicing_with_newaxis.assert_chunks_3_": {"doc_hash": "87f02a3f365f68f5bfb98304a23a7403b6865b042ace131bb5c89661a3091f7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_test_take.None_3": {"doc_hash": "92464a2a2d770dfe0fa1db1e7ba2ebe9a5660e8a60a7881e6e27734aeddf6c34"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_sorted_test_take_sorted.assert_chunks_20_20": {"doc_hash": "41934223a9354aa573ea4dd3c48998b31eb4b89c571315ef06ad314cce2ba6ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_chunks_test_slicing_chunks.None_5": {"doc_hash": "d0bb37589cb916c765fc5c0550149476c52e1a9ee772c98f874fd6e600300776"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_numpy_arrays_test_slicing_with_numpy_arrays.None_1": {"doc_hash": "7d2bd780b5a0918472a724bb92a954f995152cd3346d44e8a63af22038562edf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_and_chunks_test_slicing_identities.None_9": {"doc_hash": "b23f2b5926a33b154cac8789b23242099e2dcb1d92622482aa04f6a8f03f7ded"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_stop_0_ReturnItem.__getitem__.return.key": {"doc_hash": "efe40fa22ff883a25bf753ef93afa745acaad2ca33a79c2accd566d7c723e71d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_exhaustively_test_slicing_exhaustively.for_i_in_first_indexers_.for_j_in_second_indexers_.assert_eq_x_i_j_a_i_j": {"doc_hash": "f700ce9e07447c16ad6c8a3e228f18e6c346a932f785cb7bdfa483196596092d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_negative_step_flops_keys_test_slicing_with_negative_step_flops_keys.assert_y_dask_y_name_1_": {"doc_hash": "d08c1521e0696f82a477d8a50175a1d4d5f82f8c3244d1f4b9a532821a338ea3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_slice_test_multiple_list_slicing.assert_eq_x_0_1_2_": {"doc_hash": "4c13dfb3be0d85a2c6a85d388bc0f4ca21158f1a3d23dea4c21c86fd0e521889"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_list_slicing_test_boolean_list_slicing.assert_eq_da_asarray_0_": {"doc_hash": "22ff62d496f627bf064192d671a5661be595dddbc96f11a3bf6b66e850ae51ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_numpy_array_slicing_test_boolean_numpy_array_slicing.assert_eq_da_asarray_0_": {"doc_hash": "5e9fbdc6e9d41a9b2e5a961c84bf12b2f5ecea53c905f27bf5275e1a0e501fbd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_list_test_new_blockdim.assert_new_blockdim_20_": {"doc_hash": "c1b86da6d3464590a055fccb45f231a127503194572caaa0a1a7095a089e1f87"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_test_slicing_consistent_names.assert_same_keys_a_0_1_": {"doc_hash": "e2829a9fb32be077dff88bf5b94cc5bb6b57143327ae3adca5fa5ede5d933932"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_after_normalization_test_sanitize_index.None_1": {"doc_hash": "d0a3da9596337f496160a32bf5751584201cdf7d9769ac2f13c75737241fa4ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_uneven_blockdims_test_uneven_blockdims.None_5": {"doc_hash": "6924c971f3bf2cdfa5bbcaa50bb4f51417c804e437dc1b512f51051a3ca34411"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_oob_check_test_index_with_int_dask_array.assert_eq_x_T_idx_ex": {"doc_hash": "6d1dd22c78b51c5880aa0312561fd7912e60eae4dc7c37afe9b041966faead14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.assert_eq_x_idx0_x_": {"doc_hash": "aef3d7ac7ebc06135b9e51f7f973e7284112cb512b775dc0c608d2c9257c3fe0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1": {"doc_hash": "5ef2206c2cdb7e71a96276bedcbd5e43adcda127c4feb2cf1deb34c4e65c8941"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_indexerror.None_1.a_idx_compute_": {"doc_hash": "eda17cbdca5f0abb9d7d5b8e834265fe85d2acecdd907dece0a5bfb47c7e92b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_nocompute.with_pytest_raises_NotImp.result_compute_": {"doc_hash": "4ec82051f66a340d048935620178ba07fa56ba91a4433a672f57f01d5ec09dcf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_test_index_with_bool_dask_array.for_index_in_ind_slice.assert_eq_x_x_index_d_i": {"doc_hash": "1d07550f8f08710fa7eebc544831e178e713a74c7b78cdbe01c27d2cfa689ec3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_2_test_index_with_bool_dask_array_2.for_i_in_range_x_ndim_.assert_eq_x_tuple_index3_": {"doc_hash": "fb234bca913a67deebe84b10a15147312ac454d73c0cf8979c12038df2c2d7b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cull_test_negative_list_slicing.assert_eq_dx_4_1_x_": {"doc_hash": "54b601f88ec791139bd45c0abb28944c9fb123107192ef6f24b8abe0b9e4eafa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_permit_oob_slices_test_take_semi_sorted.assert_y_chunks_5_5": {"doc_hash": "a97f7963782db12ca2f458f287f56b07ba14c747e0abd8e6e2e2f5a8b79eec45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_plan_test_slicing_plan.for_i_x_j_y_in_zip.assert_x_y_all_": {"doc_hash": "6a348f56062ccdfe6e14397cd23c1cd1a8e86c425328703ff3bad1857c657902"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_pathological_unsorted_slicing_test_pathological_unsorted_slicing.assert_out_of_order_in_": {"doc_hash": "a25d67987892dd4fd8ad863ba14ad92aee85e6250b5683167baab63e4fbacfbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_setitem_with_different_chunks_preserves_shape_test_setitem_with_different_chunks_preserves_shape.assert_x_shape_result_": {"doc_hash": "4de263d043a46efc2cfba63a31a9909311a98741ece8a59a2e0d91e1a18a60d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh3579_test_make_blockwise_sorted_slice.None_1": {"doc_hash": "6f0048cf89634cb33210a910663d735762b651496cc9cf3784bf2aa35da52fe6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_shuffle_slice_test_shuffle_slice.assert_eq_a_b_": {"doc_hash": "0faf855ce10466aea0d37f526420456e6e9ce0170652105e588b3233f792b109"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_basic_test_basic.if_yy_shape_.if_not_isinstance_zz_spa._mostly_dense": {"doc_hash": "5a1de8edeee909930af8ac3909746b84b49f3ccfd7b8af527be28820f21525d3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_tensordot_test_tensordot.assert_eq_": {"doc_hash": "e33c71a3ec7cc055e21b51fbe0f957afa83fe1a5e1a1fcddf9562d32e9b86b53"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_": {"doc_hash": "95a74bbda6223e3d0da2d7403a22dc12c576a796376b0b4cde9ca480e9c75330"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_": {"doc_hash": "609129fca2e07f0f70da65dec3b1224c232553e4d08b9097ac6213a3eff8e8bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_output_type_test_mixed_output_type.assert_zz_nnz_y_comput": {"doc_hash": "8daffa956ea43fb8e11efcfdaa1c1deab53bc6af7e7d4336fff9816c9d3837b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_html_repr_": {"doc_hash": "0c69a2398d0e04c2b00f14380e43b8bae2862f591f406d54b033aea0d2e09703"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_bias_raises_test_one.assert_allclose_result_co": {"doc_hash": "44d23cf1e5937ae824ed95baf39179a4c85f3ac790088bd630f3b547f2a0c161"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_two_test_two._assert_dask_compute_re": {"doc_hash": "370be3d2349ff6cf8044a9e847b617584bea14961fe606928ae355304127a76d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_moments_test_anova.assert_allclose_result_co": {"doc_hash": "f8d134c7f99354c7824f6fb6cf11d1c20ee4f7d4e80051ff38a880c9cf3f897f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_test_errors.assert_unknown_chunk_siz": {"doc_hash": "4b11595eac375a05a27af1c2d575f83a4f0d45373f23432342f19adb0daa0426"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_size_units_test_repr_html_size_units.parses_x__repr_html__": {"doc_hash": "28b1e886cca512ed594d04aa535653bff49a9b359a0bc9ab7cbb65ebb0a7a9d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_testing.py_sys_": {"doc_hash": "bf2461999d0d07ebd986c433010eecae07e58a78c75e7d7122f20fdafc6b78bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_pickle_unary_ufuncs._": {"doc_hash": "a42998c634ccd1f61db4bc5a1836612d0098a55dd4b0e7c4750e7122ab236b93"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_unary_ufunc_test_unary_ufunc.None_3.assert_eq_dafunc_arr_np": {"doc_hash": "5a58ce652c040106b3be97a83592be60b17e8b245562e30552a3fd1a5abdf784"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_binary_ufunc_test_binary_ufunc.None_1.assert_eq_dafunc_10_arr1": {"doc_hash": "310d66793b3c1e713cc13c94a8301c50a743fb027172dfd8e206045ba38e1b41"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_outer_test_ufunc_outer.None_2.da_sin_outer_darr1_darr2": {"doc_hash": "c22a1b226ecbdc561ea4d4965f5f286b13a65da0c3147ba9247f11e6ae9e1b0e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_complex_test_complex.for_darr_arr_in_dacomp.assert_eq_dafunc_arr_np": {"doc_hash": "1fe36a3324aad8d712d949516394eff07c19d02977e5d12249d81fd9221c4b49"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_2results_test_ufunc_2results.None_5": {"doc_hash": "7c36488e259cea07d004831eef07db610f5364e4f1dd5fdd26989e2e13c324bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_clip_test_clip.assert_eq_x_clip_min_1_m": {"doc_hash": "a0bb9c17818baffbcac50efad2e96cdb352bb7d351c8c1f5cf93c808b879268f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_angle_test_angle.assert_eq_da_angle_comp_": {"doc_hash": "f7c68ca77b74e31b6ef0ad433d78725ec0917b90f894c6d2916baa1bed8b2ff7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_issignedinf_test_non_ufunc_others.assert_eq_dafunc_darr_n": {"doc_hash": "ccd37c2060d40efb00baad0f5a38731a9a878c5c1866af3b1f7ee7a8307d40e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_test_frompyfunc.with_pytest_raises_NotImp.da_frompyfunc_lambda_x_y": {"doc_hash": "064ece928acf5c50fd25a5cf4e6ca7a9785cf1899b12c25a483b1a3143d63ae1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_wrapper_test_frompyfunc_wrapper.assert_tokenize_da_frompy": {"doc_hash": "e6e7769a21c1543c51072a63eb80f8e618448689dcf1f44f60d490151f893ad6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_array_ufunc_test_out_shape_mismatch.with_pytest_raises_ValueE.assert_np_log_x_out_y_": {"doc_hash": "bfe146f8fc229e85fe339828ee3ddad4089311e584b4fe9376696312a5482c9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_pytest_test_can_make_really_big_array_of_ones.ones_shape_1000000_1000": {"doc_hash": "a51177cd5469b307c5b4e2b664473c949d9b30b76d404a017ac9452f4c4e73bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_test_wrap_consistent_names_": {"doc_hash": "df994849e8dcfbccf94fc5fa8c114e311f0e531973f4be04f32fa0235a6c8a7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_core_from_tiledb.return.core_from_array_tdb_chun": {"doc_hash": "fc87d37a853829de6fc9ad351564d5b3f059e4aaaf999bf3e48d5579feba41e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_to_tiledb_": {"doc_hash": "8a0c1e406016f16541767487bc216ef1b7efbed54c2647777ab765cbe822d90c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_wrap_elemwise_wrap_elemwise.return.derived_from_source_wrap": {"doc_hash": "9e53f7e007a7995669edee4b82679d80dac8ca51bd5441cd0de4e7e7e074f0b6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_da_frompyfunc_da_frompyfunc.__dir__.return.list_o_": {"doc_hash": "4e96b2b588a793f33cb1937dee99eb6d839b2e480eaff324e5f5553fb9071395"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frompyfunc_ufunc.__repr__.return.repr_self__ufunc_": {"doc_hash": "7775a6bd0d5cd0c94cf1ddd6313491c6e857303f76880480ecad000f0bc77708"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.__call___ufunc.__call__.if_len_dsks_0_.else_.return.self__ufunc_args_kwar": {"doc_hash": "29bc889effcccd1bc832d50e6a7b8ef416507052430bd5c542c876b88df75b36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.outer_ufunc.outer.return.blockwise_": {"doc_hash": "f34351505b5dd00077e265f52219d768ff46f5f5910c71eedf99cf87e4fb6f5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py__ufuncs_copied_from_thi_degrees.ufunc_np_degrees_": {"doc_hash": "1c1f97a566358b5dd319d1974838898f0f8f2c4e3274de4b212501da60f475c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_radians_angle.return.np_angle_x_deg_deg_": {"doc_hash": "8eaa6df4a796b849c9dc929d67fe16cfbb6a8956195641e5ed3aba40fdc13ab2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frexp_frexp.return.L_R": {"doc_hash": "dba005e4f983e49b251fc34ec625bba7016f3a2f28fc2e051b7002483410a808"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_modf_": {"doc_hash": "13836b021eed2f24e9640729b0e2107bb0f90de332326195e4d33677b8ffebc4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_compute_meta_compute_meta.with_np_errstate_all_ign.return.meta": {"doc_hash": "e318aed6be3d3915cae709195bffd465b9e51177c4ebfa21641738bf2704803c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_allclose_allclose.return._a_b_all_": {"doc_hash": "2c174f65913381302602963a617252a94a0b4a8c39f54e63eb0fc363908f8612"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_same_keys_assert_eq_shape.for_aa_bb_in_zip_a_b_.if_math_isnan_aa_or_math.else_.assert_aa_bb": {"doc_hash": "e72689d5b36da3cf63b6fa6fa5c11583b3cd34c28153499af12ce339d82b4a4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__get_dt_meta_computed__get_dt_meta_computed.return.x_adt_x_meta_x_compute": {"doc_hash": "66142e90fe46771a1dab021b5632f78d2e1dc3494b92c88acca10e14f50b03ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_validate_axis_validate_axis.return.axis": {"doc_hash": "f0c406fc3d040a8e5202cd06d099f368de619329c0dc1c04898faea7f90432af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_from_functools_import_par__parse_wrap_args.return._": {"doc_hash": "0b6bccabdaf812cfd8c3f8446d5e32b34715ffdb5804c5be554e7379413edbf7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_wrap_func_like.return.Array_dsk_name_chunks_": {"doc_hash": "faac998a73314fea65dbf98e63838cdf6ba33e2fd1b747ed148fe5ec3be18358"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/__init__.py_try__": {"doc_hash": "b7a3b4f179723f7501b0dfaeb5f3ee0d5551a56c7e1bca9185105f56a6bf9c0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_io_read_bytes.return.fo_read_size_": {"doc_hash": "ecb8a51d1a9ae9d07233e7f60b0614d349fa0d5b8f323c697abc99d976338450"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_header_open_head.return.head_size": {"doc_hash": "67d63e75f91fdc1f0b1eb4becee422b1a4d36770815b58bf7f4a9a76989f3711"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_avro_read_avro.if_blocksize_is_not_None_.else_.return.from_delayed_chunks_": {"doc_hash": "497a3e3275b1c56d0df0f64bb61a20ab456a1c168fa6fff5c95bf5f59e42612d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py__verify_schema_": {"doc_hash": "d090e20a48979bc698f864f7efed6d50f78d43f7ea2da4f49194a45b58627826"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/chunk.py_barrier_": {"doc_hash": "e486242b57f0bc415a3bfe2edb2bc332326b929b4c39bd7cccaf08831b825b55"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_io_no_result.type_": {"doc_hash": "dec1c5b0067e95ea9226747a5b382e44a4288798a0eefdf7d95e005feddc9677"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_inline_singleton_lists.return.dsk": {"doc_hash": "43b3657ccb28850ae834cf485d31735331342dd4068720148fa6cea62db638c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_optimize__to_textfiles_chunk.with_lazy_file_as_f_.if_last_endline_.f_write_endline_": {"doc_hash": "b568a69a959545837aca86031bed38295f3b1b6f344abc3ec0349d8d973fd04b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_to_textfiles_to_textfiles.if_compute_.else_.return.out_to_delayed_": {"doc_hash": "824e34a38b59f51ca38f7d587d86da8e1623ca71dc1f02a6da3fdbc7880ad89c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_finalize_StringAccessor.__getattr__.try_.except_AttributeError_.if_key_in_dir_str_.else_.raise": {"doc_hash": "4608b0f4b263628d57c16baf12d29341f0472a9ed5af9b2e86d5d28deab09876"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_StringAccessor.match_robust_wraps.return._": {"doc_hash": "4424e3d952861bf397511ee7abba7da6cabd87272cd652f3e5c6d3880ab54a81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item_Item.__int__.__float__.__complex__.__bool__.DaskMethodsMixin_compute": {"doc_hash": "123f43866f8f0d5f21d1ff8dc01268ad8a03a8331d71f167a4ab39ab9122fb34"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag_Bag.str.property_fget_StringAcces": {"doc_hash": "20356871be469a3a372c72e7d1cc3fd714909980893bc5498474f02344330c8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_Bag.map.return.bag_map_func_self_args": {"doc_hash": "d351daa549a3c5eccb2508cce2b06e2c9b5cafd81d4713de7784c644efe24e39"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.starmap_Bag.starmap.return.type_self_graph_name_s": {"doc_hash": "454c795ef6d0abcb629ac3c4855ed69a3ab78a6267d87bfff8b3b226878b1ad6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag._args_Bag.filter.return.type_self_graph_name_s": {"doc_hash": "2ac732e1e54674cfd50e3eadf308a420655ddeea2e0009b41008953534368cdd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.random_sample_Bag.random_sample.return.type_self_graph_name_s": {"doc_hash": "a0bbea49d8f8a5d3a42df3ea4ec0d679b4979428d13bb7f05cc0cc2b70d0c77e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.remove_Bag.remove.return.type_self_graph_name_s": {"doc_hash": "6732e708180c4ca11c652550d1a562cf012d8e761ffbe7f8affb20c93bebf756"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_partitions_Bag.map_partitions.return.map_partitions_func_self": {"doc_hash": "3942f90971a958f20680920559f5de642ad36eb0cbe05322bab4d591299a57c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.pluck_Bag.pluck.return.type_self_graph_name_s": {"doc_hash": "98d122935b10773adb0edf93af88644eb88f3670134317a3f0241c75e32e55b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.unzip_Bag.unzip.return.tuple_self_pluck_i_for_i": {"doc_hash": "e1e02f77c25b63835129f681f7f76078735689ed97952b899bee988a343bcade"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_textfiles_Bag.to_avro.return.to_avro_": {"doc_hash": "d5ed9cce630e41c90950db36235dd2aabbdf857eb3a409cf2856734c65658bdf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.fold_Bag.fold.if_initial_is_not_no_defa.else_.return.self_reduction_": {"doc_hash": "24f4c1d0d37119a67aa454ba34fcad13f64c85e752c620dfd2141f302d3c83f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.frequencies_Bag.frequencies.return.result": {"doc_hash": "376ec959b5064a9296cf8920864ea0ff927e3aacdcde5c058173101ef33c94e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.topk_Bag.topk.return.self_reduction_": {"doc_hash": "53d38a13a96d9c52e0f173d910e05f74567228372b635e39a2e18b948de3c4d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.distinct_Bag.distinct.return.self_reduction_func_agg_": {"doc_hash": "8be5233f7212dbe28e592c98cd2ca96d000addb88c304138a5a77d24b16d3b63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.reduction_Bag.reduction.if_out_type_is_Item_.else_.return.Bag_graph_fmt_1_": {"doc_hash": "98cb199a6ec3ec23d1aee09a5f5301f3519ab07b5076ee3bd20482216f46e610"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.sum_Bag.std.return.self_var_ddof_ddof_apply": {"doc_hash": "004db4f8cb0613adff156aca7b663b9d538ce09efca48d5bdb022b12c2750bf6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.join_Bag.join.return.type_self_graph_name_s": {"doc_hash": 
"399abd6c453533836401905650ca22c7cf89a273a62bf683b5262c9db80c51de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.product_Bag.product.return.type_self_graph_name_n": {"doc_hash": "cf7c2a836a6e89b53f5c9c295ab194aea486037894badf1f5319bed3c08e7e37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby_Bag.foldby._Combined_reduction_and": {"doc_hash": "dfc1a799a39c166837cbc1d960fd9c4933d6400615cfa6f8c567d7f88831e2dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby.if_split_every_is_None__Bag.foldby.return.type_self_graph_e_1_": {"doc_hash": "5c01fa68e1e35fec087c9ac84c09da32c1413b94bf1181f4b62aed969dab3c5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.take_Bag.take.if_compute_.else_.return.b": {"doc_hash": "81f3d03c45f2dfe42d7668d7b11e3eeedcdd0659af7a4ff0a33802f51d6fbb94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.flatten_Bag.__iter__.return.iter_self_compute_": {"doc_hash": "5dfe28fdaced79e06224a7ed5aaa13d029c581f0980b0f7ca785dcaefaa54245"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.groupby_Bag.groupby.if_shuffle_disk_.else_.raise_NotImplementedError": {"doc_hash": "c925d1a1d904f86bdf45afcd754d609183b9cf8f56cde34fe8923bbf9e480471"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.repartition_Bag.repartition.if_npartitions_is_not_Non.elif_partition_size_is_no.return.repartition_size_self_pa": {"doc_hash": "af1a7f2fe11d87659416939ab21c01f1937a412fe1232ebb7567cb3da99463d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.accumulate_Bag.accumulate.return.Bag_graph_b_self_nparti": {"doc_hash": "81814dd2f56007f83c6ade17844877ad9cb8453d583e9512bc545bc37824d7aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_accumulate_part_collect.return.list_d_items_": {"doc_hash": "051850a6ecfb5d5b27da679d579bee925fef8d0f2f351932af342b659864e290"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_sequence_from_sequence.return.Bag_d_name_len_d_": {"doc_hash": "cdc09c4aea0c2b72400bb06c5450c79f22eb3ef2942af12f8e5bef8d4b4609dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_url_from_url.return.Bag_dsk_name_len_urls_": {"doc_hash": "59bcabc679748a9d805c885ae8a3ce2775c9c34932d7ec853d97cd92d05a1fca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_dictitems_reify.return.seq": {"doc_hash": "b3996376dce02250e1c39c45f1dd22f34667f5dab289b19355278d5c571c23a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_delayed_from_delayed.return.Bag_graph_name_len_valu": {"doc_hash": "fb6c3ab115471184528795bf62770ea6ec6351be9acc1fc64432ad3b265b2ad9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_chunk_distinct_merge_frequencies.return.out": {"doc_hash": "cdbb1050ad7c682f86d4af3e7807256ecafed2311ee5ec5d380f1aaed6023701"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_range_bag_range.return.Bag_dsk_name_npartition": {"doc_hash": "90a5ac551edddb1058e9adec296481f66faff4e443122b9eee1223cc15049746"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_zip_bag_zip.return.Bag_graph_name_npartiti": {"doc_hash": "564201d8614e408d15410be0bb3d1eda8a0f4a23bad12960a3bfef9b2cb0ea68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_chunk_map_chunk.return._MapChunk_f_iters_kwarg": {"doc_hash": "1e15d09daceb28e25cdd59b0dcf840cadd8966c13c6a94f952cbd01e39d13ea5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__MapChunk__MapChunk.check_all_iterators_consumed.if_len_self_iters_1_.for_i_in_self_iters_.try_.else_.raise_ValueError_msg_": {"doc_hash": "a6194206d15d7afa522515384bb3e8214ca55b2609c844efbde64a5f1e616e63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_starmap_chunk_unpack_scalar_dask_kwargs.return.kwargs2_dependencies": {"doc_hash": "569fdcfdea17cc23ab5e6c5119962009de352d129c1e6b14daef4a307cee3760"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map.build_iters_bag_map.return.return_type_graph_name_": {"doc_hash": "41232288ae16e954cdeaec4f945f98789c5d1759d2f47291bb8c0211658a2761"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_partitions_map_partitions.return.return_type_graph_name_": {"doc_hash": "e24311b5a4d35b7997aa2110537082db984e0f9dbafcdbd0e9585cb248ab4dd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__reduce_groupby_tasks.return.type_b_graph_name_len_": {"doc_hash": "af1be7cf73874266406bcc567f57112a31b9e58fd29d7e6ba013db403f46d7d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_groupby_disk_groupby_disk.return.type_b_graph_name_npar": {"doc_hash": "230478153c5a000a581a3bf692744ad9d4a0958c077c6c2df95e1cf2b83a70d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_empty_safe_apply_safe_take.return.r": {"doc_hash": "47f63aff398482c053151fdd5c4fe01d10f850478f0aeeb8bf3c0c8724e9719c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_sample_random_sample.for_i_in_x_.if_random_state_random_.yield_i": {"doc_hash": "f7e20d28b52949abca422ba4ebc4dc64646884b1d4d04d0e64e190b943fdea1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_state_data_python_random_state_data_python.return._": {"doc_hash": "f5fe0b96908ee250266ae2f0e9ac949c02638a4e7556e85c9734bc6a52ae64d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_split_to_dataframe.return.res_astype_dtypes_copy_F": {"doc_hash": "f0de6cc5a8338033478a4da9050b2f86ef28fdfb812ab107efcc4cd8dcd2499b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_npartitions_total_mem_usage.return.sizeof_partition_": {"doc_hash": "f7f96f9841c9e7beb2c20a217438db4981802fc76393246935b34c753d477df2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_size_repartition_size.return._repartition_from_boundar": {"doc_hash": "d46130febda80e8e89f808dd02b98de3daf80efb019cbacda7d7e412f562cabf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__split_partitions__split_partitions.return.Bag_graph_name_new_name_": {"doc_hash": "47bb0ce63ee58cdff9fe930d2a9ace45c490a46c4693aa75b7c04da8a7176730"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__repartition_from_boundaries_": {"doc_hash": "9648f945b92946e5a8d4e832dcd7cb6649fc802f0f461eb74c5eb37b230e4026"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_heapq_sample.return._sample_population_popula": {"doc_hash": "52dcbfd04280adebfc8ead5b1ed402f3420785a459c47a011aa65aeb4c3e4ffd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_choices__sample.return.population_reduction_": {"doc_hash": "97bb6f4e1259890f9c4f01e762748a61e3cd615571af1a82f891c6b0b74b4bea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_map_partitions__sample_map_partitions.return.sampled_lx": {"doc_hash": "77c21fa3cb2546b8d204e39cb0a1fbad7e098653b2530751e6db8f7c8d1e5cc3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_reduce_": {"doc_hash": "45f7cb4bc4b15313e2dd68158683ea670abf8821f9c5b62c4918191b06ac902b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_os_test_onefile_oneblock.assert_b_compute_exp": {"doc_hash": "c7d190fb8252a92ba51007f9e95c2c2f18d4bf9b80d901c2612c88239ed5233b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_oneblock_test_twofile_oneblock.assert_b_compute_exp": {"doc_hash": "b6185b3811c6d650cec902cdf8c4f736253044b4e89b57b49c38b44b66be6a01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_multiblock_test_twofile_multiblock.None_3": {"doc_hash": "c3f54e959c2264b7ea675f7564137350f738bc2538728895aeddf234b6456c4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_simple_test_roundtrip_simple.assert_b_compute_b2_": {"doc_hash": "c11bdc5a3f657f02520ea86b7bba4220e580133865bd2a7c65f5fb9a682a6e59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_test_roundtrip.assert_b_compute_b2_": {"doc_hash": "f01dd0bff9f67e79c3b5c5986c2053b6942d71217c885baa344483598dafb11c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_invalid_schema_": {"doc_hash": "22993c35870e4851a1977a78bce769886f1006fbe7fae2a922706216688bf318"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_map_test_bag_map.None_3.db_map_myadd_b_b_unequa": {"doc_hash": "aa7f0efce4c604c060aef70d385a3686e5ab97bfdcea83790e4e890012c99a89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_method_test_map_method.assert_b_map_myadd_b_sum": {"doc_hash": 
"760a546eed50a6bc57558a197178098ede44b92a3bb8d335eb4460d921a9c749"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_starmap_test_starmap.None_3": {"doc_hash": "66176e4187d0a899b67e42f452e5b4b1971daf32e9940cef64f66f8ec0c924a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_filter_test_repr.assert_from_sequence_in": {"doc_hash": "03a94cda05567cc5748c8bcb0869e9069fb50f371d35bdbf6b5aee87822a9f8a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_test_pluck.assert_b_pluck_1_0_na": {"doc_hash": "8800635d01d75b02d89f0f9911cf57b66b2a485923e65d05becb6d7e21beac34"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_with_default_test_unzip.assert_one_name_two_na": {"doc_hash": "95f430def4f29c6c4675d296e5135c9399dc043a76dc0f3df4a78e1f29946ec1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_test_fold.assert_set_e_fold_add_in": {"doc_hash": "3e1f41e0e5e8a2481030f04bf9c6f827c74182254c2745564c3039b71b328e7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_bag_test_distinct.assert_bag_filter_None_d": {"doc_hash": "79cac33404b0e2c11ea3511e5491e1ee7f9180bcfe6640f46b4da4803a1ac0ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_distinct_with_key_test_distinct_with_key.None_1": {"doc_hash": "a8a82100fce227a1042480f23fa58e247049f9d920894b41d5b8e69aca10403a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_test_frequencies.assert_eq_bag2_": {"doc_hash": "922aeeca8d7e383ef57ff76f3778618b929865ef3d156138668ce22f900eda47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_sorted_test_topk.assert_b_topk_4_name_": {"doc_hash": "3cc7e44d37f213ef28dba3b0858d5353c4dff38d3b8cb9396fd247a02fcb356c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_non_callable_key_test_topk_with_non_callable_key.assert_b_topk_2_key_1_n": {"doc_hash": "3bb0002646517311bea362da8f368bd3101d8ea735b82c5b974c41ad9741b22a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_multiarg_lambda_test_reduction_names.None_3": {"doc_hash": "e62e2ce9f0a15c3dc728fa4c17f870fac057de875a666399dd988da21285911f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_tree_reductions_test_tree_reductions.assert_c_key_b_sum_k": {"doc_hash": "3e5dabcd765b0057812c758c81bff2b95d4145b622b94d9675226a81a42b0fb3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_aggregation_test_var.assert_float_b_var_": {"doc_hash": "2d80171c298ce3d3dbd6afe5afe74582948800d3a006bc7c2d642d58dac8c955"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_join_test_join.assert_c_name_b_join_o": {"doc_hash": "e4aed67a9cb69878dc71ab408d79c2de111d2a5b1aec5b27c67d69da29814784"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_test_foldby.None_3": {"doc_hash": "79cc570881a7c18c01fe4c7439afc14a212d77fdcd37cbf8aa48cad3a56cfb05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_tree_reduction_test_map_partitions.None_2": {"doc_hash": "7e6defc5ac92f37e8ffcffabeb07d78a7a91e09a18d0ae36536159f46c23f33e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_partitions_args_kwargs_test_map_partitions_args_kwargs.None_9": {"doc_hash": "eaf9d58ced3176aea90c9b39c6e1c3cd3f1bd54553f471f84058765f1a17fc91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_random_sample_size_test_random_sample_random_state.assert_list_b_list_c_": {"doc_hash": "6ad16f98f12d3e935deea1217eeb9e3e0ad8f80b71b8c52dd6ea792faec60673"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_lazify_task_test_lazify_task.assert_lazify_task_a_": {"doc_hash": "fe8f67007b2fcc8fc9fe6b11e946cce60c3ced538507d755a54defe37d6c4d2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_f_test_lazify.assert_lazify_a_b": {"doc_hash": "9eddee3f29bbcad645fe4e32e989273253c535e5ef1c626d47a06343299f13b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_inline_singleton_lists_test_inline_singleton_lists.None_5": {"doc_hash": "3e06bb88fe519316fc933c506636d8d73a47de901209d40fe28a71495aec9707"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_rename_fused_keys_bag_test_rename_fused_keys_bag.assert_optimize_inp_c_": {"doc_hash": "1cd078f729830bd0aff519a1fc3525888375defca41260874a09dc6a00f546c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_test_take_npartitions.with_pytest_raises_ValueE.b_take_1_npartitions_5_": {"doc_hash": "0a36b7156d0ba67c5d68eb4d02aba82ecb1c029e72baf9299aee47d8575a7c9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_test_read_text_large.with_tmpfile_as_fn_.assert_list_b_list_d_": {"doc_hash": "64a51748a951370ac14da86965d88003060e611c9a9d6aa596b918a868526f19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_encoding_test_read_text_encoding.with_tmpfile_as_fn_.assert_list_b_list_d_": {"doc_hash": "dee38d89ba0e64664a4867582791d5790cb198bc287d139c1bb2f8358bfa36a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_gzip_test_read_text_large_gzip.with_tmpfile_gz_as_fn_.assert_join_c_compute_": {"doc_hash": "68174ffd71eb2f2913e99ae98b1a664daa9959a52084b26e965603b44f35f3b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_s3_test_from_s3.assert_c_npartitions_3": {"doc_hash": "dda62c7c7bca695926755ba5f4ab6015f247756add78ac1110ae9f2fcb9b424e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_sequence_test_from_empty_sequence.assert_df_empty_DataFra": {"doc_hash": 
"2b7b5909ff7149a90aeee8ff309aeae2a3c3a50a8994fc62d8d4ba67e10efa37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_product_test_product.assert_z_name_x_produc": {"doc_hash": "d9cb9bbb5b4d1287a8910cc328c98e0a7a3d7339d94d50dd12396bb28224607c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_partition_collect_test_groupby.None_3": {"doc_hash": "00ca06171feb31f448d32abc6fa5dc7612270a61f906068e3391070693e341eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_indexer_test_groupby_with_npartitions_changed.assert_result_npartitions": {"doc_hash": "d75241db51b3cc42fa3264de888125ea82a15f3dc6dd6fa2ce6e2892696ed5b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_test_to_dataframe.for_f_in_iter_tuple_.check_parts_df_sol_": {"doc_hash": "f8a858aae4e0bca27171e6a68de19148a274f349899fde49e7705d7ebfb7be65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_ext_open_test_to_textfiles.with_tmpdir_as_dir_.f_close_": {"doc_hash": "96babe7657f0d826bb1e9548c981e4625ba2b412ba77dbb75029b2b13e6f9396"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_preserves_order_test_to_textfiles_name_function_preserves_order.with_tmpdir_as_dn_.assert_seq_out": {"doc_hash": "16b71a58f3865123050036ed887af62f0686af1aec3d4693a6cb99c8291c2cb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_encoding_test_to_textfiles_encoding.for_ext_myopen_in_ext_op.with_tmpdir_as_dir_.f_close_": {"doc_hash": "30d8e5acc9c59a9ee86bea04cb2adcd707fc28bba96ca52042f3c9df2364c2a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_inputs_test_to_textfiles_endlines.with_tmpfile_as_fn_.for_last_endline_in_False.assert_result_a_n_": {"doc_hash": "78abfa35aff744343e42df3247a42ea49fb7cb113826e62ca6503b0f2149d793"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_test_string_namespace.None_6": {"doc_hash": "8facf602e61be44e0efbf5a61e6bace5fad3fca0f770850ee9aa317997663b78"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_with_unicode_BagOfDicts.set.return.self_map_setter_": {"doc_hash": "70cfc2bfeb58d72edb514b1b2a61bc1b3ce9483e8f4b570828796eb913bf1a60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_test_to_delayed.assert_t_compute_21": {"doc_hash": "f67d58abf59fbdaefc2c1d6726a224acd99684f563d1b001f17cf21f2b2b3402"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_test_from_delayed.assert_asum_value_compute": {"doc_hash": "550f14dd8e14ec22cc01be245301c947fb65631ac810552b2e5fc8396a185dc2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_iterator_test_range.for_npartitions_in_1_7_.assert_list_b_list_ra": {"doc_hash": 
"79274adb4f304ca68e2bf558da0d21cf881bedf233045e9111dae4ee9270ae9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_zip_test_zip.assert_list_pairs_lis": {"doc_hash": "782f31fc9b487063d4fe328c3612cdba4490fdb13658f46b2505e0b879589290"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_results_": {"doc_hash": "f3195439dac9fe9ebc80f7ce2087a81325b0de4f15c0c7d9b42aae6f8edf1e60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_partition_size_test_repartition_partition_size.assert_eq_b_c_": {"doc_hash": "420cedb9c100e4a172d6763a91e73beb3434558e8752debf97c52e8811e0757b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_multiple_repartition_partition_size_test_repartition_input_errors.with_pytest_raises_ValueE.bag_repartition_npartitio": {"doc_hash": "3bb673f45bf7635d2590cbc0eec63fa2af265f599b55ca47214c65586bdb5e6b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_accumulate_test_accumulate.None_5": {"doc_hash": "a94fe93ea8e11b4d5b2b126dc5a25b2bfe6ea433a3813b224a3b1ad01a564d47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_test_groupby_tasks.None_2.for_b_in_partitions_.if_a_is_not_b_.assert_not_set_pluck_0_a": {"doc_hash": "f6c74cb6de802c24a4ff1ae6d700534f7f28986f5db11da98ade592fce822c9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_names_test_groupby_tasks_names.None_2": {"doc_hash": "a6d852a21ed896e8e6534553cdfd9da4f5e4dcb19ed1d65be6e2418756d6cee7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_2_test_groupby_tasks_2.assert_dict_result_gr": {"doc_hash": "7d1e285261e2ebf872ad67c10f9b5273b2ac6a895007635ecda1ba9b93c7aa76"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_3_test_reduction_empty.None_1": {"doc_hash": "3e6bb4d578d6cb9e3800e9decf665c90d721559121e008a32695bc81b19486f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reduction_empty_aggregate_test_reduction_empty_aggregate.with_pytest_raises_ValueE.b_filter_None_min_split_": {"doc_hash": "4b22aa80341cad3cb3b2e6774da9df632f620a9b57e9603b912cb27199d8fd2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_StrictReal_test_bag_with_single_callable.assert_eq_b_f_": {"doc_hash": "4d0d38766654bdc0f8bb6ada3eeb0d0d7f9f55e92537580d3d5a186ac4551911"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_optimize_fuse_keys_test_optimize_fuse_keys.assert_all_k_in_dsk_for_k": {"doc_hash": "924eb58ae4ecef11f8951e67dbdf1255dcf6a6c1cf3455936c4b0bad23952f61"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reductions_are_lazy_test_repeated_groupby.assert_valmap_len_dict_c": {"doc_hash": "7866f56f92fed79c717ff552b822f54ce51ae073608794977e8625de728db305"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_empty_bag_test_map_keynames.assert_set_b_map_inc___d": {"doc_hash": "fdb2a3e3df65f1e473f0c24d406b470a023efd2c129fcb082d94fea4b522731c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_pytest_test_sample_size_k_bigger_than_smallest_partition_size.assert_len_set_li_le": {"doc_hash": "33db67a3b38da1ad04fd187b6cbbdb7acd974cb27c2372214ee8c3b3d72424fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_test_read_text.with_filetexts_files2_mo.assert_join_line_for_b": {"doc_hash": "9106f4eebda52c5e44def73f4dc649234a953b3f6f4556f79fe58b8812ea1168"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_io_read_text.if_isinstance_blocksize_.blocksize.parse_bytes_blocksize_": {"doc_hash": "61570dc07ba8945c875f6dfc6c0a9d9fc4faeb9498246c00e24afaaaa334b3c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/utils.py__": {"doc_hash": "c006cad3ecdc983bd4bc327d6346a1047b85b21c220e748bb56c37cad36884c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.persist_DaskMethodsMixin.persist.return.result": {"doc_hash": "b305d7eeffb2b079639394ac084bd40cab98ce9258e078a72d9bd972604a16b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.compute_DaskMethodsMixin.__await__.return.f___await___": {"doc_hash": "3fbb52bce024e2223defd881544157a4ee78c63ecd84e434a4df84572255cf3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_as_if_collection_optimization_function.return.getattr_x___dask_optimi": {"doc_hash": "fffaeb7d9fb6da09b62e2de4c3fa01e746f076e4b4b1b5c94598f0b960f33171"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_collections_to_dsk_collections_to_dsk.return.dsk": {"doc_hash": "0d83a7e0c6a29402b71aa802524d1dcd53216c03dadf045b7a40d836e92f4a52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__extract_graph_and_keys__extract_graph_and_keys.return.graph_keys": {"doc_hash": "9a82860491682ba5dca0c587113de6a0dc73321f4c975cd43d6abb66390b6cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections_unpack_collections.collections_token.uuid_uuid4_hex": {"doc_hash": "4a799f6c71edd512bb6c46481fbef056249840d242d6e15ce0dbf6022d32524c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections._unpack_unpack_collections.return.collections_repack": {"doc_hash": "5a78bf6dc84b14dc2dd931c349e94d1a068c1c1f67ba31bde4910d13d21df5fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_optimize_optimize.return.repack_postpersists_": {"doc_hash": "8fa491148c380d0870917d4947331a282ccd281d61885026c76743624859a4d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_compute.return.repack_f_r_a_for_r_": {"doc_hash": "54a3232cf16c84d1d9311fe94d79091ad0c6ebed146ba5e5c4f1be2243017253"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_numpy_register_numpy.normalize_ufunc.try_.except_AttributeError_.return.normalize_function_x_": {"doc_hash": "8be3143ac1870434453651fffb9d2c0226c7d905d7dbf9a94755b387dd2af208"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_scipy_register_scipy.normalize_dok_matrix.return.type_x___name___normali": {"doc_hash": "6673c90922161621018be273613d37a01924fbadb9d2df3cc1671c7bde675a03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__colorize__colorize.return._h": {"doc_hash": "b06bc20dd6b213e2c8e49ceb441122120e54dbc4cca91b301287997e410d82f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_named_schedulers_get_err_msg._": {"doc_hash": "73859e6d6e9c65d7624f985c800147ef630a77fd29f62b4cc9f4da8014686fe3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_blockwise_blockwise.return.subgraph": {"doc_hash": "556c16994c724aca8b330854ba5605363eab7031bc588b70df60f4d7da236604"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph._Tensor_operation_make_blockwise_graph._Tensor_operation": {"doc_hash": "d6c90a75ff49659cf431bf968849f8d459fbbac47265cd42324e41e85f60a197"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_product_lol_product.if_not_values_.else_.return.lol_product_head_value": {"doc_hash": "c06d9792f0091c1526d7fe92932b912f329fcac443c0cddecbf5298e75e313f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_tuples_lol_tuples.if_ind_0_not_in_dummies_.else_.return._": {"doc_hash": "516b776ac03b939a6df2e19ee51377359c7257737142b3c97f40c3ec50020124"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_optimize_blockwise_optimize_blockwise.return.out": {"doc_hash": "8c9d307f76af1127f52d2f157dcd17cc19b2d10e7347c039d8265a0c9ee4da7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__optimize_blockwise__optimize_blockwise.return.HighLevelGraph_out_depen": {"doc_hash": "d1d1c861733175bffb3f8e6f26bded1492f874d63aa8753d066ed466947b94a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_zero_broadcast_dimensions_zero_broadcast_dimensions.return.homogeneous_deepmap_f_lo": {"doc_hash": "f10300a86957e0abb6ed9af33e3185bb62f75d20416fe40a634b6c451ffc1b14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_broadcast_dimensions_broadcast_dimensions.return.toolz_valmap_toolz_first_": {"doc_hash": "86847d922a4d3c5ea2cc01c74ae8fd718a4d8055ddb1d59dc9957a8e9f10a3dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes.if_blocksize_is_None__": {"doc_hash": "e7d264f581ea4072afc6d28e1d4e7716160063cac4b4b0a1ebbaa03cca24e363"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_io_test_read_block.for_ols_in_0_3_3_.assert_b_join_filter_No": {"doc_hash": "e0d17fac10eed586f96bfa8daef9090b2afa0d7e86b7e21125f54b8337411a70"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_seek_delimiter_endline_test_seek_delimiter_endline.assert_f_tell_7": {"doc_hash": "1111e20246acb881045d62d1551a6c6e1d2f78a870f8d7f26e3a843917f38380"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_test_infer_storage_options.None_2.infer_storage_options_hd": {"doc_hash": "38f6beea2666d33e75700e455cec00c953f9b89d014f76c9ab060a1c92dd88db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_c_test_infer_storage_options_c.assert_so_path_expe": {"doc_hash": "d7e45876f46fe81d11a2e1fbc896da4af7a97d0c1e7ec70cbd50bcd170e215f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_stringify_path_": {"doc_hash": "59915d54c3a6b46a109d903c6ac0f38f4a0feb3aa5bf9f57f6b13e7723ea49d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_compression.py_from_io_import_BytesIO_": {"doc_hash": "f0055e8f0e2dcc4935c42b9325d5c6d6123cc6c9c3ba57efd125d49ed7a72597"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_os_require_pyarrow.pytest_mark_skipif_not_py": {"doc_hash": "9fd567f5259c951837a6e9486f33d17966e28e4cffc37fe879757a66321b0fb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_test_read_bytes.assert_b_join_r_for_r": {"doc_hash": "17e9104a98916329a29e9911c181816795af594a8e21fbc0e7c9e8935861a9ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_URL_test_read_bytes_URL.assert_b_join_r_for_r": {"doc_hash": "6d290f359c2ce73e13d84c41f71d33f4aa505a5e296ce64df51cd612ff194884"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_big_file_test_read_bytes_big_file.for_r_in_results_.assert_set_r_decode_utf_": {"doc_hash": "e2aa272048157180819c62b75269064948e2c9cab78c8dfe41d921484e2ec7f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_deterministic_key_names_test_deterministic_key_names.None_4": {"doc_hash": "c894a21720db49796c06a983ec53fea11ee11669ca20295a73dc66f1c72885a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_open_files_write_test_open_files_write.assert_data_results": {"doc_hash": "d7703a848c3942b443cad6b1e28da8456073af09cfdccd449625531de91933a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_csv_test_read_csv.assert_df_id_sum_comput": {"doc_hash": "252bbf49442f98304052d10aa56278b165d8e1d7f5de13c1a74d6a85c6b44ee5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_unicode_test_read_text_unicode.assert_len_result_0_stri": {"doc_hash": "0504be5d29232e277b4ecb181b80cdd4576c9061e5317aa5de9affacc6aee5e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_parquet_pyarrow_test_parquet_pyarrow._smoke_test_on_read": {"doc_hash": 
"97d3d449516f168241587a6afd60a97fb2357ea20e3bd44c193b6087da23923b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_glob_test_glob.None_10": {"doc_hash": "a546086891053da7ab792a2f562d3783432026f87496972ba9d4bccdcd1c8bbe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_distributed_": {"doc_hash": "a06b4a0544b06d12fc9981ac8c9ce741e04a9f6871cf99bbdd729b23a6b3276d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_dir_server_dir_server.with_tmpdir_as_d_.p_terminate_": {"doc_hash": "543868ad175113bd28ac22e0c51a595efd40858fc3d5fce3a5d4dfd3a4d53fcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_errors_test_errors.with_f_as_f_.with_pytest_raises_ValueE.f_seek_1_": {"doc_hash": "6e31048ce01ca257b556477f0961f8c5eb9ced20a1c7858061732d53047b4155"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_files_test_open_glob.assert_fs_1_path_htt": {"doc_hash": "f3a0df761dc77ca799ebc64343902ffdae025a2fe6a0082c60451f8aaa1f2cf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_parquet_test_parquet.assert_df_columns_tolist_": {"doc_hash": "60322eb456d80f9976158da6b01776d2942ce01e4866e119076a6bb17af146f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_gzip_test_unordered_urlpath_errors.with_pytest_raises_TypeEr.read_bytes_": {"doc_hash": "a9d45847d0e2908371c14412ba0d10ac5deea3f64a1e4bef5fc72b6cc8792109"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_test_read_bytes.with_filetexts_files_mod.assert_set_results_se": {"doc_hash": "14e9f27c323f969730088b35edcf975f9c88314e25c1651ced4336d1b3f4806d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.with_filetexts_files_mod.None_5": {"doc_hash": "2f1ef6a417886bb9e788047fd3d1e432b066071fee61487ec6d8774fcf61fbd8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_paths_test_with_paths.with_pytest_raises_OSErro.read_bytes_url_blocksize": {"doc_hash": "dc67c81e3c35a81d571560d3e73152bad6898ec8a419b7a6f6c3ef245c6013e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_block_test_read_bytes_block.with_filetexts_files_mod.for_bs_in_5_15_45_150.assert_set_ourlines_s": {"doc_hash": "1eadd733fb56382b96d26cb6e35b766d61f7b831f469b3d3b2cf996555886af5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_delimited_test_read_bytes_delimited.with_filetexts_files_mod.for_bs_in_5_15_45_1_.assert_ours_test": {"doc_hash": "c00aa149c3d985184880516480e5485854e82877d60d19aecfebf3decb5323c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_fmt_bs_test_compression.with_filetexts_files2_mo.assert_b_join_results_": {"doc_hash": "25e0a6e1e2ffbe33a6d893b4d17df4125bf3d93e63a2838b43b6778b315bf147"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_test_open_files_text_mode.with_filetexts_files_mod.assert_list_data_fil": {"doc_hash": "82d5ddfe4bb0ebdf839f914060559387dfdb86c7f5ba792ec2fb83e879255c17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_compression_test_open_files_compression.with_filetexts_files2_mo.assert_list_data_sol": {"doc_hash": "12d7ec7511a915f63d817bb85e4cee78aa438899eec0c2cb5a2edff676bfaec4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_bad_compression_test_names.with_filetexts_files_mod.None_4": {"doc_hash": "490a649b957f7724c290edbd51ecdd961638e0429341d496eb5c86ec7de08139"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_write_test_open_files_write.assert_d_b_000_": {"doc_hash": "04f30aa83320efb1ffc97ad918333864745abafb3c6d1d2c363dd2994c49a42d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_io_endpoint_uri._http_127_0_0_1_5555_": {"doc_hash": "0bfb9fb8cc735c30855476d2cddcf1d6f813160cd6b6889d7060190a23c8eec3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_s3_context.try_.finally_.fs_rm_bucket_recursive_T": {"doc_hash": "0eda9d444523366de397597e4c3ea428a81c660ac5a36e2db7d1e5a96578c614"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata_s3_with_yellow_tripdata.data._": {"doc_hash": "993a870e35c3b21d274f2d5be4cc9e74eeffce12b638ece11aa9f1c8ec3874e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata.sample_s3_with_yellow_tripdata.yield": {"doc_hash": "3ca90b408789aac0bdab1317668187f4b1a59d433cc28b18cb6975a3d9db0daa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_get_s3_test_get_s3.None_1.DaskS3FileSystem_secret_": {"doc_hash": "935771fe071c23866b083a16cf4c735c9ed04fd75aebd2ce073794b79c2e3e57"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_write_test_open_files_write.assert_set_list_files_val": {"doc_hash": "bc103e079d1e56778fe14df0f0bb7dfe56f458b3e6a52a666ea9a2c577b35b68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_test_read_bytes.assert_set_results_se": {"doc_hash": "b9be4e1b8d51bfa65b29cdf0875f4e5ddd946f226646d70ba3d5c9eae62cc37e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.None_5": {"doc_hash": "d8dd51609d00a3e78496e0c55634af0c923a1afe76bdfec41071c16540fc362c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_non_existing_glob_test_read_bytes_blocksize_on_large_data.assert_len_L_12": {"doc_hash": "c2a417dbb26aaab91c39f1a22ddc5e2566004da03efde4107925efbce87acbaf"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_block_test_read_bytes_block.assert_set_ourlines_s": {"doc_hash": "78011777c1ed34bb431a467130bc5f0f6ca4f191ba58c88b69f415dc48640fdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_delimited_test_read_bytes_delimited.assert_ours_test": {"doc_hash": "10e45a1e37d3aac9fe1cb4194ba117010ae03a87948d17eddeb511cce7416f11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_compression_test_compression.with_s3_context_compress.assert_b_join_results_": {"doc_hash": "b7536ede813a32c6b8b123fda04b4de6ea338c72d135fdeaecf040dd81fa65f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_double.lambda_x_x_2": {"doc_hash": "a1cc4f1a90e27f32b5d1a1cbadc7fc586336ae1a9fb0ce4e6e46903bc763fb91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_modification_time_read_bytes_test_modification_time_read_bytes.assert_aa__key_for_aa_in": {"doc_hash": "cca6a4b8fd46c46b0d1089e38202220a15f28bd8212fae078300c1a5f0c79618"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_wstoragepars_": {"doc_hash": "d91bf9f5b401defbdb62586277b78a85191f12f3b22d8a6fa06bb371bb971059"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Cache._posttask_": {"doc_hash": "72bad6928b0ce007ae8aef0d98ebb09b8e98684907c760163938532403125338"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_unpack_callbacks_normalize_callback.if_isinstance_cb_Callbac.else_.raise_TypeError_Callback": {"doc_hash": "f0f867f25560dfe15e74bd9cd365b546414b7e97dac7948e6647796c8f06b1b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_add_callbacks_": {"doc_hash": "ad8fe2ce86d5022fb82e9fd38e08008a9ef241322e41149195b7386c7198d1b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_update.return.old": {"doc_hash": "c71dcb3003f3e3ac2717d69dd77a9c38cadef2f0858654a802d676b3a81f9d22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_merge_merge.return.result": {"doc_hash": "4cd27ad1b01e3605a1f0fe307f5a4edcc01b6386926cf10697f60e51e167ee26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_yaml_collect_yaml.return.configs": {"doc_hash": "bc4c06e694ee02a72eb53fbecff23b44b7ccaf9a862feaab057e6173d6a17fe9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_env_collect_env.return.result": {"doc_hash": "eec13d1c985cf7f0ca1385f5d4ac9b9c3fa4f14d0b002e7dc3207a3c7a8f38f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set_set.__exit__.for_op_path_value_in_re.if_op_replace_.else_insert.for_key_in_path_1_.else_.d_pop_path_1_None_": {"doc_hash": "81b7638f8b74cda5d96e89cb6ef12fe9a50c595855066ebb89513527793d63c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set._assign_set._assign.if_len_keys_1_.else_.self__assign_keys_1_va": {"doc_hash": 
"a7742a52de5db84e4d68f7784a7a65ac4287cc2e872c82d8e631c1f7b2afd8dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_collect.return.merge_configs_": {"doc_hash": "0bc7e86a4f5e4e245ad8eb00e11395a2c8301c7dba6abe24db7c994f220000ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_refresh_refresh.update_config_collect_": {"doc_hash": "c862142752436152031c89e3ff8b213bba2e802ca14306578d4734da6f12fdb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_get_get.return.result": {"doc_hash": "0fa3dd6db49c9b2a18231fe2fbe141dd7b97013c922ebf09a0ea0f2fc892ecd7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/context.py___": {"doc_hash": "9b94840308bc2df124d86a6ee688254f746aef4da8c3d0ef85ad1d002edf9b16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_from_collections_import_d_istask.return.type_x_is_tuple_and_x_an": {"doc_hash": "31a4ef00d2766bd28778e69d72b6a45aa58fa961f56c610dd9afcd071cfb855d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_has_tasks_lists_to_tuples.return.res": {"doc_hash": "a71bcb93f05edf59995445a3dea368820b6d431120a41f11ff095e27662991fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__execute_task__execute_task.if_isinstance_arg_list_.else_.return.arg": {"doc_hash": "6a15c3edf3515a7e0129829081f1f1a8fd758f503f9cef27de8f1de5275f331e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_get.return.result": {"doc_hash": "a216cd7db689140155911eabc0dd78081c1edcc5ef33141c9d86aa26cfc128c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_keys_in_tasks_keys_in_tasks.return.ret_if_as_list_else_set_r": {"doc_hash": "5b9d5b6397ace8dee5f0a8b626c495381f1e76745a2d3335fddffdd867921532"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_dependencies_get_dependencies.return.keys_in_tasks_dsk_arg_": {"doc_hash": "e82606560c47aa77e9e170d6cace852f0fb977d1d4b1eedfbe4d56d0e69de537"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_deps_get_deps.return.dependencies_dependents": {"doc_hash": "5f40eb7e9d1d79c241cab8baeb72ca8111a242769952bfc7b5e0d96e9feca485"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_flatten_flatten.if_isinstance_seq_str_.else_.for_item_in_seq_.if_isinstance_item_conta.else_.yield_item": {"doc_hash": "bfa4256fcf6373e29acc4f6962e1a735cc731053478215cc134f42f0d35c6129"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_reverse_dict_reverse_dict.return.result": {"doc_hash": "61f986782a27d6c5e414f730911857a427d8495e832d37f2e8d4c75fa83fd450"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_subs_subs.return.task_1_tuple_newargs_": {"doc_hash": "571937b53e806f9d2f983c66637161668b848362fea75b8d0322ae1763dc4e52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__toposort__toposort.return.ordered": {"doc_hash": "ced4455f5568101f5886feb9e6a287e8b47d50316a18fcd65820657716297546"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_toposort_getcycle.return._toposort_d_keys_keys_r": {"doc_hash": "a3bd47916acb605eb33b6f7ed7555c7c19765d166048b22ace08f39aba479ff0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_isdag_isdag.return.not_getcycle_d_keys_": {"doc_hash": "91fe022049e156aa453bb6be6028c4af997d7dee832020aaeb2918ca8516c691"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_literal_": {"doc_hash": "ec07413818b6b23ff67ea46bc6ee800a5074d8d4da93c6a6a78593f8d2dea810"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/__init__.py_try__": {"doc_hash": "cb99295805602ccdc52dd827e2141020523bc837943a730736324cb5f3723845"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_compat.py_string_": {"doc_hash": "5bcdf31f5f3df2b82d2cd9a7220eabe69dfbaaa926fe1ebbcc8e7b2cce33b0af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_dtypes.py_pd_": {"doc_hash": "b906d107f0a9ce408d775b6bebf4ee20548c4ba3ef66c28016adc5ea139b1d15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._function_map_Accessor._function_map.return.self__series_map_partitio": {"doc_hash": "124f67a027bcaef5787a5acc5811f1f35a3171cc39b6b0f4760252ce9f74028e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_from_collections_import_d__categorize_block.return.df": {"doc_hash": "9ca737a4d3e5b2f0581befdce8a042a3315d872df3007abfe7e7b6ff156dc7a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py__get_categories__get_categories_agg.return.res_res_ind_0_append_re": {"doc_hash": "7c145bb2a96adfe8c81af92a651512a1b0875d1c1d423c11405e8fe0177b4c79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_categorize_categorize.return.df_map_partitions__catego": {"doc_hash": "7e259ce1965333217595eadbf25b3c454f5e69ce4947ea1b7f81f06a5f578f35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor_CategoricalAccessor.known.return.has_known_categories_self": {"doc_hash": "f9944594f35d600578198f6e6184e8d258ee0ae2617aeae0089a8b5d2d09dd00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_known_CategoricalAccessor.as_known.return.self_set_categories_categ": {"doc_hash": "4429e7aa6782760148b07fb0ceae4f6328a430fdd814de528965c5470eb30a91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_unknown_CategoricalAccessor.codes.return.self__property_map_codes": {"doc_hash": "b0fa5c51427b166d9989066d995d7ce65081f73a77cff2c81431258b208275c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.remove_unused_categories_": {"doc_hash": "8f0bdaf6aaf4c8abf0367d1f32cac18bdc206b8aa1dbb29f69adabafa05c6df4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__concat_finalize.return._concat_results_": {"doc_hash": 
"b84dcc351ff13456bfe236a0ea9fedd5aead305d56571756b4ccd4501b3ea9e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__scalar_binary__scalar_binary.if_return_type_is_not_Sca.else_.return.Scalar_graph_name_meta_": {"doc_hash": "9104f1eebc5dd74fce60ac95cf8dd8b6d910005a60c0b8ae63afdcc7249376f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._elemwise__Frame.__repr__.return._str_fmt_format_": {"doc_hash": "4ceed427a8c779934c3c6332e7e991f6f6e43e341fc8914b9d358f989d94da87"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.drop_duplicates__Frame.drop_duplicates.return.aca_": {"doc_hash": "7699ed750ce017b74c2dd525be1cf4f89bd3e02f425f0f8173ed996bf254970c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__len____Frame.__complex__.return.self__scalarfunc_complex_": {"doc_hash": "cb92767f6756902e129290d0f41f8d9e862ffb2493549267728df3a741a73198"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_partitions__Frame.map_partitions.return.map_partitions_func_self": {"doc_hash": "873cf849f213ab3d402e841fc47a707d8311bc522c38b27e655d81d4c209afc6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_overlap__Frame.map_overlap.return.map_overlap_func_self_b": {"doc_hash": "9530658ccda9f39c0790b144660a4712a0f485750d23c01c3b4ddf7d0ede94da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.memory_usage_per_partition__Frame.memory_usage_per_partition.return.self_map_partitions_": {"doc_hash": "566456afc062eee7cf0cc77adbcf44189dff337241c35c3fd878b47aa1b36484"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction__Frame.reduction._Generic_row_wise_reduc": {"doc_hash": "ae5f9449a7dc0d4ddafba71dcc89d974112bd3f37688d214d19b8d2ef692de16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction.if_aggregate_is_None___Frame.reduction.return.aca_": {"doc_hash": "ff4fcec0d10b35a4d5f34fb6b965b9458aad25c7a2638e2bdcd9d082dfff844f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.pipe__Frame.pipe.if_isinstance_func_tuple.else_.return.func_self_args_kwarg": {"doc_hash": "9ed7dd07abe16eb032828dbe16e31ed334762fedefea4a1b8b5fc188a8bc414a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.random_split__Frame.random_split.return.out": {"doc_hash": "45644ce7bc4b37088abc2cf95e5c08f59079189b39b51b5262b652eba6708a2d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.head__Frame.head.return.self__head_n_n_npartitio": {"doc_hash": "23afe4632f73c14a85060ee4b17278007817cd8fd3a66441c6a1367f49f94b83"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._head__Frame._head.return.result": {"doc_hash": "751411ac20fc7bbd5ecaaba698a290bf753e7c608c2e35ca9a26c33161cbc2b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.tail__Frame.loc.return._LocIndexer_self_": {"doc_hash": 
"c2771e74c29a85be5caf1897fcf9bf2074847febcedd34c8e493a9e740dc0427"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._partitions__Frame._partitions.return.new_dd_object_graph_name": {"doc_hash": "064d36739508c8a7e51aad2b9f46e4d70ab1f257d62c342760313b6f2ee8a2f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.partitions__Frame._Note_iloc_is_implement": {"doc_hash": "72ef4e3b8aaab183dd8b016670907d736b6f0858624794a9af9128bb3e04023e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.repartition__Frame.repartition.if_partition_size_is_not_.elif_freq_is_not_None_.return.repartition_freq_self_fr": {"doc_hash": "9464bcd26d17141b3862f61fef296ce049a9b6682d88e5b065cb0cb3e2a700a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shuffle__Frame.shuffle.return.dd_shuffle_": {"doc_hash": "ef47e44ee1f06ef4d41778431def17c806e29ec7d633c916182ea8c1712ff288"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.fillna__Frame.fillna.return.parts_map_overlap_": {"doc_hash": "41acfc559a9c2c8a81e4a07b8ed048a3571b46a326600aa0e5a50f90697c391e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.ffill__Frame.sample.return.new_dd_object_graph_name": {"doc_hash": "48fb49bfca56caad92a15e1637c5ab43f643ce875d3133d9ae22cf92eebfcd56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.replace__Frame.to_dask_array.return.arr": {"doc_hash": "21c19426c551f8fba78b0f0bd920600802910c2082527e1f7e4d08847552b0a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_hdf__Frame.to_sql.return.to_sql_": {"doc_hash": "8257d18a489c434d19c50515d9e981edc711e63cbad9b43610e721164e281ec7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_json__Frame._get_binary_operator.if_inv_.else_.return.lambda_self_other_elemw": {"doc_hash": "af1c2fdda0f8c04f950ea89ca47ddd3d504dadc7436d6fe0b56fc731ece6bc3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.rolling__Frame.rolling.return.Rolling_": {"doc_hash": "2abe969aa449b8ad70737da837dda34760c4ebd3f0388ae67543114b4eda6639"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.diff__Frame.diff.return.self_map_overlap_M_diff_": {"doc_hash": "4fb6f2bed1fff80b61e6136ae04903e2a068c9042820c51ba2df8c6fec701d95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shift__Frame.shift.return.maybe_shift_divisions_out": {"doc_hash": "7ff6f98b12af221c1b7060759422f6cc5392826fe8a62eb94418fb700e73463c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._reduction_agg__Frame._reduction_agg.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "93a24f926b775aa614315d65346b94cda7b2b360fc3494fbbcfd2fcc3f467a7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sum__Frame.sum.if_min_count_.else_.return.result": {"doc_hash": 
"903ca60bea03a3fa04f3cde47575c80a2413c976db43163034bfcd467b27fccb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.prod__Frame.prod.if_min_count_.else_.return.result": {"doc_hash": "dd154b384cf1e6adbd321a9b7cb33d7765a613f77f03c957f085eebc51ebfea5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmax__Frame.idxmax.if_axis_1_.else_.return.result": {"doc_hash": "f7c27d7bf5a0c74cadf9b7ec6d79728efbf4ae170d46da473fc0ace730bd6957"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmin__Frame.idxmin.if_axis_1_.else_.return.result": {"doc_hash": "6a61682e16b95f5d60869ac551c85bde75b5d118cfd54e7333eb2d3067d060e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.count__Frame.mode.return.mode_series": {"doc_hash": "3e04812778d1e4c43641e7aa8ef57c588701698b4245cfca307d0cda000dfad4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.mean__Frame.mean.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "3aad05efb8bcbf14809ad1842353c58b8b656d0075b2c2260a42d91786e073ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.var__Frame.var.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "47c9263977d0098f220fdca4e99448eb7a1271a60c787d81a1521d39900c2b2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_numeric__Frame._var_numeric.return.new_dd_object_": {"doc_hash": "b7fa964f6efbf1f94fb4a9a845c074ac792323355796adfe85000e4d3cd5099b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_timedeltas__Frame._var_timedeltas.return.new_dd_object_": {"doc_hash": "a7ec4dccf1f23b4e2d44db5957890a413f0d1d710fec54e395f8a7ef77befa2c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_mixed__Frame._var_mixed.return.new_dd_object_": {"doc_hash": "86cb136dfe73aa3bf5699137681cdf4d7c398e44ebeffb4178af96be340710e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_1d__Frame._var_1d.return.new_dd_object_": {"doc_hash": "5543e598532483e3aa91885124a06acabfab39abf5f05c46c5427d72c4ac917e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sem__Frame.sem.if_axis_1_.else_.return.result": {"doc_hash": "584034ec4f6cfb7f5179916f27fc1ddd541ced1c9294acabd44fea0e630a51f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.quantile__Frame.quantile.if_axis_1_.else_.if_isinstance_quantiles_0.else_.return.DataFrame_graph_keyname_": {"doc_hash": "a39c7ac0b3b205c5717362c8eb3252a59e456f8b6ae4ce2859617bb344f18521"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.describe__Frame.describe.return.new_dd_object_graph_name": {"doc_hash": "dab2cae73d7b75ad35e527cca9643944cbe6fad0578a2ebbf62d2acb6008a205"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_1d__Frame._describe_1d.if_is_bool_dtype_data__me.else_.return.self__describe_nonnumeric": 
{"doc_hash": "ff065fb19bf9ddc36dd054c1df5f7dadac34ddd1b9cf3909ffcaa6052d49bf94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_numeric__Frame._describe_numeric.return.new_dd_object_graph_name": {"doc_hash": "03152bb84e372843dad5cbf5845ff66ea58159f2c6779f5e0b7233982a2eb63b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_nonnumeric_1d__Frame._describe_nonnumeric_1d.return.new_dd_object_graph_name": {"doc_hash": "98911554d08074fae8a723430717663754a5444d53a7e46f5c96ed5f1d2331a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._cum_agg__Frame._cum_agg.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "b34edd69c05369a873c94f31f66bec823abf81b92478a28c285a3872b8838c0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.cumsum__Frame.isna.if_hasattr_pd_isna_.else_.raise_NotImplementedError": {"doc_hash": "e6e369ea5b95a5f03faddc25cd3a5e5c0b72f0e11494f4930b4f6a7766b25ba1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.isin__Frame.isin.return.self_map_partitions_": {"doc_hash": "bb0eec5f1f2df6265e9c6fab6aed3627b7e8928255edabf886942d9920a1c12c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.align__Frame.align.return.result1_result2": {"doc_hash": "a2622bd6ef1ac9f414987e5db81dce7f98dd537d67d0f5154b754e033a964157"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.combine__Frame.resample.return.Resampler_self_rule_clo": {"doc_hash": "f9a4ceb1d344fb471f7fd5789f31336659f3d4ef62bf30afcfd48e8665ac2076"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.first__Frame.first.return.new_dd_object_graph_name": {"doc_hash": "a0292be45fbcbd1456eddf239bd627c561dcda96a18562180733d3b7b71ab019"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.last__Frame.last.return.new_dd_object_graph_name": {"doc_hash": "59bb0bde6da79aeba4b3839f8fc99df8e5f4cba0549286508e6dc6f8ed547665"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.nunique_approx__Frame.nunique_approx.return.aca_": {"doc_hash": "b939ae26f9f169c7c12fecab2b86c985636022f598f54492ba0109cefbebf3c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.values__Frame._validate_chunks.return.arr__chunks": {"doc_hash": "839c9a01b01017e70435878bdc9a31896b499992cf13b3ab5e49f95e054728e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._is_index_level_reference__raise_if_object_series.if_isinstance_x_Series_.raise_ValueError_s_no": {"doc_hash": "1b911491c258916531ab0de438b2ffea19fe6b9d48b8c31ecc854f9be93d14de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series_Series._repr_data.return._repr_data_series_self__m": {"doc_hash": "560f4da35c050da82fc6e0c056b492980da2739fa58230e66a27f70df5799bfe"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__repr___Series.__repr__.return._Dask_klass_Structure": {"doc_hash": "9fe87dc3149b7a61636725dc0b7b098a6fbc5334424717b2f96bf63d3a1b9749"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.rename_Series.rename.return.res": {"doc_hash": "8086109222832fb61514660176199e346e6a34d1e8f700876f19b89af439be33"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.round_Series.quantile.return.quantile_self_q_method_": {"doc_hash": "7524d3f4ef393c59ac190930d9f4d5731dc6709de67c75d7996610cd3d9f9f00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.value_counts_Series.value_counts.return.aca_": {"doc_hash": "9278999a19e9d3e0fb07b1340855a93f54c7a1b29b916dcafde644fd046c1308"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.map_Series.map.return.type_self_graph_name_m": {"doc_hash": "3f77b15f1cc8dc46dd93a82d7dea3978eda4e4408fd180006efdd31876ed49ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.dropna_Series.to_string.return.self__repr_data_to_stri": {"doc_hash": "eed0944831f50b813965af17b659555425d40bdc70dd99cdc4831f859bd0abbc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_operator_method_Series._bind_operator_method.setattr_cls_name_derive": {"doc_hash": "586149de8ea2184d2decc7f4cc3d20f2970d0ac62f59868dc1c90ea28f091573"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_comparison_method_Series._bind_comparison_method.setattr_cls_name_derive": {"doc_hash": "4eb43160ae98c1f139b8e2487b0d552deca5355908ad43ff8a0e966d83bd9512"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.apply_Series.apply.return.map_partitions_": {"doc_hash": "53e50abcf4318436c215ba6fbcb522c62704a9ec4118e94e781c31fc9b7d94d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.cov_Series.corr.return.cov_corr_": {"doc_hash": "5a2b8e8f66f40ec88b1938e3b596f6294ce91897619cc226a6d7f20c3f46d49e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index_Index.__array_wrap__.return.pd_Index_array_name_self": {"doc_hash": "930141ccfc318a58924741ba4c6ab0f904f1ca981b07ca9476c27fa15e2d168e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.head_Index.head.return.result": {"doc_hash": "427c8a052dce818b496bea08f129808b44211dce6b1a8d934c9c7e342750fd07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.max_Index.count.return.self_reduction_": {"doc_hash": "c551c26b9057dd03164f0150e4a391558e505de1f0c86284476e58b53665cce7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.shift_Index.shift.return.maybe_shift_divisions_out": {"doc_hash": "d26042185c3f9b8fd7a57f96a38a95895ac99e70e4bcc6206cee91a859881377"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.iloc_DataFrame.iloc.return._iLocIndexer_self_": {"doc_hash": "a341558da15c3e6ca6b652c585aa40ba27f55b87b81f863a4b01046dc7ccf019"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__len___DataFrame.empty.raise_NotImplementedError": {"doc_hash": "4b84a20345f44c2f2662e1a944d183f0fe7a74c31e2b1807197e31f20a119d38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__getitem___DataFrame.__getitem__.raise_NotImplementedError": {"doc_hash": "c764936b3082493a780c48fa608840335207048a3dd2fd40d90877d43f45fa35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__setitem___DataFrame.__setitem__.self.divisions.df_divisions": {"doc_hash": "c7a48e4c5ce7012784df9c79b6d0a54ddaabe9b2ad7ba434ad2e5548edfd1284"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__delitem___DataFrame.ndim.return.2": {"doc_hash": "5649c016cd445700bfee3d0cd315c0418e5ab2fe4d8f84dc5610afbc69d07d68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.shape_DataFrame.shape.return._row_size_col_size_": {"doc_hash": "b2bbebfa2b85687629a7d3b84796ce974dd4da4205f96939f437413a3dfeadc1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.dtypes_DataFrame.select_dtypes.return.self_list_cs_": {"doc_hash": "592fcee05b6144cdd1563d95d8ffc95b825cd6b3a441cb711196f6fa4e660544"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index_DataFrame.set_index._Set_the_DataFrame_inde": {"doc_hash": "43b0970a3d27943affbf43a77d88258b1549007d6e46b9d54e6fe22130d6ac24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index.if_inplace__DataFrame.set_index.if_pre_sorted_.else_.return.set_index_": {"doc_hash": "39f7486cf7b22ff837537f6794c543183d420d42c55737cbd4a8b9a71ef71503"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.pop_DataFrame.categorize.return.categorize_": {"doc_hash": "d3c33e7e60d3b03fde44bd4d8e80879ac99f7d643462b0fa8d35d8d9efcb85b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.rename_DataFrame.query.return.self_map_partitions_M_que": {"doc_hash": "a360f13c21b24582c2df4ed79aed54879c2c9b7cb48b355e84f721f93fb256dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.merge_DataFrame.merge.return.merge_": {"doc_hash": "52c39242241a8546e2c1312a0f1b69b1d8f518761f8ea329eca32edb48cf8626"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.join_DataFrame.join.return.merge_": {"doc_hash": "5ce428ac1ff1262c0f0ca4626de68fb7ff99e14b443c23952e566a737d3b3077"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.append_DataFrame.items.for_col_idx_label_in_enu.yield_label_self_iloc_": {"doc_hash": "9e174bd32806de1451ab87c09c16e50558c57bb6d72801852ce5b822577bf761"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._bind_operator_method_DataFrame._bind_comparison_method.setattr_cls_name_derive": {"doc_hash": "61450eb6a9802c818153f06467c5d6337cef12120227707a1bbaf85db8769627"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.apply_DataFrame.apply.return.map_partitions_M_apply_s": {"doc_hash": "f327c7d0624e770670f956dc4ad70e8bbef2cb1e93069ae8fd65573492c6461f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.info_DataFrame.info.put_lines_buf_lines_": {"doc_hash": "439506b4ff219cd283818144c55cacb19fe1b9e65fda7dc189bc4a68fc370c25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.memory_usage_DataFrame.pivot_table.return.pivot_table_": {"doc_hash": "98342371e3e99524c7a300b46b692b95b50b4570f8fb11980b3dd8d77e8b561a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.melt_DataFrame.melt.return.melt_": {"doc_hash": "b855cf83783db9df009269f6d437c7d9427f78b385ff3485ddac1680b8122766"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._select_columns_or_index_DataFrame._is_column_label_reference.return._": {"doc_hash": "7b02ed53f82733ea07a03eaca07ad8e904a637801205167da72c6dbb131208db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__bind_operators_is_broadcastable.return._": {"doc_hash": "c1e1538881ef8b8b5bc83a5209ef4c6ce345219d5ac512d7fb295657bae52779"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise_elemwise.graph.HighLevelGraph_from_colle": {"doc_hash": "d83f1e6fadc72e661044cb04634eb8c5d2c09609b45ee0d6375b927ffb55e150"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise.if_meta_is_no_default__elemwise.return.handle_out_out_result_": {"doc_hash": "542e8d5698a7458e6e2371e4956acd69cd35d54109721cc4a96e5a647929bcc3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_handle_out_handle_out.if_isinstance_out_Serie.else_.return.result": {"doc_hash": "8f40e3bb6285c1b234644e0e05192c62e3d52b3853cd76a521efd5551e50b8af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__maybe_from_pandas_split_out_on_cols.return.df_cols_": {"doc_hash": "54ae8a3ec828f35c61de0fb28cba17df2e7ee7e4f623141d4eaf22b1bdba89e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply_apply_concat_apply.npartitions_2.npartitions_pop_": {"doc_hash": "b40adad79faf10919ca6c27b18efaab969d7b292c88e692906fb2304f616959f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_aca__emulate.with_raise_on_meta_error_.return.func__extract_meta_args_": {"doc_hash": "6013835b0f07295ab9bf101dcfc2d317b9b8e5013da03331613d06191608f327"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_and_enforce_apply_and_enforce.return.df": {"doc_hash": "e606df89878704585505bd938ec7e5b86a4b8d62fecb7b1176162b0b6b6f4800"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename__rename.return.df": {"doc_hash": "13a8f72b99544060d30010ee8cb4303eb6bdf6b69b835d9d1603e7edf4500399"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename_dask__rename_dask.return.new_dd_object_graph_name": {"doc_hash": "d572e875167460985ad420ad9cb5648522bcbc5cc56e5dd45113d5235e770dc5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile_quantile.df.df_dropna_": {"doc_hash": "9943f3dc733e8803b3b08970f330b6893d65025d80fd37d5369f4ff232b77d97"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile.if_internal_method_td_quantile.return.return_type_graph_name2_": {"doc_hash": "16c8a64d550cd0d9c7a749ccec8aa7c58065f8b4e040af095d3cabc94f3131db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_cov_corr.return.DataFrame_graph_name_me": {"doc_hash": "0060c84d4269d77610359c9a854a8ca1038fd8f9751b893fe38de0932a8e8dae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_chunk_cov_corr_chunk.return.out": {"doc_hash": "19c9a3005a69ad9b1e629bf60ded496347d6333228a55b035ca5f2e80c4b52db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_combine_cov_corr_combine.return.out": {"doc_hash": "c7975dfa1a5e50475bb4e64cc04a3c3424adb6ed05f2a30f645f0d2b8679b092"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_agg_cov_corr_agg.return.pd_DataFrame_mat_columns": {"doc_hash": "588eccccb28d73ab249cb04496485bc00fae6581e02e1e67d1024159d90e3cef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_pd_split_pd_split.return._df_iloc_index_i_for_": {"doc_hash": "239ba94d59c461347575acac40abe2852e1e6a5cd5457bc216bbd8f39d68c7df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions_repartition_divisions._left_part_of_new_divisi": {"doc_hash": "0aa9f2ca169c6eb57cf96fdf7c617b0a39799ba286de03e66f8b01aa2a6bc3c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions.while_i_len_a_and_j__repartition_divisions.return.d": {"doc_hash": "f083b87dfe44bd2a5bd7654949845d8b42b0108f4cb8343f5eb7b9263ac18d82"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_freq_repartition_freq.return.df_repartition_divisions_": {"doc_hash": "4cdf75b0da0dd0acb6b19e6821337d775689623c3b16f85a47b1f2a02fbfa3be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_size_total_mem_usage.return.mem_usage": {"doc_hash": "7e154490d492b3f0b507807bf58478010614763411323cde41b6522d967064f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_npartitions_repartition_npartitions.if_df_npartitions_npar.else_.if_df_known_divisions_and.else_.return._split_partitions_df_nsp": {"doc_hash": "305713e3614694ea4bfb25305e829b088b2261b585c296056b8deae169cbadd7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__repartition_from_boundaries__repartition_from_boundaries.return.new_dd_object_graph_new_": {"doc_hash": "2e553d7af69e861b4d34868ab4241cc0e302e85d6388eca071cfff898821d03f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__split_partitions__split_partitions.return.new_dd_object_graph_new_": {"doc_hash": "c40e2df8fc6dd010379476595cd2cb3365e3c598e569898340862fb7e530727a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_repartition.raise_ValueError_Data_mu": {"doc_hash": "a5a4b817dccca825c49b2af11f9a7b2da058959cb5498f750f7c22722ce49026"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__reduction_chunk__reduction_aggregate.return.aca_aggregate_x_kwargs": {"doc_hash": "f30416425faba5d33e90d2710eb5ad8417348d089c6e522cd70fadcbc1b78786"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_chunk_idxmaxmin_chunk.return.pd_DataFrame_idx_idx": {"doc_hash": "d06e3b38461269b7176649a041f2b988da4e69eea5d565ee8e616bd2b36035ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_row_idxmaxmin_row.return.pd_DataFrame_idx_idx_": {"doc_hash": "97884027d4d7f08232744cce68ce1cb6d9d07c102f3a432c8e9088b3fa62d4f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_combine_safe_head.return.r": {"doc_hash": "9a86488d3ceaab0b3ba0c7e565089762af066eee4bf5fd50d027b51b607609de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_maybe_shift_divisions_maybe_shift_divisions.return.df": {"doc_hash": "4e10a3e14dad1aa4724008b9f35c1e46f90496e9eb063e7c21854f86c3d03bec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_new_dd_object_new_dd_object.if_has_parallel_type_meta.else_.return.get_parallel_type_meta_d": {"doc_hash": "3611e299557fd97ebb8b64eb8fc5d3fce65b8ba29cb94bd19a981b9b20446c73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_partitionwise_graph_partitionwise_graph.return.blockwise_": {"doc_hash": "b7f6d4b7209a90df24091403666d52aabe0aea7e272775d9ad6c3760ceee28f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_meta_warning_meta_warning.return.msg": {"doc_hash": "c8981c3cfd6be84d9502d443e49823edd84a62db4f227f47fa3ef1e96007de39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_prefix_reduction_prefix_reduction.return.new_dd_object_graph_name": {"doc_hash": "543a63a550ddc2c2fc9b4b60e50901f9f33c07900028af2dfb3e3ece705edee5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_suffix_reduction_suffix_reduction.return.new_dd_object_graph_name": {"doc_hash": "a6d50e7a76f40a9ac3519563e65d3f5fe3c61a0d0bdf5a0d041c26880a43f85b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/extensions.py___": {"doc_hash": "f259bbb32da47f5b1abb9df24d9d6ab8e0154038f61bedcb2a04f22a783c05f4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__maybe_slice__is_aligned.if_is_series_like_by_or_.else_.return.True": {"doc_hash": "de7c340c75ae1b1bf6ca71e09ccec98221f26ed84a22536f4e67ef4343eec111"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_raise_unaligned__groupby_raise_unaligned.return.df_groupby_kwargs_": {"doc_hash": "a5aa703cb67886624a12a4908e8905634f47bafe0368c7894fc1768641fc532a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_apply__groupby_slice_apply.return.g_apply_func_args_kw": {"doc_hash": "6647c42ee56f093577acbdded336a877e3d6dbe6028abb734549a44f3eaf87f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_transform__groupby_slice_transform.return.g_transform_func_args_": {"doc_hash": "3076ea5b1232b19151a43d686934d543dc268a698e611264709dcf83435b57d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_get_group__groupby_get_group.if_get_key_in_grouped_gro.else_.return.df_iloc_0_0_": {"doc_hash": "0e21f5f0c941126b6b905d108a78fc2851f384b4e2205c30b5df59291cb6a0db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_31_Aggregation.__init__.self.__name__.name": {"doc_hash": "7b32ec788f1a1ca53e6e2ed8d04f8ed6f2385dd2dc23028f9f44b1166379dc51"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_chunk__var_combine.return.g_groupby_level_levels_s": {"doc_hash": "343911852dfb2986c36799d93719eaa9c409df063601467d9588163b2a3fde71"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_agg__cov_combine.return.g": {"doc_hash": "341b31f482ba3fe808dd3ab0e6f8d56be076539689e4b42918dc10b305f2ee35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_finalizer__cov_finalizer.return.pd_Series_vals_index_ind": {"doc_hash": "41ed449221a6283a24dc306b14f270bc8e08ea0ce6d561ed5733033fb06ad283"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_agg__cov_agg.return.s_result": {"doc_hash": "cef5e87a5546d0cbd073fa69b32d123230f4315396f3ee34723f57e21c33aa59"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_34__nunique_df_chunk.return.grouped": {"doc_hash": "dceef6c002592fefe7582aa016fbefaa7fca261dd9439e1d1f4afb45a12c9949"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_spec__normalize_spec.return.res": {"doc_hash": "a7c7cf5733b738352eff17a7f59cbd887c9ee04c5c1e964edcb66173e2b0ad22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args__build_agg_args.return.chunks_aggs_finalizers": {"doc_hash": "293f3f19730d0b0299e6ee016f8ac6163fb2f90997019c8895cbe82821fbb113"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_simple__build_agg_args_simple.return.dict_": {"doc_hash": "b6355c9cd1ed256be6e11db3c3d96c0154454e8b0164418bd86f13416aaac80a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_var__build_agg_args_var.return.dict_": {"doc_hash": "9f8e8695ae0d09a48fd5b6a08d764aacbfac2d306b51a8798af73ed685bc926b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_std__build_agg_args_mean.return.dict_": {"doc_hash": "7502a9e5211d2e3a9ea79236e700fffad2984d8e9a0ccf017a8520c144b966c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_custom__build_agg_args_custom.return.dict_": {"doc_hash": "635def7548548d02fe44efc4a579486cb0ca1006ad55cbb2718e2aa0c532f399"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__compute_sum_of_squares__compute_sum_of_squares.return.df_groupby_keys_sum_": {"doc_hash": "12ebf6bde14d424b434453d449b63181b1bb762701e54784c41f44147867743e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__agg_finalize__cumcount_aggregate.return.a_add_b_fill_value_fill_": {"doc_hash": "a0e61a99f6974563e3345b65891fa25b9119d5567d49c9ff042d680d61950fba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._meta_nonempty__GroupBy._meta_nonempty.return._maybe_slice_grouped_sel": {"doc_hash": "968bc94734eba6f09a01d64fa0984648d2a0416e6cf83b556d6b91983868fb17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._aca_agg__GroupBy._aca_agg.return.aca_": {"doc_hash": "8e86ad3d502213d70486ae72c94b97bbece1314eced11a4015c58e9507618bd3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._cum_agg__GroupBy._cum_agg.return.new_dd_object_graph_name": {"doc_hash": "eb54384cfe9f20c754ecf0eb570fea18c4b0cbfb7cc6e52d7dcffadd129fd461"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cumsum__GroupBy.mean.return.s_c": {"doc_hash": "0089e037f897aa1debbd8a264a4d2a818201994daef16a76e57c1b26ecb8d618"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.size__GroupBy.var.return.result": {"doc_hash": "8306d9f535efb88dfa74b5adee3e01cd6f61357ff23f4311e22b47da4ea16380"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.std__GroupBy.corr.return.self_cov_split_every_spli": {"doc_hash": "f6279c413f99f48e61d08335338a4e3f5c19ededb274cd7a1c58ec68c91b646c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cov__GroupBy.cov.return.result": {"doc_hash": "23273bb4308bb6a73d87c7f6fc1e04955d10b339307509959591c0f4bf3b2ca1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.first__GroupBy.last.return.self__aca_agg_": {"doc_hash": "4e920de8d94784c1a26df8db94239cb836328c730a605338ccf1ab3e16e479a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.get_group__GroupBy.get_group.return.map_partitions_": {"doc_hash": "585cd000e72a6afd411cf46a49b9d157e2d01468e1020733f78f418e6fab0fea"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.aggregate__GroupBy.aggregate.return.aca_": {"doc_hash": "8aff2661bf91365116a8277d2af4c4da8983432ff1ca25bfbb2a744885215454"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.apply__GroupBy.apply.return.df3": {"doc_hash": "f81ea20a79509e7b630e21066af99dd1358d096400e9e0ce12ab43b88e43365e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.transform__GroupBy.transform.return.df3": {"doc_hash": "42e897ebea00af65d935074d330c13aa1a9fa420eeac8f7dd71acd0d6e8fc260"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_DataFrameGroupBy_DataFrameGroupBy.agg.return.self_aggregate_arg_split": {"doc_hash": "11edb16f853e275bb65f254552a325ad97bb8c68186e96fff3e99e984cebbd23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.nunique_SeriesGroupBy.nunique.return.aca_": {"doc_hash": "3beca15dbb1a7d190599c6e22ed15500324f9c4264aec6214db898a5edb85182"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_compute_hll_array_compute_hll_array.return.series_reindex_np_arange_": {"doc_hash": "b24d9752c56f9d25e92b1038d2f217c6b3129051a7bfcd9643448ad290930291"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_reduce_state_": {"doc_hash": "58c6bcd71a86ec286ad7b8f5b661e6ac3f11b562b227776f531f518e64cb4e81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__iLocIndexer__iLocIndexer._iloc.return.self_obj_map_partitions_m": {"doc_hash": "ec410441280d2886fb614112270ca7e8af4d8e854938240c42887c18dcf560e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer__LocIndexer.__getitem__.return.self__loc_iindexer_cinde": {"doc_hash": "696147c57568f1a7894e000551fe2621781f9ada9c1d0feac0a7d4c629e457d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc__LocIndexer._loc.if_self_obj_known_divisio.else_.return.self_obj_map_partitions_": {"doc_hash": "68eb172622939cdeb8ec05cd7ae409b538c50c3458a4eab93a8101be168e4af0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._maybe_partial_time_string__LocIndexer._loc_array.return.self__loc_series_iindexer": {"doc_hash": "72bf5b445a4e909164191aa2f529959b4b1c497cf8512e92286cc6595c30c16f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_list__LocIndexer._loc_list.return.new_dd_object_graph_name": {"doc_hash": "20c435c01dfebaebd1a4d6ba9dd587de43fa937ac93f5f77ad4f5ef45e7e85b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_element__LocIndexer._coerce_loc_index.return._coerce_loc_index_self_ob": {"doc_hash": "79e6ec9c246ed76321e4723528c0101cbca36d376fe49b0e2bd7c62454c0c8e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_slice__LocIndexer._loc_slice.return.new_dd_object_graph_name": {"doc_hash": 
"80a17f587d58cc1aad50a054e7524db483a389648e58684a4d2c865d40ac4376"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partition_of_index_value__partition_of_index_value.return.min_len_divisions_2_m": {"doc_hash": "cbe4fb55ad75eed4b2ff9a73f6a87334f680705641be0d5bce31c34ddd4d2161"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_pandas_read_text_pandas_read_text.return.df": {"doc_hash": "916ddba5939aa05afd361b2f44d47d277892d7401218c8402ef42f30da4404d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_coerce_dtypes_coerce_dtypes.if_bad_dtypes_or_bad_date.raise_ValueError_msg_": {"doc_hash": "3c8225aa288a51bb5f569861513ceafb33128e3835f84737591d038bae476d76"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_READ_DOC_TEMPLATE_READ_DOC_TEMPLATE._": {"doc_hash": "0a2913b1c21a2eacdbe0d83cf8cc8c3e5c40fe37cd4bad77d5d277afa0937e65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv_to_csv._": {"doc_hash": "0bbc2a44e78e7ca3c1cdb469ec64724425ea3a84b4f0e5e51833d1471adf98f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv.if_single_file_and_name_f_": {"doc_hash": "ce2aac432043f2546187297b6b5be889e60bb01ff350c487b5c43e6de8edcb36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_part_make_timeseries_part.return.df": {"doc_hash": "fb0e4d46cbfcb8616da058cf898e36b2ee0f5b5d39663a56f057ccd2c9252fed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf_to_hdf._Store_Dask_Dataframe_t": {"doc_hash": "24c79970126b6181551f918baa03e6ff9f1c35edab82d80cc78213fba339c2d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.name_to_hdf.for_i_in_range_0_df_npar.filenames_append_fmt_obj_": {"doc_hash": "fde1d532179e25ca3f4b97668a2ebebff3b667af46dfc0d0bbba9c963a512bcc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.for_i_in_range_1_df_npar_to_hdf.if_compute_.else_.return.delayed_Delayed_k_dsk_": {"doc_hash": "96b241b150adb8f84e7b7350722478739808758861b0e88bae57c81550da4ace"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_dont_use_fixed_error_message_read_hdf_error_msg._": {"doc_hash": "ec55d5a64433581a1fb152c9b6c33ce837417eedcbc166d6b5c2467a7941888a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf_read_hdf.if_chunksize_0_.raise_ValueError_Chunksi": {"doc_hash": "8e9a08be5d485c5e119561f08374d979165679bd1e8b90bb2faa1c6e4091a6b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_array_from_array.return.new_dd_object_dsk_name_": {"doc_hash": "b9250930bf19fcc550dc1d533955c1e8384b5673360d15d8305b250257354fd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_bcolz_from_bcolz.if_index_.else_.return.result": {"doc_hash": "231f14d9309efbae78fcbc980d506781b73b29005871cf15f01b5034e1c1a01a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_dataframe_from_ctable_dataframe_from_ctable.return.result": {"doc_hash": "f565af2b4c9d771f5571ba1a26c78a9df39af61634496c09e0907305bde4d44b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array_from_dask_array.dsk._": {"doc_hash": "a4a048dc021be03b849eb8fa0ae9a2c43969b42d5cab8dcd3f691a08d3abb385"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_bag_to_bag.return.Bag_dsk_name_df_npartit": {"doc_hash": "08a51fb888a0fc70b32aa375e5a44e0afaff13140d07d078e8b4f37581fe9790"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_records_to_records.return.df_map_partitions_M_to_re": {"doc_hash": "ea314e2a3041a5e7caa2489f870d5078df65c4d7ee09ab746b1369a0971ffe7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_delayed_from_delayed.return.df": {"doc_hash": "d58b0026ed90fd5134e80e8442589cfbc7ccb40d1eb76a426b513bd12595a2bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_sorted_division_locations_": {"doc_hash": "d35fabe35f4455f9bae1510d22977c1d21b0afe50e5afb1d35483ef10acc5ba7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/__init__.py__": {"doc_hash": "a1ef1b0ab3e9f7468462e669760f818b374c0907708e427d580ae898952aafae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet_to_parquet._Store_Dask_dataframe_t": {"doc_hash": "2ca9dc8fe9246af4208a0dd4eae41d71452989d10e4a9e3c3965d51d2a3ee12a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters_apply_filters._Apply_filters_onto_par": {"doc_hash": "5680a3d58fbdb62ac58e555584029ced11b90fd5115fee9fa44928b0700f9173"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters.apply_conjunction_apply_filters.return.out_parts_out_statistics": {"doc_hash": "7e0ef0d532208930599f2311ac11b118213ef5fd01204c841dc147244458f21f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_process_statistics_process_statistics.return.parts_divisions_index_": {"doc_hash": "0ef101cb3b2245cd6f0339507974b23986fc577ab9ffe3973200c750b4d61b40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_set_index_columns_set_index_columns.return.meta_index_columns": {"doc_hash": "b853fc6b1d7f3c850c9a1ff748e2bffc90523919c4cab9e6844962e2f3a8f0e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_aggregate_row_groups_": {"doc_hash": "2b299af811a1279bfb06f60cc26c1e9bbb8b3db3073aaf600c995f216fd6e6d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.initialize_write_FastParquetEngine.initialize_write.return._fmd_schema_i_offset_": {"doc_hash": "8008909ec94c591cf01b0681becfe89044d14a8d1b256b0bfa499e1e6e941007"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_partition_FastParquetEngine.write_partition.if_return_metadata_.else_.return._": {"doc_hash": "16ac9086d32e17cbd96783a58ac69c7b88c5d657fe4aea285d58276b848aec49"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_metadata_": {"doc_hash": "587f613c6a27ce5f86950209ba29b6fb69b6f16649e646c97fdbda18d83f6268"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_re_Engine.read_metadata.raise_NotImplementedError": {"doc_hash": "94c69cb94a6b3ea2ae8c2589f688e450838a5cc508cffb5989e19a989a24da94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.read_partition_Engine.read_partition.raise_NotImplementedError": {"doc_hash": "439bea3c68b26190dc93c755807ed7622d9313069cdb94642058d20d5915cf11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.initialize_write_Engine.initialize_write.raise_NotImplementedError": {"doc_hash": "0738cc1a832dadfb2e084660dd708d93220508a2266075582f36695ccbc875cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_partition_Engine.write_partition.raise_NotImplementedError": {"doc_hash": "83019f2d0181559d2826064e0fbba31366a93df2f2f8527d4f383bd189be7782"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_metadata_Engine.write_metadata.raise_NotImplementedError": {"doc_hash": "1b5602b8675a6d98e568a363b233d41c23147f28ca6ee86ff2b3d25f95470b63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata__parse_pandas_metadata._0_8_0_allows_for_dupli": {"doc_hash": "47b6ef76ffbfeaf044b0f17c4609c6964366faa238c92cc85bf73040dfb3e83b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata.if_not_index_names___parse_pandas_metadata.return.index_names_column_names": {"doc_hash": "c464d4f516a1919e04f3e244e6c96c0932dc7f486db6980fbcfd2a40ba579978"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__normalize_index_columns__normalize_index_columns.return.column_names_index_names": {"doc_hash": "3364e81381f12531be3f08603ce946178842c3e8b535d70090c49a61153c6c7a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths._join_path.abs_prefix__analyze_paths._join_path.return.joined": {"doc_hash": "7a5f295adc273257c79f4c0cc16f36aac927036d3cdc3745f665ab078c7e9504"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql_to_sql._Store_Dask_Dataframe_t": {"doc_hash": "24aa63c9a7c4b25e831662b74937ce64528986e902226fd42869c82c3b110a16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_simple_test_text_blocks_to_pandas_simple.assert_eq_df_amount_sum_": {"doc_hash": "bf710217da4bc62c3c9bd357877d9523fe43617ec1a5093cb476dab3a04fb328"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_kwargs_test_text_blocks_to_pandas_kwargs.assert_result_columns_": {"doc_hash": "b6ff139a4c3bc5ad8445fffd6f07ece3c7db12d9d11d1b44308e125088913ddb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_blocked_test_text_blocks_to_pandas_blocked.None_1": {"doc_hash": "5e29465922d6488a9621c09c7c1033416a3141c1f449cdc93b9c84c8157953bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_test_skiprows.with_filetexts_files_mod.assert_eq_df_expected_df": {"doc_hash": "6d51c4e5c8647c08ddde626b745bd05f354950182f1b72a04ecbb799a23918d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_as_list_test_skiprows_as_list.with_filetexts_files_mod.assert_eq_df_expected_df": {"doc_hash": "bae6a9a2f7294532104907a4a2d7804ef9e4a5600f727b05f27898c874c5991f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_blocks_tsv_blocks._": {"doc_hash": "0099c80dc505bd657e1f01e7d28070d0bed901785da41c031c4d1668e5332dea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_dtypes_test_enforce_dtypes.assert_all_df_dtypes_to_d": {"doc_hash": "ace910592524eb4fa722bb25e4a0b9ed2b115cc86ced63a08cb6085bca7dab14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_columns_test_enforce_columns.with_pytest_raises_ValueE.dask_compute_dfs_schedu": {"doc_hash": "3933bdacba7ea3f4ff66948fcedcf8889e75be446ef935ae83fc9ffc9b813ddc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py___test_read_csv.with_filetext_text_as_fn.assert_eq_result_pd_read": {"doc_hash": "fc70550d7f9487f8f74944f0602ee4f9372d014c7531a105d0ac2e1457229fd1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_large_skiprows_test_read_csv_large_skiprows.with_filetext_text_as_fn.assert_eq_actual_pd_read": {"doc_hash": "c3011123206797b28727bf71dae59e932279df595c696ad8e61d425c7ef97964"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_only_in_first_partition_test_read_csv_skiprows_only_in_first_partition.with_filetext_text_as_fn.None_1.with_pytest_raises_ValueE.dd_read_fn_blocksize_30_": {"doc_hash": "cd023dccae065cc1a9e56213db4c260e0c2ec52c267f27e51ae116cf73da7035"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_test_read_csv_files.with_filetexts_files_mod.assert_eq_df_expected2_": {"doc_hash": "40e842318df7f117a3d286e98c52782f861232aa37a44d01b3ce780966a26788"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_list_test_read_csv_files_list.with_filetexts_files_mod.with_pytest_raises_ValueE.dd_read_": {"doc_hash": "a4ad5c5f50f49364e0b5872abdbfd1adb1768c2812cc8deac5c873b8519f1150"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_test_read_csv_include_path_column.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"doc_hash": "d61f49997aefbd12c7014cb914f46b21af44dbe34d779fa1f49a11e66319c725"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_as_str_test_read_csv_include_path_column_as_str.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"doc_hash": "a1e7036d15fd210d6c1e978aae150e767af6550c0493ae659cf98cc60f465352"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_duplicate_name_test_read_csv_include_path_column_is_dtype_category.with_filetexts_files_mod.None_3": {"doc_hash": "c4d8b715c71ced591422ee352c36e99bc866f29feca5febc40684caabef87460"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py__After_this_point_we_te_test_read_csv_index.with_filetext_csv_text_a.assert_eq_result_expecte": {"doc_hash": "94c2500110ac24242aa7db1268156c6b4710b3c8697c93f7539f301e42d8ea5e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_range_test_consistent_dtypes.with_filetext_text_as_fn.assert_df_amount_compute_": {"doc_hash": "08686ddb059ba9efa6f29e4cdc32c5599604586f0a760bf941cad7df71dc2027"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_consistent_dtypes_2_test_consistent_dtypes_2.with_filetexts_foo_1_cs.assert_df_name_compute_": {"doc_hash": "17bd19e311ae294e7d3627f196525c86c2cb3a5b01c9926266fd632343e81153"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_dtypes_test_categorical_dtypes.with_filetexts_foo_1_cs.assert_sorted_res_fruit_c": {"doc_hash": "859864e9c8128041505314ae4873b132be36217686007e272c84750d8bcb4eb0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_known_test_categorical_known.with_filetexts_foo_1_cs.None_10": {"doc_hash": "1f5f3523daef1c3fee9974190e81187fcb5d1eccb7b773ec607087fc4486ef7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_compression_multiple_files_test_compression_multiple_files.with_tmpdir_as_tdir_.assert_len_df_compute_": {"doc_hash": "23d392865bf21cd2dfd2895fdb53bce122c395d169a664dc3c14235564c767c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_empty_csv_file_test_read_csv_sensitive_to_enforce.with_filetexts_csv_files_.assert_a__name_b__name": {"doc_hash": "4f2f529a78c5b8eb66fdd5e9bd7e3c022954d85122ee2e56c9338cfeffc8823c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_warn_non_seekable_files_test_warn_non_seekable_files.with_filetexts_files2_mo.with_pytest_raises_NotImp.with_pytest_warns_UserWar.df.dd_read_csv_2014_01_cs": {"doc_hash": "e33f173cec24cebee419388a8cf91307169cc41562b601c30be4e7560e5855a0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_windows_line_terminator_test_windows_line_terminator.with_filetext_text_as_fn.assert_df_a_sum_compute": {"doc_hash": "2cbf202a53ad3f185135002da31adb4ff6d42351dce90e8467b8fc3f9c03d569"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_None_test_auto_blocksize_max64mb.assert_isinstance_blocksi": {"doc_hash": "80a7244bf21580c5638e1ad6d7702d8cdd436a41a81a5c26f590d7e0e3e93504"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_auto_blocksize_csv_test_auto_blocksize_csv.with_filetexts_csv_files_.None_1": {"doc_hash": "c8132450184938802e0cf17992e9ce67979d7a71628f86556d73eb9411e0e86c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_head_partial_line_fix_test_head_partial_line_fix.with_filetexts_files_.assert_df_dtypes_i8_": {"doc_hash": "d7c4d45e81a9661e558ad8d9b371262b8234d5eb5473664b9cbf473e2fad9320"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes_test_late_dtypes.date_msg._": {"doc_hash": "041a4c6431059e42c4d97badcd69b929741b2b33a2441b216022ffbcba9bf99e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes.with_filetext_text_as_fn_test_late_dtypes.with_filetext_text_as_fn.assert_eq_res_sol_": {"doc_hash": "4e56d147f95767dc9bbe23c2008bccf47c5c0b186479e719e802c20508fd398d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_assume_missing_test_assume_missing.None_3.assert_df_numbers_dtype_": {"doc_hash": "27c8b3f748b3c5f18b6354d9a5e5fb2ce0614167e9cba7796f007b8bce10ebb7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_index_col_test_read_csv_with_datetime_index_partitions_one.with_filetext_timeseries_.None_1": {"doc_hash": "0b41e95d6faefe487982510b9e2b6cbdf4b189d8d6e7bf4777bac10d2902f3f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_encoding_gh601_test_encoding_gh601.with_tmpfile_csv_as_f.assert_eq_d_a_": {"doc_hash": "e6cb4d919d8fc629ba854bde0e283eb5233e291dc51383b7de8e2eafbd5dd3b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_header_issue_823_test_none_usecols.with_filetext_csv_text_a.assert_eq_df_pd_read_csv": {"doc_hash": "5fa628a3a100f2efe30f65e3a4e1202548a57e5947120a17663a1ec245e4b4af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_parse_dates_multi_column_test_parse_dates_multi_column.with_filetext_pdmc_text_.assert_len_df_len_ddf": {"doc_hash": "1405ca82e589fda0f27671ac4af0929aeb5e6c783fe7ba4fb35f360950aa6140"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_error_if_sample_is_too_small_test_error_if_sample_is_too_small.None_1.assert_eq_": {"doc_hash": "0b848cbb43d12ce69b3f3d597c9095d7a90bf7009f6a7c9e6de73f0dab700789"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_names_not_none_test_read_csv_names_not_none.with_filetext_text_as_fn.assert_eq_df_ddf_check_": {"doc_hash": "81e178b2138ca454ada2c747390bf32ad391a4842007bf0954f6bae86a44636c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_multiple_files_cornercases_test_to_csv_multiple_files_cornercases.None_3.assert_eq_result_df16_": {"doc_hash": "0c8f568d7a89331c5619fca00112b0eddb9042ae54ff468e0f2747ab24821f65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_test_to_single_csv.for_npartitions_in_1_2_.None_1.assert_eq_result_df_": {"doc_hash": "25c7bf2c76f6f309f0761a7627a2c1dc6ff062c414f7548babcdc9fda8643061"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_name_function_test_to_single_csv_with_name_function.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_fn_name_functio": {"doc_hash": "9a816e0953c3a3f0ea0c05096b99e02bc4c3e3115a2fb716a19ceb6ee251d015"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_header_first_partition_only_test_to_single_csv_with_header_first_partition_only.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_": {"doc_hash": "3568b22c9af8bb47f5310c6bdafd2b9d65dce062d7074b467b7399333c27b61c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_gzip_test_to_single_csv_gzip.for_npartitions_in_1_2_.with_tmpdir_as_dn_.assert_eq_result_df_": {"doc_hash": "0e7d9d730c78923920420298a51c4fe278a9f1de03830002d61892953816a473"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_gzip_test_to_csv_gzip.for_npartitions_in_1_2_.with_tmpfile_csv_as_fn.tm_assert_frame_equal_res": {"doc_hash": "62f749b6282e253a4a33b6fbe649a503a1e0a716f9340768d6055f44d3d552f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_nodir_test_to_csv_nodir.assert_result_x_values_": {"doc_hash": "73b3fcf06bdaa45605667b1cb64dea779c99065264af2e39dcb87ed3fe2a42d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_simple_test_to_csv_simple.assert_result_x_values_": {"doc_hash": "d225527be6de23e9992866334e1f1699f4984418fa4c48d5a993660c65ae7bfc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_series_test_to_csv_series.assert_result_x_df0_": {"doc_hash": "06a05c0b9be00e21b7bfbe78e1f1c2d6cbb4a677d601cd15fdb9a7c36451416c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_with_get_test_to_csv_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"doc_hash": "197aa5a0dd4c146361730b3a0ebc4b1abc39a268031436d6959f6121a9dd5b36"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_warns_using_scheduler_argument_test_to_csv_warns_using_scheduler_argument.with_tmpdir_as_dn_.with_pytest_warns_FutureW.ddf_to_csv_dn_index_Fals": {"doc_hash": "c1d913122dbac9d1696895528033556eaa6cd623ec9b374b913437370ef1b150"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_errors_using_multiple_scheduler_args_test_to_csv_errors_using_multiple_scheduler_args.with_tmpdir_as_dn_.with_pytest_raises_ValueE.ddf_to_csv_": {"doc_hash": "0327914c997849bac56e4acc98ea4dd07c25435540a91cd94ee40d501c672940"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_keeps_all_non_scheduler_compute_kwargs_test_to_csv_paths.os_remove_foo1_csv_": {"doc_hash": "cd9c462adc46350e38894c1b61fb0486b7fef47e168f1d863f57c90119e99c98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_empty_dataframe_test_to_csv_header_empty_dataframe.with_tmpdir_as_dn_.os_remove_filename_": {"doc_hash": "24682d92117ad6d5896dd0bf1648de4859806bb124fbe211d20ff82e9ec1238c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_test_to_csv_header.with_tmpdir_as_dn_.None_2": {"doc_hash": "ae80bd697ee74c6b435a09d9e47cface77724aad9d9d6e4bc3be52c5d8a0172e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_pd_test_make_timeseries.assert_a__name_e__name": {"doc_hash": "738a7a1309ba759047f28e1697aa5967d4b0ea694d843adb29915ca9df478c81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_fancy_keywords_": {"doc_hash": "f8baceca9283c6b90922177ee4d96602983112c6ce180b980ffc0804eaff23ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_link_optimizations_test_to_hdf_link_optimizations.None_2.assert_dependency_depth_d": {"doc_hash": "5a31857f643fc4d8b33ebb56aec2d9e9bb7d87228288c9f255de4bb4c8f9542e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_lock_delays_test_to_hdf_lock_delays.with_tmpdir_as_dn_.assert_eq_df16_out_": {"doc_hash": "6c3118e2dea77def85299dccd52a528158208524267239dea634c5fe5e41aa39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_exceptions_test_to_hdf_exceptions.with_tmpfile_as_fn_.with_pd_HDFStore_fn_as_h.with_pytest_raises_ValueE.a_to_hdf_hdf_data____": {"doc_hash": "c170370377e125edc70aec091ab129a5fa4af7c1b9bebb3cad6bb3bfdccba68c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_schedulers_test_to_hdf_schedulers.None_2.assert_eq_df_out_": {"doc_hash": "c3580870a37dad63a2a957de5f710e946affd160fe22027a56cb89c461b05938"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_kwargs_test_to_hdf_kwargs.None_1.tm_assert_frame_equal_df_": {"doc_hash": "a3b9270a49d01576fad8d05a684dae48f1413ad484cc4539f5345472e9a0c1d6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_test_read_hdf.None_2.compare_a_compute_sort": {"doc_hash": "667c9010e84dbb02666a651de4ce00fa34f56b9c81879e81ea0fb26121d7dfd7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiply_open_test_read_hdf_multiply_open.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_mode.dd_read_hdf_fn_data_": {"doc_hash": "5e5beab4adafb5eea6912a37ef7d5938599c145699b72ad20816e1d803c9a340"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiple_test_read_hdf_multiple.with_tmpfile_h5_as_fn_.assert_eq_a_r_": {"doc_hash": "cd8c01976d1c28fd656c85af1a78691422453d481924e8b996ed2fa27ad10c13"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_start_stop_values_test_read_hdf_start_stop_values.with_tmpfile_h5_as_fn_.None_2.dd_read_hdf_fn_data_": {"doc_hash": "8aed0cfece1c2f6a1c52bd73a577e186a0ab103082b156beb72c5f67ab38c612"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_globbing_test_hdf_globbing.with_tmpdir_as_tdir_.with_dask_config_set_sche.None_4": {"doc_hash": "bcf94ea4a7ac2d9b22ebe9dcf6567298abc45b24e8fabfc4c9f2a76e9813d696"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_file_list_test_hdf_file_list.with_tmpdir_as_tdir_.with_dask_config_set_sche.tm_assert_frame_equal_res": {"doc_hash": "146f5757cca3915e6ae026e0bbfa97a28300b6874d5702514210f070115c949a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_pattern_pathlike_test_read_hdf_pattern_pathlike.with_tmpfile_h5_as_fn_.assert_eq_res_df_": {"doc_hash": "7fb67657ecd4b6f5db86649b3d46f0bd631e045abd8969b5119f2ecd49992722"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_path_pathlike_test_read_hdf_doesnt_segfault.with_tmpfile_h5_as_fn_.assert_len_ddf_N": {"doc_hash": "6eb0ffbb91625c289b7c1ef0bbaeaaca478fb224f07385ef523afbea768d6ed1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_filenames_test_hdf_filenames.os_remove_foo1_hdf5_": {"doc_hash": "0419e00f47fd4ac512080ef1d550a248f7f3730d21e2191f0ca5edab2d187a95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_path_exceptions_test_hdf_path_exceptions.with_pytest_raises_ValueE.dd_read_hdf_tmp_": {"doc_hash": "c6fd0ba3cb06244fe2a2a9ed4561f83e9d426bf5c33c8fd7b968ad912475e8ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_nonpandas_keys_": {"doc_hash": "a78fcae665529450c78caa130f211af834abafa4d8e48a6c474b2746f4573c60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_1darray_test_meta_from_1darray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"doc_hash": "5816f4ece05c8a493d0f64cd896ccc6b9bea8e3ff4f645d55f3bd7db083c369e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_recarray_test_meta_from_recarray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"doc_hash": "79c000be533d704fd616c415f193d3ce0256b9496f4af54d0df3d32ab658fa7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_test_from_array.with_pytest_raises_ValueE.dd_from_array_np_ones_sha": {"doc_hash": "6641183919342883c173e3284c08f91b5c450725ef49a1993007d3d1b25a5779"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_no_lock_test_from_bcolz_no_lock.assert_not_any_isinstance": {"doc_hash": "14c5781fa81fdf014685a5055f199d937574d6bfc173b6f1de1736e993139dfd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_filename_test_from_bcolz_filename.with_tmpfile_bcolz_as.assert_list_d_x_compute_": {"doc_hash": "c44488add7c0c87c1908d5311591c6650c4f307f857b26b4e8687a0900bba941"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_column_order_test_from_bcolz_column_order.assert_list_df_loc_0_com": {"doc_hash": "e23e368f59bc88c57a9dc29061b523b94b190de5f0807df9637c2ef78bc708a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_dataframe_test_from_pandas_dataframe.None_1": {"doc_hash": "af67469e63a30ce7089997a9820b25477ec0aad1f585e76ef5a0f4de242548a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_small_test_from_pandas_small.for_sort_in_True_False_.for_i_in_0_2_.assert_eq_s_ds_": {"doc_hash": "be41fa43548ea185034aafe429b11cf1da579fd0cfb8129e53ac8669485705a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_npartitions_is_accurate_test_from_pandas_npartitions_is_accurate.assert_dd_from_pandas_df_": {"doc_hash": "ff6de579b1fdaf8845bf182a86cdd2b674ef0c1893255c26a70c74d66958d4e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_series_test_from_pandas_series.None_1": {"doc_hash": "da7fc0515365b02bfe004b682852416e5a51689bd105ef54f1932e932dc6c834"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_non_sorted_test_from_pandas_single_row.assert_eq_ddf_df_": {"doc_hash": "4579b36733badea73721e512694c2318fbaa1de1b6e7916d94284690dd0f8f3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_with_datetime_index_test_from_pandas_with_datetime_index.None_1": {"doc_hash": "350e79d60973980d011b50c60be56c237d070271d03e0a581f3fb20fd52d48d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_DataFrame_from_dask_array_test_DataFrame_from_dask_array.assert_df2_divisions_d": {"doc_hash": "1ad22f27e9bfddfdd5181a281604ce89ee6f3a5575426c324caf9250835a753c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_Series_from_dask_array_test_Series_from_dask_array.assert_eq_ser_ser2_": {"doc_hash": "8d53ff6d6675a07c91b34b771881da6e36e950552907161e081f7bcebeabffd9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_test_from_dask_array_index_raises.assert_m_match_4_2_": {"doc_hash": "0071a5526a1c86ca5cad5b1060aa776568b011c0887ab244a9f283d8d5afd0a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_test_from_dask_array_compat_numpy_array.None_3": {"doc_hash": "4f63048360e2b532d123f9f743fe5cbf9eed66b91e14167b1b4d80b985c17703"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_1d_test_from_dask_array_compat_numpy_array_1d.tm_assert_index_equal_d2_": {"doc_hash": "f6b44ba2b3dd2f7faa4a24c89fe32c213f430a1651d65fcd7c4b656b9a111898"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_struct_dtype_test_from_dask_array_struct_dtype.assert_eq_": {"doc_hash": "5da6e8c7b0fc83af9a8c66c0827f58937b7d779af87886f6bc65407af1dca4f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_unknown_chunks_test_from_dask_array_unknown_chunks.with_pytest_raises_ValueE.df.dd_from_dask_array_dx_": {"doc_hash": "36ee3c206f18be49f47b69d9b5605988b9b6b7d721f57c0363d2d89fdd392704"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_with_lengths_test_to_records_with_lengths.assert_result_chunks_e": {"doc_hash": "621f07b96b69dc9fdddc6dcc34c9b2fb639ce70c55456201aded8d15b9f4a146"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_raises_test_to_records_raises.None_1.pytest_fail_Unexpected_v": {"doc_hash": "1adba1988b7b45de9957b2934a7fded69436b7f16483a8639e2cdae83edd9d36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_test_from_delayed.assert_str_e_value_start": {"doc_hash": "742cbc5a6dd733b2f7a6848f94b4923ea8b9d50a3676289b38a8e4f7b81e073e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_misordered_meta_test_from_delayed_misordered_meta.assert_msg_in_str_info_va": {"doc_hash": "719419cf9fb1b3f7aa57a8209e7e0b71a9218e4b4d6d0cac0493286765e08df3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_sorted_test_to_delayed.assert_eq_dx_compute_x": {"doc_hash": "06f807281c9b4a7338413d10ea0fdb71bdb396e7894603e10d69959c0ab2ecc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_eq_dx_compute_d": {"doc_hash": "f57fe8c3894174ffecc69a4f5cb5132bbff17b5ddbb75208408578428a7ac03d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_dtype_": {"doc_hash": "2e65929284817857929b3971ef6b1dbaccf765d68b1d48be13ecef82bb4108b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_fkeyword_test_read_json_fkeyword.with_tmpfile_json_as_f.assert_eq_actual_actual_": {"doc_hash": "c4314d6bcd411f2cf9c8172061d874e7f566f02bb8322604f6561d022d970e26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_meta_test_read_json_meta.if_orient_records_.assert_eq_res_sol_check": {"doc_hash": "adcf274b5220fd27dc39c57280e17cd4e4de3e7e78b9bfdb88f65e4be388b2d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_write_json_basic_test_to_json_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"doc_hash": "718c9ad6a5bbd0c4f7a82067f7e1c8993daf5d686fcff4cc062c11770d61a5f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_error_test_read_chunked.with_tmpdir_as_path_.assert_eq_d_df_check_in": {"doc_hash": "ce90699c295543784a5ae4d8fbb26b4dc4e548614247373df5349797bd84b9c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_test_empty.assert_eq_ddf_read_df_": {"doc_hash": "257501b3215af90d67e723c45f2eca38fb05e42421c0a580b0724e4e6e4dd772"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_simple_test_simple.assert_eq_ddf_read_df_": {"doc_hash": "88ccb415e6525220bd4578dd69376aeabade8873ac01edfff921b20b52ce0a0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_delayed_no_metadata_test_delayed_no_metadata.assert_eq_ddf_read_df_": {"doc_hash": "fca3091abe076577b532627923e9cac441fe961bfbf874bf832f8ec74b8190c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_list_test_read_list.assert_eq_ddf_ddf2_": {"doc_hash": "62175903d6e7083eebd21c6a002c13238638900a04e833a9a92ca43e99db8553"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_auto_index_test_columns_auto_index.None_4": {"doc_hash": "8a687a17182a277f9435f65edd1c0f68db8b42c0f7e60bb926e01fa52ed23b37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_test_columns_index.None_6": {"doc_hash": "6c2fc9f861b12636db4a8e068d78031805bbcb3dd3d11aecd9c4335bcdeccf4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_nonsense_column_test_gather_statistics_no_index.assert_not_df_known_divis": {"doc_hash": "c7cdff9ed955fb2b413e07d137f681691513d5137305bb7a1b581e1c7c0dc983"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_with_multi_index_test_columns_index_with_multi_index.for_ind_col_sol_df_in_.assert_eq_d_sol_df_col_": {"doc_hash": "662d482d2dcb0c4529e5c58f8a6ed94ef0339632bd1086e8e66fb1b41b3f1fc2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_no_index_test_read_series.assert_eq_ddf_x_ddf2_": {"doc_hash": "2bc7236bcac1d3943a5a8d27b03b0c0eb294c7fb3226021c9cd82d76b31b2547"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_names_test_names.assert_set_read_fn_colum": {"doc_hash": "c396434a38ad94facedee8062484f3ef4424990ef3b83d34b49202cfab9ef072"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_from_pandas_test_roundtrip_from_pandas.assert_eq_dfp_ddf_": {"doc_hash": "0b82caff01a18b01dbfd27a6e2780b25c55d90f2adb9b079b6cd505636ba99ad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_test_append.assert_eq_df_ddf3_": {"doc_hash": "2f121965d727f04586aae39385dd5ea2352562eadc4695ee22026e62bcc64f5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_create_test_append_create.assert_eq_df_ddf3_": {"doc_hash": "5f53858500fd21a8c93dbe1d16b2a9b234cb5920d5dfead8c580947db18f0ce6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_with_partition_test_append_with_partition.assert_eq_": {"doc_hash": "ce23b877a5f7a66e057da3932e8eb8c6dfc2138ea73b2d076e186fb679bf106f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_test_partition_on_cats.assert_set_df_b_cat_categ": {"doc_hash": "d8023e805d3b2fe448a51cae3eff4a65513c0d03f9691310d37b0027b2ab0690"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_pyarrow_test_partition_on_cats_pyarrow.assert_set_df_b_cat_categ": {"doc_hash": "584f637093d41b0608819d7a36694f39a798eb0054f6774b3220268b055635b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_2_test_partition_on_cats_2.assert_set_df_cat_categor": {"doc_hash": "f3547f88ee189a242d2ff788b75a14be3bb60e7b63e88b1bdb09364a3f1b2d07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_wo_index_test_append_wo_index.assert_eq_df_set_index_f": {"doc_hash": "f47c802635fb5365bef246d7c38ed058c07e3408b2b7bb851401095c1d8d7679"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_overlapping_divisions_test_append_overlapping_divisions.ddf2_to_parquet_tmp_engi": {"doc_hash": "c114d364f87801cb1c6d20a08b04fadf576bd2b88b9f88d3b9b7f763ad427b24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_different_columns_test_append_different_columns.assert_Appended_dtypes_": {"doc_hash": "1fce1bc967f5b030ffbd710f0a7e974d01e6f03f2f0ca17f0594c6d1fd38dbe2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ordering_test_ordering.assert_eq_ddf_ddf2_chec": {"doc_hash": "341e9fb94c254f44729adb84635ea7f06741f184d48c377e7527a39004dec84c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_custom_columns_test_read_parquet_custom_columns.assert_eq_df_f_i32_": {"doc_hash": "11956b6ded9d2b07ef78040ab666f74d46e5fb48b723bf3f655ed76e53a7500f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_test_categories.with_pytest_raises_Value.ddf2.dd_read_parquet_fn_categ": {"doc_hash": "b60564999fbd822299bc9085345a36c228fbc0f82c82e6cb45181a46029df34d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_partition_test_empty_partition.assert_eq_sol_ddf3_chec": {"doc_hash": "6cebf1cb52066117c97e9749a5599e9f647183f7dc80f2a5e925f53da4f2057f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp_index_test_to_parquet_default_writes_nulls.assert_table_1_null_coun": {"doc_hash": "c450b959e36d5a0f51e4d8d8f51372367788e47175f2172ad8c24a7e267e52f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default.with_pytest_raises_ValueE.None_1": {"doc_hash": "ec297335267321a454b76b44e5254375ad4c7a70a39fa56255a217182ced8aea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema.None_1": {"doc_hash": "e5972bd6bb140cbc5d154b2a88e6a17e5e631a9c17d8635f5eb51179b37279b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_schema_inference_test_pyarrow_schema_inference.if_index_and_engine_f.else_.assert_eq_df_df_out_": {"doc_hash": "b5f127761e347afdfeedc8aff31923a222c7de682c96da621d40d97bc4b9e50d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_duplicates_test_partition_on_duplicates.for_root_dirs_files_in_.for_file_in_files_.assert_file_in_": {"doc_hash": "1e3d700d2f55c4629cf813dc990e6042f106d1da7e60f53c87a75ca930d80897"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_string_test_partition_on_string.for_val_in_df_aa_unique_.assert_set_df_bb_df_aa_": {"doc_hash": "c5086a8e8a164593a5257ac6620d43becfa8b7a11875196ef3a692e2221cb66d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_categorical_test_filters_categorical.assert_len_ddftest_read_": {"doc_hash": "d7c0d2d5cd120bd683a1e6becc1d4878e8f79eb28b29f6930cdae8f1b99b2cf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_fiters_file_list_test_fiters_file_list.assert_len_ddf2_0": {"doc_hash": "f722ae46db50bb22f58a23b362a3eb00f967a2ee30ec7d180a2885d1540639a2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_read_with_filters_test_divisions_read_with_filters.assert_out_divisions_e": {"doc_hash": "392639d60375401d83f551e7d1224f682b05d0c02cf097995bf7bc96fce46876"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_are_known_read_with_filters_test_divisions_are_known_read_with_filters.assert_out_divisions_e": {"doc_hash": "14879a7ebde170c7b48e1f26867709dcd2152a491faa479647cf92d5453ace19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_from_fastparquet_parquetfile_test_read_from_fastparquet_parquetfile.with_pytest_raises_Assert.out.dd_read_parquet_pq_f_eng": {"doc_hash": "054ec1dd1af879a71f36a86ddec99ac27b0ef184d272df1ac8da77f0614aa09a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_lazy_test_to_parquet_lazy.assert_eq_ddf_ddf2_": {"doc_hash": "65d315ebfecb52b1c677ce4f92225c7c3003cf7d167c4918ec1ddfab4e5cc5f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp96_test_timestamp96.assert_eq_out_df_": {"doc_hash": "e3333527ec4100817b181fc2644a4c2b5882b1f7010abbafdaad8a81ec7671e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_drill_scheme_test_drill_scheme.assert_np_unique_out_dir": {"doc_hash": "f62b9bf3adc67e6c1fb53a642c08544793cc41e0507ccb25936dca5db38e779a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_select_cats_test_parquet_select_cats.None_1": {"doc_hash": "fde3b9a5a2ef68837fa2c09126e20c33e1d58bb88ed197a9095b57388dd8fd65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_name_test_columns_name.assert_eq_result_df_": {"doc_hash": "2f876c067e0b9c73da8484b3f259bec0d5b508789614976dd36aa4520f4de748"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_check_compression_check_compression.if_engine_fastparquet.else_.for_i_in_range_metadata_n.for_j_in_range_len_names_.if_compression_is_None_.else_.assert_": {"doc_hash": "460299fdbe5be32db1ae526158adfbf7a536b4732cf13462225c6e2c3e34d100"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_compression_test_writing_parquet_with_compression.check_compression_engine_": {"doc_hash": "289c80cee2c30a88c83e1e902ac317a3957cc7a443414eee1005c29b45f18dfd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_partition_on_and_compression_test_writing_parquet_with_partition_on_and_compression.check_compression_engine_": {"doc_hash": "37276d2eea40dffd8e868288e7a74f79bc1fd87825189e39976c6ebf196035f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pandas_metadata_pandas_metadata.return.request_param": {"doc_hash": "fc479da81e5986846dc5e3d2994034b7b264fbfdab3a9b35362c9570db6be52a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_test_parse_pandas_metadata.assert_isinstance_mapping": {"doc_hash": "2467e3e6a4782eec8730cd926d9e8ab992ba2644eb819d3a3fb3026aac31e9d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_null_index_test_parse_pandas_metadata_null_index.None_9": {"doc_hash": "09c63fcb7374f9562b856dbdede17e95fc03838fad32508fc4f1df5e3602e310"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_no_metadata_test_read_no_metadata.assert_eq_result_expecte": {"doc_hash": "6964a4b14cc838d1e131adeab8d0c6ac1219ed4971e6f1a963fb1a100c8ab905"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_duplicate_index_columns_test_parse_pandas_metadata_duplicate_index_columns.assert_column_index_names": {"doc_hash": "d7d87ad0c81e8ddfa3620b53d56e6e0bef1b9cfb2240919021297ecddf914aa1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_column_with_index_name_test_parse_pandas_metadata_column_with_index_name.assert_column_index_names": {"doc_hash": "d789468f551f4530eef7a43d0b0250d7a08bd1ba62d1dc54564bfc93a6c67961"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_kwargs_test_writing_parquet_with_kwargs.for_val_in_df_a_unique_.assert_set_df_b_df_a_v": {"doc_hash": "7c7c9f20a22c244ca70416c434fc1210b618b816e21e6ad425e0150d34f5073d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_unknown_kwargs_test_to_parquet_with_get.assert_eq_result_df_che": {"doc_hash": "43b84677f06d561de583a341a1f569fe0aa3622d76dbceb7e4aa4cf76241b55a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_select_partitioned_column_test_select_partitioned_column.df_partitioned_df_partiti": {"doc_hash": "c05197bfea3b4af9671c3b2c50a40c7dfa1f9b5a2327ceefbdbe1c9dee31944d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_with_tz_test_with_tz.with_warnings_catch_warni.if_engine_fastparquet.assert_eq_df_df2_check_": {"doc_hash": "f136f5f3b1218e545855413067bf4708d7c7290e25798f5f50402d85c3ecde26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_arrow_partitioning_test_arrow_partitioning.ddf_astype_b_np_float": {"doc_hash": "2773534975be01508b5e476aebe4166c746d06d6c3973a3e95abec97b94e5dde"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_informative_error_messages_test_append_cat_fp.assert_d_x_tolist_": {"doc_hash": "7fdc1a0c9efe947ad589eb1a4f693338ce327c13807e7f35221dbe42f604b9c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_arrow_test_roundtrip_arrow.assert_eq_ddf_ddf2_": {"doc_hash": "41c8af0849c9a0edbe6ca8d90589f72c00f6c924fd30522f7e70253e08092752"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_datasets_timeseries_test_pathlib_path.assert_eq_ddf_ddf2_": {"doc_hash": "60d5ffb40ca1ba155edf169a6a27373dc14ef143095530d71a8d46af93f72fa1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_metadata_nthreads_test_pyarrow_metadata_nthreads.assert_eq_ddf_ddf2_": {"doc_hash": "522ed86cca848e25a591514caefec3173a6f2f1650ced04f2820a28fd34c9e9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_large_test_categories_large.assert_eq_sorted_df_name_": {"doc_hash": "47119d0b863cc8a5542c373d287fa33093c4bdcac04401ea79b7e9cbe8c64424"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_pyarrow_test_timeseries_nulls_in_schema_pyarrow.assert_eq_": {"doc_hash": "84f86c4bef9c57029916b17995cccd2f9556d8758c451dc3db751a6bcc924da3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_graph_size_pyarrow_test_graph_size_pyarrow.assert_len_pickle_dumps_d": {"doc_hash": "f4477e4d394b417eedb96b0ed847bf455abe7adf15b3c0e2b762764290438211"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_test_getitem_optimization.assert_eq_ddf_compute_opt": {"doc_hash": "6244ae48442df1bf16496a9685bb73ac5d90434ac6a325f6a65082e62473ffec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_empty_test_getitem_optimization_empty.assert_subgraph_columns_": {"doc_hash": "719b1e08f57f4af5a1bc3f3d47cc25954b32b75e0e2c7615c9f42df428563194"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_getitem_and_nonblockwise_test_optimize_getitem_and_nonblockwise.df2_a_b_rolling_3": {"doc_hash": "27a802abd14034e47cf3e58f32f6ff09fc88e101b675dd984358ed2c24590164"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_and_not_test_optimize_and_not.for_a_b_in_zip_result_e.assert_eq_a_b_": {"doc_hash": "92c8d994f0b75ac5c88bff5a1a828f3e9483c97394affb736fb26a8a56d33194"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_pandas_chunksize_test_roundtrip_pandas_chunksize.assert_eq_pdf_ddf_read_": {"doc_hash": "20a77f1286ee094cd65ee3881e420670dc79130d5ce5a444be097550c4836a62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_pandas_fastparquet_partitioned_test_read_pandas_fastparquet_partitioned.assert_len_ddf_read_compu": {"doc_hash": "7f0ebc20251223a812b00bbbe0999f5a51b331fa830bbec019009af0e364e89c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filter_nonpartition_columns_test_filter_nonpartition_columns.assert_df_read_time_ma": {"doc_hash": "270eca13cc9b0dcb1ccddebf205c943fdf8231ff33d65a68d2d4fdd81aef2d4d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_metadata_nullable_pyarrow_test_pandas_metadata_nullable_pyarrow.assert_eq_ddf1_ddf2_che": {"doc_hash": "f66f900285cbbf42a1934f9cb15ee2d7e8d92f4fd5ab4f9119015dc8a3f35164"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow.ArrowEngineWithTimestampClamp_test_pandas_timestamp_overflow_pyarrow.dd_read_parquet_str_tmpdi": {"doc_hash": "7114cd42b3fbaa6bef4fc6fc820abf66bee552a9d57094b8b4efebce52d6e128"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_preserve_index_test_partitioned_preserve_index.assert_eq_expect_got_": {"doc_hash": "c4d9dd154a846b7c97bfc02af832745db3984ff74c45c829ab10d20af28a68bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_index_test_from_pandas_preserve_none_index.assert_eq_expect_got_": {"doc_hash": "754e3fdfb0300de27e3437e20fbf6b436c2b9041d04fcf9da715668989d616ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_rangeindex_test_from_pandas_preserve_none_rangeindex.assert_eq_df0_df1_comput": {"doc_hash": "42d76cd8f85936c5967465a41ac993fcd7f7c76a55f0406f02c51e9a99f41235"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_illegal_column_name_test_illegal_column_name.assert_null_name_in_str_e": {"doc_hash": "aa26800d6ddd01c4c62c6b00df7a69b731f4f87250535aa394680d2f853bf7f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_test_empty.with_tmpfile_as_f_.assert_pd_dataframe_empty": {"doc_hash": "35c08f59a10b7633b3011ed6836ca21a70397a3c2fa2d9a3aa054ec74d4d86c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_other_schema_test_empty_other_schema.engine_execute_DROP_SCHE": {"doc_hash": "86c8d4a54046707f2f4721f0eaf0d78a01bde3cb5092f2ba2e7b4e3052d66f38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_needs_rational_test_needs_rational.with_tmpfile_as_f_.None_4": {"doc_hash": "8f84c1aa62cb242dfd0f4b0b94eefa7e0f574610189fa634b45c9a0c2003dcbb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_simple_test_npartitions.None_6": {"doc_hash": "67d587031d8bdac333fa6392494d1e5d7fbc19b33bc049af0c6d246ecd1c1cbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_divisions_test_division_or_partition.assert_eq_out_df_": {"doc_hash": "691dceb07ba1b9a1cf9c89345e5d3bdde54971bb37d1ec75f2b2cc333646096a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_warnings_try_loc.try_.except_KeyError_.return.df_head_0_loc_cindexe": {"doc_hash": "5e18f7e7fcf309a1b5c215cfecddce151a05b37c65b692458930644dddd8aa18"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_boundary_slice_boundary_slice.return.result": 
{"doc_hash": "1cf5014d3334b3e6d109898c2e68d1567efcbb699563656eed0c58f37a396454"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_index_count_describe_aggregate.return.pd_concat_values_axis_1_": {"doc_hash": "b7c4781f47784b69aeb9756627f96cc8ca3d86de5c5c1f6909d4360f86770510"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_numeric_aggregate_describe_numeric_aggregate.return.result": {"doc_hash": "30be867d767c484bff7531645daba5187a5f39c770c8dcf2c9837cf41836fcb3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_nonnumeric_aggregate_describe_nonnumeric_aggregate.return.pd_Series_values_index_i": {"doc_hash": "c6bf844a8e83932a38fdad162c0d8dc3fd0ab9391f93ca82f6c58b4ee5a45119"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_align_partitions_align_partitions.return.dfs2_tuple_divisions_r": {"doc_hash": "f94c1da1d99c67c01bdf6f993283a3e5a8004e30d480f8758ecb0b4fc92980fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__maybe_align_partitions__maybe_align_partitions.return.args": {"doc_hash": "ae7d74c9a0947735e1b50de2a40790df7d631103997c5d779ea3fda0fca614ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_require_require.return.divisions_parts": {"doc_hash": "7312709933437668b0a2180ce502bd18e5c412bd415b4e3ff835c6464c4aef99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_1_merge_chunk.return.out": {"doc_hash": "d48f8452b20120c90db4bcb0fba67b28e1165787a4b34aacf335bfdf22d51cad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_indexed_dataframes_shuffle_func.shuffle": {"doc_hash": "19dbcc72932e564503bee38eca794dbe631a0ae8dc6237e9175857f4a79c3fa4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__name_sometimes_conflict_hash_join.return.new_dd_object_graph_name": {"doc_hash": "bd49385320efe9de292e04cc1f594c9a6a1963e6ec76b1c3940d0f1682ab5957"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_warn_dtype_mismatch_warn_dtype_mismatch.if_all_col_in_left_column.if_dtype_mism_.warnings_warn_": {"doc_hash": "680da0b29348d1dad9f75487e8099d5bb1ac1b8b07e9aeceaa612485581f7d41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_merge._Both_sides_indexed": {"doc_hash": "b0940e343e67ed03d295bb669322d91b04f21c526361c9e16149af0f831db3ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge.if_merge_indexed_left_and_merge.if_merge_indexed_left_and.else_.return.hash_join_": {"doc_hash": "6026a8d9612d4c35b15047aacb7e3156f845b1b88acd4e8134f738c2bd142322"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_5_compute_heads.if_by_is_None_.else_.return.suffix_reduction_most_rec": {"doc_hash": "88c7ed7730ed3b944e0034904712f3ad8c49bbf0875f27472658bde9163fec0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_pair_partitions_pair_partitions.return.result": {"doc_hash": 
"b96d83e8ee219a2fd263cf1536b2e6e536d58ebaa8c2fe77a1c5e4ae8841d2a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_indexed_merge_asof_indexed.return.result": {"doc_hash": "944749f83095850e40a4f85763d35cd99b036b01fb5731e784e9aecbf110843b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_merge_asof.return.result": {"doc_hash": "ca319b3ffff32ce6d08a1901fd085de1b6ae6a339086b46ad73880968a91357a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_8_concat_unindexed_dataframes.return.new_dd_object_graph_name": {"doc_hash": "5fdbc8363a27a65a2a6d0af63fd3432692027cf69af26b255144c2bffce1ffd5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_indexed_dataframes_concat_indexed_dataframes.return.new_dd_object_dsk_name_": {"doc_hash": "86196a700b0442cd9e0accdb7d0b0a61a27753562f47e9ca9d0a7e18424d2afa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_stack_partitions_stack_partitions.return.new_dd_object_dsk_name_": {"doc_hash": "a445008465935f28a9e1b36740ef781fa90be4650a5379130fafbec864ea6aa8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_concat._Concatenate_DataFrames": {"doc_hash": "4728c512bf0332fd47686d70b5a39be033ad88bb2f0025d245a0597e6b91804c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/numeric.py_pd_": {"doc_hash": "0112d0a7ef53ebb4f0a2a08f6246aeab2b454b97b0a349f51e74aa44dfa03f0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py__Dataframe_optimizatio_optimize.return.dsk": {"doc_hash": "8325a1ab397ab49e203fca8786b5a44041efe07ca9ce7ddb30610d593959030d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py__Determine_new_partitio_math": {"doc_hash": "334bd96cea6d0b7a7d94a0f0816efa69518e4398c431281c8de674bd50775ab5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_np_sample_percentiles.return.qs": {"doc_hash": "5349b510397369ea97aaa06978763e1a592a6e914c3179933c29fb50b86d61d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_width_tree_width.if_to_binary_or_num_group.else_.return.num_groups": {"doc_hash": "66eb22ff94aaf9d79bd9227029debb76d8f10504e0cb8147ffb0aaffd47ce808"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_groups_tree_groups.return.rv": {"doc_hash": "d2c8499c9e970fc53d080a41e59138ba19efbc63c18fa5eaaaafab63ab3afde4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_create_merge_tree_create_merge_tree.return.rv": {"doc_hash": "8c952f3295fd53dd74ec1205ae3b85953b3916b8b4c1c0874c6156d4044e5242"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_to_weights_percentiles_to_weights.return.vals_tolist_weights_to": {"doc_hash": "37f44c36cdca27385060965de83b568403a36cd98fd1efdd210e88072157b365"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_merge_and_compress_summaries_merge_and_compress_summaries.return.vals_weights": {"doc_hash": "ae6d2afab3ceba92bc3a7404513f9087ab0a0e8518b42d2ec8499e0ee946f90e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_process_val_weights_process_val_weights.return.rv": {"doc_hash": "219c560d1f844739dc56ba87e9fba5fab190be5507014a594ec593760846a853"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_summary_dtype_info.return.df_dtype_info": {"doc_hash": "5db5503d93a87f4f96c230e34ee43bf4c32cf436c312a90432a5b7778c9e4f85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_partition_quantiles_": {"doc_hash": "5c313d7e1e4852d8c8e791809c032b1395ad7002547f233fd1267809e4955f6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_get_dummies.if_isinstance_data_pd_S_get_dummies.return.map_partitions_": {"doc_hash": "995681fff475b135e02f0ff9013fc97173f44e825a345810d2730e72a58c54cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_6_": {"doc_hash": "f8b12f9935d31676f2d267aed4db0e464c47e5ccc2db4f71e5b6fe34053fd388"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_datetime_overlap_chunk.return.out_iloc_before_after_": {"doc_hash": "97c5f3717ead7532ea4fc2689177e74d01550a0daef291af12c08cfa32975b5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap_map_overlap.timedelta_partition_message._": {"doc_hash": "b475a2e56b89daa6270233836b4eeaebba2c45f5f85471d02d68fa89f98dc020"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap.if_before_and_isinstance__map_overlap.return.df__constructor_graph_na": {"doc_hash": "8cbb11a8ccc34e83bee4661e2d74112a2742363634f6324a9977b86f964f01c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling._call_method_Rolling._call_method.return.map_overlap_": {"doc_hash": "e99574f22f54ab2e2f6c74a66a9cb7695065eda66d9b28f29416e4cc181f46ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.count_Rolling.quantile.return.self__call_method_quanti": {"doc_hash": "92f405cdd7e61e8c34f30b00836b98423ac305668dd8c46d4281224f174f79fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.apply_Rolling.apply.return.self__call_method_": {"doc_hash": "824e688769346101aa438578d3d5eb83405a3d18fa8e70877bf691d3b3c989a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_contextlib_logger.logging_getLogger___name_": {"doc_hash": "d8fcc313d98e138e70cca93a9fe343caf1aa3097b5b92c37df7321cff886114d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_index_set_index.return.set_partition_": {"doc_hash": "55a00c02aeeaaee2002cd49c7acd16d2671b5f9e766e8a98826fe32b9526e010"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_remove_nans_remove_nans.return.divisions": {"doc_hash": "ccd74e9904c18611ccbe201d506293c82f0e2fead40597e130ee37714fe6726a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partition_set_partition.return.df4_map_partitions_M_sort": {"doc_hash": "891fe4095468632176331042431ae85e3999cb97822409a6631167e52897381b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_shuffle.return.df3": {"doc_hash": "4c7096cc9737763cc22920c47ea7a4c2a046559dd407b8371f3dbf1621a37672"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_divisions_rearrange_by_divisions.return.df3": {"doc_hash": "9d6349a6ebfcea98252132a8b5eb56607474570aa506551364a3d08804cb8c9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_rearrange_by_column.if_shuffle_disk_.else_.raise_NotImplementedError": {"doc_hash": "973b54ff017054f99400b4fda18c1ef2bafece36cbae38874a2a0efa8161db1c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd_maybe_buffered_partd.__reduce__.if_self_tempdir_.else_.return._maybe_buffered_partd_F": {"doc_hash": "6de32b46419d29a451d10064a9056df190d831198cbfd010319a0187d932d7ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd.__call___maybe_buffered_partd.__call__.if_self_buffer_.else_.return.partd_PandasBlocks_file_": {"doc_hash": "4eb8862aed2c999cdde87b50952a74b151cb08ad8a482fc81d3892850448ef21"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py___partitioning_index.return.hash_object_dispatch_df_": {"doc_hash": "516156ee0146cb9b06bcb504d8fb134a92d5ae3a57f78e58f9e4d1318283c71c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_2_shuffle_group_get.if_i_in_g_.else_.return.head": {"doc_hash": "3a0c5aa57c515ba40f8c7f4f8954578c167c09ce66888256ce210676d4791d0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_shuffle_group.return.group_split_dispatch_df_": {"doc_hash": "3a15180f4cb6263a37ccb96991504a872809ba4628a614b487cd5a1d67b4f52e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ensure_cleanup_on_exception_ensure_cleanup_on_exception.try_.except_Exception_.raise": {"doc_hash": "c644d2737240e62dd245912fdf8ed360f83ac92a5fa96c8b715b620f9d4040c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_3_get_overlap.return.df_loc_index_if_index_": {"doc_hash": "cff74768816b7d825885434cb212d9d08d55002e81523553be81860fdcdc49e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_fix_overlap_fix_overlap.return.new_dd_object_graph_name": {"doc_hash": "5c5f8828f63dd3ed617d5646fda8a38021fdb5f6f0c5b3729ae12caaf4853e89"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_sorted_index_": {"doc_hash": 
"2638a2eaa1f76b136eb2608bf9331cd378208fc78bdecfe4940ada801d2dfd47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_contextlib_MyAccessor.method.return.self_item": {"doc_hash": "9ee49b8ba40fa64760ef782ddb3712022892ed133c17e862017bfbafa655a750"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_register_test_accessor_works.with_ensure_removed_dd_Se.assert_b_mine_method_": {"doc_hash": "5d866894c442da9317ed3e5bd811e3e9eebb36888facd9b3f9cf7343eb343f00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_df_ddf_df_ddf.return.df_ddf": {"doc_hash": "9926de3bb3ea2a04dd8e3dbab9e593b27fd07626787302e0d5be1c842cec8d4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_dt_accessor_test_dt_accessor_not_available.assert_dt_accessor_in_": {"doc_hash": "adae725be97812f3c2f2d7b36c7101f98e33a68612a552140f4bd129d5ea91b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_test_str_accessor.for_regex_in_True_False.assert_set_ddf_str_col_st": {"doc_hash": "2a0281af12e01b78ea792728867de3f833c7aefb4ebd931e9120d82c602d2fb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_not_available_test_str_accessor_extractall.assert_eq_": {"doc_hash": "33ed514b6b733d48a58035b4656756c516b61a3faf1a9b0843c83b47d9b6de36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.cases_test_arithmetics.ddf8.dd_from_pandas_pdf8_4_": {"doc_hash": "4f98a906fe18ac98b84ca092f1c04059af4b58ced7fcba949484dd240fb1622e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.pdf9_test_arithmetics.None_1.check_frame_arithmetics_l": {"doc_hash": "5ec5c7a4f133649ff0948cfe51e002c81dbb4f715ae46549b9a35a1655f64518"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_arithmetic_names_test_deterministic_arithmetic_names.None_2": {"doc_hash": "ee3ca48ba15ff1904cc2f32c0bd9ed91f3ed72f07c885fa63f58943f5510b3e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index_test_arithmetics_different_index.ddf6.dd_from_pandas_pdf6_2_": {"doc_hash": "a7b875111deb85e378db952492391f82532f31254dc4791f9253aa0bcf17a59b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.cases_test_arithmetics_different_index.ddf8.dd_from_pandas_pdf8_2_": {"doc_hash": "49b384c6aacf81dde10d88505b52f040c13bda62766f54a127747d23e9fcac22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.pdf9_test_arithmetics_different_index.None_1.check_frame_arithmetics_l": {"doc_hash": "ce9e6c53fb1182129abe2d0ff496e8a218f225827b3b63ab2d65fb62ee2e4797"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_series_arithmetics_check_series_arithmetics.None_1.assert_eq_l_r_el": {"doc_hash": "37bfd646933860ffe92a5f0d9be2fd93d5e0c58da293cf6c956c5530499267c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_frame_arithmetics_check_frame_arithmetics.None_1.assert_eq_l_r_el": {"doc_hash": "9b9dc7f8b9fe59916719dc097a35805a1b35511c045f7df639aabf0aae23eb69"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_test_scalar_arithmetics.assert_eq_l_r_el": {"doc_hash": "dc851ff01415cbdef5a2ddcba4b08ce90a685351b51b025fa2932051b2f81b8a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_with_dask_instances_test_scalar_arithmetics_with_dask_instances.None_7": {"doc_hash": "a3b07c2d7b1e47bcef4f2027188c2925a4ad6b45571d3627960bc9be539b94b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods_test_frame_series_arithmetic_methods.s.dd_core_Scalar_s_0_": {"doc_hash": "ff83f635105d72160e3ebf53f028b0de39b5284f88e3268bb46f11efbb0c03b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf.pytest_raises_ValueError_": {"doc_hash": "16d5512dc4ca5be8005fc96285738073d9a35fd6e34756bb4e7cde9b4f6d1e37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.None_2_test_frame_series_arithmetic_methods.None_2.for_axis_in_0_1_index.assert_eq_l_rmul_r_axis_": {"doc_hash": "1a0adf4ff88fb72d071d8cee5e2857487850b5a89a92a39fe6136bcbc406d676"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_test_reductions.boolds.dd_from_pandas_bools_2_": {"doc_hash": "cd7026cd7e2913e94fc61eb4082cfd6052ab2183340c0f759e4ca30799a5011f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.assert_dask_graph_ddf1_b__test_reductions.assert_eq_ddf1_index_coun": {"doc_hash": "86032984ce60a70cf2adfee07303f879efe58606fea729be5acd1099d96bda3c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_timedelta_test_reductions_timedelta.assert_eq_dds_count_split": {"doc_hash": "113f4e0d2fdc2e4073ead66b10b50837841f2aae73e3935532ea07f7f66b98ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_out_test_reductions_out.None_4": {"doc_hash": "7ac6f96100a15e7f4e87c09a5f84a88aa51b603ce187d3888787361c219aaa1e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_allany_test_allany.None_19": {"doc_hash": 
"19f3db16841ceab3b6f09c936577042c0a7897f2af560b496813baf1017db352"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_reduction_names_test_deterministic_reduction_names.assert_": {"doc_hash": "c98442f74ef4b260469b286902348864edfe938790d560fbfe136cf3b091cf22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_test_reductions_frame.assert_eq_ddf1_mean_split": {"doc_hash": "53991063d1a6b916f71c54561887ddfb0db9ea9036f10067d4f0fb45526305c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan_test_reductions_frame_nan.assert_eq_df_mean_ddf_": {"doc_hash": "4dd408960799e29d9f379e91ad44d9e9d4968ffc7ebda30dd33b3fa572947fa2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_comparison_nan_test_series_comparison_nan.assert_eq_": {"doc_hash": "398365e3ff529e10b9c5158aa3496454e0c43ec826e3248aea4bedb7ce29fb17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_moment_test_empty_df_reductions.None_1": {"doc_hash": "8a2ce597501142c7dd224a149357de7e96ddb4dd961e8ee901baa1920fcbce31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_pd_test_meta.dd_utils_assert_eq_": {"doc_hash": "5c517f14f83d31e9c2e9090353f06d1d11595fb35c782bcc4ffffa25ba5a23e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_test_ops_": {"doc_hash": "488fe3a1d13b57cbba6636bf7b404f610c6ea88122b0cf22dc25873db39ef944"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_operator_frames6._i_set_index_i_y_i_x_": {"doc_hash": "ed86771ffa2142cbe4b2d16890a1af46649e9441244f60f2a9f1ec18e4480aa1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_concat_unions_categoricals_test_concat_unions_categoricals.None_8": {"doc_hash": "f0d11539fa8cab276e9343b3c295e6d465e4e4688aef3c1547c98000fadfd53b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_unknown_categoricals_test_is_categorical_dtype.None_3": {"doc_hash": "ad43c1a7b0c9a742912b709dd58e9ee0c067f5eb7f3fe390b69e42ddaf275909"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_test_categorize.None_2.ddf_categorize_split_ever": {"doc_hash": "707a51be02f0d6be8a8c281bb6d786c633dbb199a8d4c87764a61e52afa34ec7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_dtype_test_categorical_dtype.None_5": {"doc_hash": "51ee7aa209ad84c46901223e44414b8f0c98c8054fd2df5586431dfdd4e1a4fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_index_test_categorize_index.assert_ddf_categorize_i": {"doc_hash": "c13d71593aa75a29ea3198d61ec61cf35d50a5e171b501d7811726c8c26cf04b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_test_categorical_set_index.with_dask_config_set_sche.None_8": {"doc_hash": "cbedb1264a3d80c226381ebfeeeb96feffb2a0928749faf7b7d4079a7e9b5665"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_npartitions_vs_ncategories_test_categorical_set_index_npartitions_vs_ncategories._Test_passes_if_this_wor": {"doc_hash": "fd8145630dbcd888e77851f56fc8e0d5060068210155358364397eb7810d8d5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_repartition_on_categoricals_test_repartition_on_categoricals.assert_eq_df_ddf2_": {"doc_hash": "b4a5e28f0a999891056190e8fe889b1115666cf71b624de9eba9b0314e5f1c0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_accessor_presence_test_categorical_accessor_presence.assert_not_hasattr_ddf_in": {"doc_hash": "62eb9f8c34927ce7557a4b7da7192958fbc4e10771f8c54ea0ba601758e94c2f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_nan_test_return_type_known_categories.assert_isinstance_ret_typ": {"doc_hash": "d4f381fd21a6e7664e965a6dd08a063fdacfe973d24b8b7692fdc2fc2ba0b425"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor_TestCategoricalAccessor.test_callable.None_2": {"doc_hash": "5e8848f857de277e9ea5ae5a2798c9443b9b1ba622a81ad49f335aa69a14eb96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor.test_categorical_empty_": {"doc_hash": "711d787134f4038ddb2d39d988829f3f4f99919db5ab2c0fc16fcaff14a21922"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_from_decimal_import_Decim_test_register_extension_type.assert_eq_df_ddf_": {"doc_hash": "6fdc5e4b73b4f3b6132d2db3c3b8a5103093f6bf8b09f0926bed996506e63659"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_test_reduction_": {"doc_hash": "a458927aa6d4c82a57c307ed2d731aa40f9044ad94041e889ba9ba0d0b3533e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_test_dataframe_format.assert_ddf__repr_html__": {"doc_hash": "b7a49382dff173d17e1c90f841e949b0b60c02936e19260d142ae8b0d58407ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_with_index_test_dataframe_format_with_index.assert_ddf__repr_html__": {"doc_hash": "440c7e2c7d216b1c4ef611cbd685eac089509212a6e1071c388d92b82cb30508"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_unknown_divisions_test_dataframe_format_unknown_divisions.assert_ddf__repr_html__": {"doc_hash": "ec21d1c48f527d6b775964483a2c9334dff9786cdac9a6d16715a2153772d0ab"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_long_test_dataframe_format_long.assert_ddf__repr_html__": {"doc_hash": "d0702e32976d03d62e5c794cc76e8a0e05223174f078a788657f102d179b4ff0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_test_series_format.None_4": {"doc_hash": "a8b78ad98ae61d662f258f7e8684cf9dce48c650678656e7b7015f05ed7d0322"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_long_test_series_format_long.assert_ds_to_string_": {"doc_hash": "4a1ab90dcc84ee221a3b1536fa3435c1d4d6d7abded448c10e5cb06543820033"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_index_format_test_index_format.None_3": {"doc_hash": "b253f00668e61b5a6b6fb4541c992ffa9c8e08c04929abfc85bf37bb83d9de44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_categorical_format_test_duplicate_columns_repr.repr_frame_": {"doc_hash": "03a199dad409f31019057e9f2e59bef941fe0acb4e82d692a7a3d3fdcfddd59c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_empty_repr_": {"doc_hash": "3a058838e0528cc1073c869ac691e87c405a8e2854b737fc58b0b40db36f7619"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_xfail_test_groupby_internal_repr_xfail.None_1": {"doc_hash": "3e2c6df0669ad8e0b4802efae5c721f395a8aaa7b6f4e4df929fd09219425798"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_test_groupby_internal_repr.None_2": {"doc_hash": "adacae37ba987694348c2bef094c7c2a7090ea30108fad82124b06bed883f1f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_multilevel_test_groupby_dir.assert_b_c_d_e_not_in_d": {"doc_hash": "658fa715708018fd4233ad12893a064fded1759cf35179171a3e0980ea93f67a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_getitem_test_groupby_multilevel_getitem.if_agg_func_mean_.else_.assert_eq_a_b_": {"doc_hash": "b93f75763e86c28e696954435fcffca865b4018e66820fb549876eda130a96ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_agg_test_groupby_multilevel_agg.None_2": {"doc_hash": "809498d84c88debd6bb8921fc8017b530fe8ed9997baa0adfdc84ac4c7b980b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_get_group_test_groupby_get_group.for_ddkey_pdkey_in_b_.None_3": {"doc_hash": "9e870cf3082732be652ab5e1fd89d1dfe24e283151f59eb02190c5827764f1ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_groupby_nunique_test_series_groupby_propagates_names.assert_eq_result_expecte": {"doc_hash": "89e2d705d7bbe6cafb4e793a51386f4e8bff77877036245bb6f7bbc11ab230d3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_test_series_groupby.for_dg_pdg_in_dask_gro.assert_eq_dg_prod_pdg_": {"doc_hash": "64878d2a15f68043acbfe1bf5653a0e09a070d7b699456c5cef755c47a73a4d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_errors_test_series_groupby_errors.None_4._dask_should_raise_the_s": {"doc_hash": "2ca7e97f65d5939e36e260f2115726095e780efea703de6b44f6b66e4068d926"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_index_array_test_groupby_set_index.pytest_raises_TypeError_": {"doc_hash": "a5b51c917ba30ef27a296df3e6b4caa9464fa030eb2d60771cccb2786604ea0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series_test_split_apply_combine_on_series.for_ddkey_pdkey_in_b_.None_1.None_1": {"doc_hash": "2ad10e36bb31cd1495ed6d9b9ffaa1a8ee3fdc1624a56cd1a5df4d8c782286e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf.for_ddof_in_ddofs_.None_1": {"doc_hash": "09321224eac48ee00a0cd75e05c7ae1981f200e860b62f806c875d7e5cac3b7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_i_in_0_4_7__test_split_apply_combine_on_series.for_i_in_0_4_7_.for_ddof_in_ddofs_.assert_eq_": {"doc_hash": "bdf40ba35c7c918a5e256be4a4e7e6df2d092d52e7379ab7160e3e108fb88426"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_reduction_split_test_groupby_reduction_split.assert_call_ddf_a_groupby": {"doc_hash": "d5b267c49a5bd2b3b673a767bf1348d0caa48c46540edfce41a7f0d8106c4857"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_test_apply_or_transform_shuffle.with_pytest_warns_UserWar.assert_eq_func_grouped_pd": {"doc_hash": "f660623f18c290b60b1bfe94b62e4f1c0910c2586e53168e917d7222bc6f6544"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_multilevel_test_apply_or_transform_shuffle_multilevel.with_pytest_warns_UserWar.None_2": {"doc_hash": "e5e5dbcb3b6460a36011a62c902685cbf1dc00459ffb2fa6190dd5d3ccc2ce4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_numeric_column_names_test_numeric_column_names.assert_eq_": {"doc_hash": "4eaa1d950490c13d2a5b895caf25d9357e3bf5a917835f450fd2f2ee0dd0b06a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multiprocessing_test_groupby_multiprocessing.with_dask_config_set_sche.assert_eq_": {"doc_hash": "659a8f471dcc1dc2fb3752f7fe5c2ba4ab2bda17a25df9653b3d4204fe2a2138"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__single_element_groups_test_aggregate__single_element_groups.assert_eq_expected_ddf_g": 
{"doc_hash": "31f23211e94bcedefb5b7c0737076594b981210b9e42e44877b129a0d18d645d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_build_agg_args__reuse_of_intermediates_test_aggregate_build_agg_args__reuse_of_intermediates.assert_len_with_mean_fina": {"doc_hash": "dceedbd9ffb9ac84389a6e8ff0c12d4a081a88d6ce46e27ef2ffbb7cf5f8e365"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_aggregations_multilevel_test_dataframe_aggregations_multilevel.if_agg_func_nunique_.if_agg_func_in_cov_c.else_.assert_eq_": {"doc_hash": "635610e1b3de9967c7dba73a009d6360f044a80c7a47686dc82ea8bb084262ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregations_multilevel_test_series_aggregations_multilevel.assert_eq_": {"doc_hash": "a3215c790401c5cb199cfc9328ed268ecea23b934bdda1be6dd20ed1df1a0f5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_meta_content_test_groupby_meta_content.None_1": {"doc_hash": "9074ddd303bce2b2426fa80c412644671b7c1dcf79e353db357d5e2a6f2e1ffc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_non_aligned_index_test_groupy_non_aligned_index.None_4.ddf3_groupby_ddf7_a_": {"doc_hash": "663e22fb581fa1b617a86d9d70a82d5318029cbf286355c347f72b54857d806b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_series_wrong_grouper_test_groupy_series_wrong_grouper.None_3.s_groupby_s_df_": {"doc_hash": "1fe4c617894b2c3b49d601b0d0fe1414305f4698755554b4cf95c5a788617f8a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_hash_groupby_aggregate_test_hash_groupby_aggregate.assert_eq_result_df_grou": {"doc_hash": "dd76cdc27872c5e1829a653a37b435662b2e780cc217f82574ae1dcd7753e5e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_out_multi_column_groupby_test_split_out_multi_column_groupby.assert_eq_result_expecte": {"doc_hash": "667b04a19c6e41aa65246476d18da3c1a288ec5e553b4be0c178bf6a194f2d45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_num_test_groupby_split_out_num.with_pytest_raises_TypeEr.ddf_groupby_A_split_ou": {"doc_hash": "48c1821c1204898966679ac576cc109d2f76d95858f66960b2f72277ec132c4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_not_supported_test_groupby_numeric_column.assert_eq_ddf_groupby_ddf": {"doc_hash": "31319f08f49ec1b9fde82479d35e1d642ebe2d904ad0e8b99425b7a6c9da99b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_test_cumulative.assert_eq_getattr_g_func": {"doc_hash": "483022f299fe744f2ce173a68b1e5855f8fc5e53f6fe3760c04ad234102afaf2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_axis1_test_cumulative_axis1.assert_eq_": {"doc_hash": 
"08cea69c9eed6cdbb94d8836717c1bb95139f7bdd2218df54a30e901ce838d28"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unaligned_index_test_groupby_unaligned_index.for_res_sol_in_good_.assert_eq_res_sol_": {"doc_hash": "ea38c2667034ce94467b2d6de465773c3afc04a76a192181287fbb09e607678e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_string_label_test_groupby_string_label.tm_assert_frame_equal_res": {"doc_hash": "5d92f31eb9c463beca411bcddecd00a514c33972bbeda7582241fd5c93718d92"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dataframe_cum_caching_test_groupby_dataframe_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"doc_hash": "e3d6eb6b844a56d3eee8e19167c6dfd84b564bbfb6c52ad88e0e740e7e3cbc44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_series_cum_caching_test_groupby_series_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"doc_hash": "e377b596772bd16e2759d51aa36077c165a7d36a22c62e3b4f0912c6d798fff3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_slice_agg_reduces_test_groupby_agg_grouper_single.assert_eq_result_expecte": {"doc_hash": "68c9e936b361d2f9d8da7f4961251dbfb6f74a9d676772b7df1c862c58cbb8aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_grouper_multiple_test_groupby_agg_grouper_multiple.assert_eq_result_expecte": {"doc_hash": "3380a0f8e912ea6fb01c6af96abf213923ec5db0deade244d51317287aba6101"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_agg_funcs_test_groupby_column_and_index_agg_funcs.None_5.assert_eq_expected_resul": {"doc_hash": "36d8ea4e76312c63ea1b7a9024136e7dba0222550070d16711cc212ea0056071"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_custom_mean_test_dataframe_groupby_agg_custom_sum.assert_eq_result_expecte": {"doc_hash": "41529c50237a3d5c52895a7f820ab44f48be5f2a9f48dee631b943ffe55bd22b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_agg_custom_mean_test_series_groupby_agg_custom_mean.assert_eq_result_expecte": {"doc_hash": "eb575caf00ab507373e03ce643ae359540fc9f7b82353abf60842b0396307678"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_same_column_test_groupby_agg_custom__name_clash_with_internal_same_column.with_pytest_raises_ValueE.a_groupby_g_aggregate_": {"doc_hash": "dc1117d107f6cdd497bcb3c2dce67bc8eadbde655d1770d3248137140f218d67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_different_column_test_groupby_agg_custom__name_clash_with_internal_different_column.assert_eq_result_expecte": {"doc_hash": "0b41d759feb27ba3975884f21f0f70ba34dfc6638dbfb6576377cd3d88769609"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__mode_test_groupby_agg_custom__mode.assert_eq_actual_expecte": {"doc_hash": "1033208e755c1b7a471be8a29f4902b6853937e37cdbe38cf32e2d6e507e5098"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_select_column_agg_test_groupby_select_column_agg.assert_eq_actual_expecte": {"doc_hash": "720546784cf0c1fe026e34c822b88cad4a5b35fde440d2b9b280a6ada89c9c73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_object_dtype_test_std_object_dtype.assert_eq_func_df_func_": {"doc_hash": "397c706c28f984755edba1ba62813e659d5242f7eca0da7f7ec4c42478cc40fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_columns_int_test_timeseries.assert_eq_df_groupby_nam": {"doc_hash": "8a970303d785291e15d7e15a7f39b21bf507e8b0190728e343f7d507f8ecba4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.None_1": {"doc_hash": "81b47ddd0697318c2549f64ecba4490217a0a2cbf629ba42b1f2b00b087c7590"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_group_keys_test_groupby_group_keys.None_1": {"doc_hash": "8d41877d484a5d6edaa04f09b175d15d10f27f31892b9d1e184b48b1fc2e4a96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_cov_test_groupby_cov.if_isinstance_columns_np.else_.assert_eq_expected_resul": {"doc_hash": "e96b1e0103d790cf73a1ce9864ecde403891377e4eac1b0b541e1ef7888f3b43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_test_df_groupby_idxmin.assert_eq_expected_resul": {"doc_hash": "6ad0f11a5851aef1d06c59bb4b253ea0af892851e3fcc6b26ddcc3cf0181b8d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_skipna_test_df_groupby_idxmin_skipna.assert_eq_result_pd_resu": {"doc_hash": "40591b5e173e96b362dc2fa7a1c9fed696cfe29605627fd43319048bd0207949"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_test_df_groupby_idxmax.assert_eq_expected_resul": {"doc_hash": "2cbedaf61109ef2207ad0699c259edc74d3991d87a1d19159e44e5b29441e2c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_skipna_test_df_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"doc_hash": "ee9cfc198f0de2baa3c8feee2c09f033682979485e4b5d5ff80829df4ab2c939"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_test_series_groupby_idxmin.assert_eq_expected_resul": {"doc_hash": "3bdf85163af14c18f9c0a58e911d52bb90a050f41716aa879f5ff12edcd2dbaf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_skipna_test_series_groupby_idxmin_skipna.assert_eq_result_pd_resu": 
{"doc_hash": "0d211391d76fbb1cae447cc6e79a6036f5ed77c15ee8b6a77dc0ddfaeff59ba4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_test_series_groupby_idxmax.assert_eq_expected_resul": {"doc_hash": "b28b9d7cab6051915d30c16eae18b566d7619be13029167350fc8b75d1a1bfc4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_skipna_test_series_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"doc_hash": "93f4211fee82dd74f3de292ad995fce3d50f52b9bcac0a534748bc28ab65c361"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unique_test_groupby_value_counts.assert_eq_dd_gb_pd_gb_": {"doc_hash": "46fa2ddf59197b65b4ed6562501834a521e2e876a891d10a66de92a1154e3eea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_funcs_test_groupby_transform_funcs.with_pytest_warns_UserWar.None_1": {"doc_hash": "e90328b5586c6ab47c51e486de7f602da1c746f399b819447da4d2aa5890e012"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_ufunc_partitioning_test_groupby_transform_ufunc_partitioning.with_pytest_warns_UserWar.None_1": {"doc_hash": "6a93de237c8caba4e69caa8aad5333307f3212c7de85e5743a6511ab8d3ee9b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categoricals_test_groupby_aggregate_categoricals.None_1": {"doc_hash": "9e382e10eea4d2dbc8bd0202703d64f09a13eca9a93c222cb14e1a77e9ab28f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_pandas_test_groupby_dropna_pandas.assert_eq_dask_result_pd": {"doc_hash": "bc2a7d542d71178c7c7dc2a28a440197f13ae283ad7761ed9d9495277d1d69b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_cudf_test_groupby_dropna_cudf.assert_eq_dask_result_cu": {"doc_hash": "8bd8b07068d6b5562b5ca778d08ec961f5c223303b4a2ec44b4254481e830611"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_large_ints_exception_test_groupby_large_ints_exception.assert_eq_": {"doc_hash": "008e518d18a05d4f67d220250a5847009a067ad4a70018da7df18f5053219989"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_test_groupby_sort_argument.if_agg_mean_.else_.assert_eq_result_3_resul": {"doc_hash": "1b0c7a40778856c7feb3c6c047affc319748d9729feff4096ccd4fc37c216e5a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_agg_test_groupby_sort_argument_agg.if_sort_.assert_eq_result_index_r": {"doc_hash": "22b0d92aac7679c2589e167f5509e638dbec23799136e2e42afefaeadd96ce24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_np_test_hash_pandas_object.if_isinstance_a_np_ndarr.else_.assert_eq_a_b_": {"doc_hash": "af89d3105848cc2220075bc0150be9edcebf275d0d3e3fe1a1e8dbd402e0cb4e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_categorical_consistency_test_categorical_consistency.for_s1_in_.for_categorize_in_True_.None_1": {"doc_hash": "73b13ba4d010aec72ea654e90834e0536cf733320793ed6b59a26333e3cb4946"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_object_missing_values_": {"doc_hash": "ce19e58b44ab46b26d35e4f2743ab77b5fc7a119ccbb708656bffbced15f135a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_test_split_every_": {"doc_hash": "25ecfde19f2cf1e1d6d7d1ee6107bfb7422556165fca16e6ca9c8214b85d6fdc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_test_loc.None_4": {"doc_hash": "4f0a9d4d2af4768d57959febdc366ecbcd349e27754d263ae9cd4859ba1294ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_non_informative_index_test_loc_non_informative_index.assert_eq_ddf_loc_20_df": {"doc_hash": "3b3d84cd601d205d8fa71e7b5da22a763f8c2a5cc32bdf3e93e1cd191685b24e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_text_dates_test_loc_with_text_dates.assert_len_s_loc_2000_01": {"doc_hash": "3637d0d5e3091babbfd3c6abaa30fd4f02e402b8e39be32a4f40a8c4e08f5c0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_different_partition_test_loc_with_series_different_partition.assert_eq_": {"doc_hash": "d56cf6212890ab1e70777367f47322bfe560b287b460d31b61544058f6aca737"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_test_loc2d.None_3.d_a_loc_d_a_2_0_3_": {"doc_hash": "d81afdc744098518707f0ec2d84b9cbd77fa2591eb82b6cc17353193140fa17f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_unknown_divisions_test_loc2d_with_unknown_divisions.None_3": {"doc_hash": "6ef932474299d0df90c454de7f42ce460a6d14d244bce4c7318292f5e55384ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_duplicated_columns_test_loc2d_duplicated_columns.None_13": {"doc_hash": "ee6f0c35ee88ff1ff75494e78186559cfc86480b039bb764d6071f4fc6e635c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_test_getitem.None_13": {"doc_hash": "e3078e7b03cc902150430632f8226c58baaf9a96a13978ccfc880005ce385829"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_slice_test_getitem_slice.assert_eq_ddf_f_df_": {"doc_hash": "5a1b46d91b4d043006908c26d45ad073c33ddeb156265395218983d08ca86c5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_integer_slice_test_getitem_integer_slice.assert_eq_ddf_8_df_8_": {"doc_hash": "f5ebf4f355788e9af5b5a86b046af9090b7cc2ccf1fe251819c620fbd6562b41"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_on_numpy_datetimes_test_loc_on_pandas_datetimes.assert_eq_a_loc_2014_2": {"doc_hash": "f269ae09fa806a4131bedceb860464c0abee9656e24171e9dd8d72062cb5f916"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_datetime_no_freq_test_coerce_loc_index.for_t_in_pd_Timestamp_n.assert_isinstance__coerce": {"doc_hash": "1ddd663e4babbff915fe1c59365a7d3ee38100bc380548fb5ef63274f9ac68c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_timestamp_str_test_loc_timestamp_str.None_15": {"doc_hash": "92ab42333e939dd2b7922a47bd3c1ffa966ebc2015de44e7769925a0d296c9d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_period_str_test_getitem_period_str.assert_eq_df_2011_2015": {"doc_hash": "324dfc1e265eb393d5f11b61427c04bcd6e1651c3a07ea783471ec5e7ea2ab0e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_test_iloc_series.with_pytest_raises_Attrib.ds_iloc_": {"doc_hash": "5de2d4e568d357dc74753f2174b1a52a274734cf7759dfd93c88231849c8e540"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_raises_test_iloc_raises.with_pytest_raises_IndexE.ddf_iloc_5_6_": {"doc_hash": "25383d0be50a8c24d36b315ae9275bf9c66605e7570e2de436459afdfce3a233"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_duplicate_columns_test_iloc_duplicate_columns.assert_eq_select_negative": {"doc_hash": "0c06b6da6d2ffe6a76742fc7570582f4f0196927c1ba7664ca43b8514819718d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_dispatch_to_getitem_test_iloc_dispatch_to_getitem.assert_eq_select_negative": {"doc_hash": "a0f1ebf7a5ea6dff7b4c72e9a6dd7056991482d678f3f0e19c802d1527eb9e82"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_df_right_df_right.return.pd_DataFrame_dict_idx_idx": {"doc_hash": "7f068968f9baa31e668e8d8573d6a6015bce1f359c6c7ba79548f5659a3f3b26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_single_test_merge_known_to_single.assert_len_result___dask_": {"doc_hash": "5f96c7d18eb53e64007b837c212176aec6c00ce25a36ce6ca0229fe60cc08742"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_single_to_known_test_merge_single_to_known.assert_len_result___dask_": {"doc_hash": "5dc7fe961d23b717952edae3e9e9eea6818a2c836f80a79874969019306b672c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_test_align_partitions._different_index": {"doc_hash": "b490752b68f19efc81fa2f0fbc13869e0a84f5c5b3731ee2ecde877ffebc8561"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions.ldf_7_test_align_partitions.None_1.assert_eq_rresult_rdf_": 
{"doc_hash": "0412c074a8b9e6631298890265643cd4f1c972bf219ba9c03257b630722f8499"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_unknown_divisions_test_align_partitions_unknown_divisions.None_1.align_partitions_ddf_ddf": {"doc_hash": "bb84054a5e435665cf6dcfa65840a475525972f06caefa97e52fe0f61e0258ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test__maybe_align_partitions_test__maybe_align_partitions.None_1._maybe_align_partitions_": {"doc_hash": "886b23dbbd9c503486dffdb2b51243770c05ffe5266358300592fa5f95bf7b02"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_indexed_dataframe_to_indexed_dataframe_test_merge_indexed_dataframe_to_indexed_dataframe.None_9": {"doc_hash": "e75366e702fa37bf554a4134dba3e862f6f9434489867f1ac8f2071f6269fc07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_list_eq_list_eq.dd__compat_assert_numpy_a": {"doc_hash": "ffdceb54b41400152405c6e84e4f3c683c397897081a42a00f4b6957e0b5fedd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_hash_join_test_hash_join.None_2": {"doc_hash": "1529e95fdd3f825553461ff00c5a732e7e42af4cb64442d90d562d0687bf1798"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_sequential_joins_test_sequential_joins.assert_eq_multi_join_pd_": {"doc_hash": "dedef0d135978f6607a780bc0c339b74949fd62ad7df70af605528d197fe77ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_test_merge_asof_indexed.assert_eq_c_C_": {"doc_hash": "0caa46ec40bc69658c83a584b5609415f03b44b5e6496e058961c67368527e6d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_basic_test_merge_asof_on_basic.assert_eq_c_C_": {"doc_hash": "d9b52f8aad39c41020323d56088445d84297e75d7b3413e1393387b434ec2ea3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_test_merge_asof_on.assert_eq_c_C_": {"doc_hash": "ca14093514e89d2a6dbaeb46a8cf3b3f5779411941ee05eaf5a45ea9f448408c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_left_on_right_index_test_merge_asof_left_on_right_index.for_nparts_in_1_2_3_.for_a1_idx2_in_.assert_eq_c_C_": {"doc_hash": "a34fe427356796eb6d4ece7338c7f9087163656c50de88a643760be412262e3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_two_partitions_test_merge_asof_indexed_two_partitions.assert_eq_c_C_": {"doc_hash": "023ce7e0ea32d9ec2a3d0f48a3a4d8daa34fc85e70cece8ff1c0659dc439802e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_test_merge_asof_on_by.assert_eq_c_C_check_ind": {"doc_hash": "ee6f0d573a3894f2ea30395211b465edc927bfda41fc8b57bf63ad7a945586d9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_test_merge_asof_on_by_tolerance.assert_eq_c_C_check_ind": {"doc_hash": "579af33d63b0dbc3714e7770488b936f704b17319dbcf81a2820ac20b35245be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_no_exact_matches_test_merge_asof_on_by_tolerance_no_exact_matches.assert_eq_c_C_check_ind": {"doc_hash": "3b3cd6c4165ccbb9aba75180188097813bfb2441d7469bc12a756ef7dca167ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_unsorted_raises_test_merge_asof_unsorted_raises.with_pytest_raises_ValueE.result_compute_": {"doc_hash": "327fa78dcce370deff2fe6cd8ead75b570d5366cb080216a12c8412ec39958c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_indexed_concat_test_indexed_concat.with_warnings_catch_warni.None_1": {"doc_hash": "f8436bf018c56ed5b7797cc762120271b0345e0c13c6464e3d37dcbb2655f489"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_test_concat.None_1.assert_eq_result_expecte": {"doc_hash": "6d0522b63702c1c2c5737fa7122784d76cce607858e801cf486169199e0dc92d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_different_dtypes_test_concat_different_dtypes.assert_dask_dtypes_pa": {"doc_hash": "8e97b5d002dac12177f7377ce35a293e460bb67d8fe472e11d2b31f91461f9f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_columns_dtypes_test_merge_columns_dtypes.assert_has_nans_and_warn": {"doc_hash": "e221dcd2780e25968b8cb490092e2d26a1a94de6c6a69a41723d7a4fa8c6a4e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_test_merge._pd_merge_A_B_": {"doc_hash": "77879c018e659778ed4a6d26834581708f33117a0acc0fd96f7c1981623e55d2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_semi_anti_cudf_test_merge_tasks_semi_anti_cudf.assert_eq_result_expect_": {"doc_hash": "9cf10f94e8693bfacfe19b93a23540f205768def3c5ad55a0b114281d2edf63a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_passes_through_test_merge_tasks_passes_through.assert_not_any_partd_in": {"doc_hash": "a315e5df72628496ccee4d2d1c6d2d385d6d0af4bbf489d3515a337ad517bec1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns_test_merge_by_index_patterns.pd_merge.return.out": {"doc_hash": "12c22bc7ddfac548cf8f702134fff3fc68fd4c0e04265beeba3b3bc572315105"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns.for_pdl_pdr_in__test_merge_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_.None_15": {"doc_hash": "7eee8a361888374b32d896632af7713ed63d1f40dca498ff1e2d036dbb8efc0c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns_test_join_by_index_patterns.pdf7r.pd_DataFrame_c_list_": {"doc_hash": "29bfb3fc376ee14bff7aa05ad3ec2558a164e9eca4f4e85e43d44bca34e31836"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns.for_pdl_pdr_in__test_join_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_2_._": {"doc_hash": "24e3946e69e33f488ed2a2d8a1cf24ad6ffa4368dc84730545973af34efb291c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns_test_merge_by_multiple_columns.pdf3r.pd_DataFrame_": {"doc_hash": "c2c7545a2d09a0f14421b681cbfa8df2ddb7ca951bd60c12e596293b1b453a83"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l__test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l_.for_lpart_rpart_in_2_.None_8": {"doc_hash": "229072102c893bed59211872a2750b478583acc34302fdce5cee47c2ee4c0789"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_melt_test_melt.None_9": {"doc_hash": "9f2aba53f761935d63ef37318a56e68e682d4b57357eede12a11e6c024c985e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_inner_merge_with_pandas_object_test_cheap_inner_merge_with_pandas_object.list_eq_da_merge_b_on_x": {"doc_hash": "4fda2e2832824d9db9e05eaee3bc652240e94e992a7a6799e56b291c2b383e51"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_divisions_test_cheap_single_partition_merge_divisions.None_1": {"doc_hash": "586cb3ca18571a76c145c606ce1e6cd245e62235ebf324de104b824695c30f58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_parition_merge_left_right_test_cheap_single_parition_merge_left_right.None_1": {"doc_hash": "7ab6c7f5a3b5f9c2e84c650389ecafaff971d90c7b6ee3a865b89699d34473d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_on_index_test_cheap_single_partition_merge_on_index.None_1": {"doc_hash": "a3ad385d46082ed190e0ca71a660e7950be475ca4aacca321df858479ced5ba9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_maintains_columns_test_merge_maintains_columns.assert_tuple_merged_colum": {"doc_hash": "9a67a32265c3d95304b4fde64cdf4765e7774d1d09a9239b5721ac15ad72cc99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_index_without_divisions_test_merge_index_without_divisions.assert_eq_result_expecte": {"doc_hash": "4122b4e9bea7a818e0db6632e84c6eda56dd7d39eba3dfbdfac910f9fd4bed07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_half_indexed_dataframe_avoids_shuffle_test_half_indexed_dataframe_avoids_shuffle.assert_len_cc_dask_500": {"doc_hash": "97334d5dd9c678731afcc5f61d1decc0c3236d267f03712e0fc2d79fca38c916"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_errors_for_merge_on_frame_columns_test_concat_one_series.assert_isinstance_c_dd_D": {"doc_hash": "62b07c8c8fc16d9161eb167726f953bb77d45f0ab1b8a2bcbd20948986b0df71"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_errors_test_concat_unknown_divisions_errors.with_pytest_raises_ValueE.with_pytest_warns_UserWar.dd_concat_aa_bb_axis_": {"doc_hash": "2aabb716f63c01a4e1e6cfda61ce688d0eb37034c3bb2a6e3b8d33933442cee4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2_test_concat2.assert_dd_concat_a_is_": {"doc_hash": "f3a4264c9ae5272648280f05fd5bda2a08b203533a53b0612f860d9b98fcdf71"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2.for_case_in_cases__test_concat2.for_case_in_cases_.None_5.assert_set_result_dask_": {"doc_hash": "76207abe0191a1f882af26b862f6ef915a1f3a0e1842b60c38c8dc711629bbdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat3_test_concat3.None_5.assert_eq_": {"doc_hash": "239c88ad03f821dcf59cae96523cf82b3995b16bc0e7f5e46cbd47bad44983c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat4_interleave_partitions_test_concat4_interleave_partitions.assert_msg_in_str_err_val": {"doc_hash": "25b94531401086ffc6d0159d7592e672ec12ce926e571b51323b60ef525fe7ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_test_concat_categorical.if_not_known_.dframes[0]._meta.clear_known_categories_df": {"doc_hash": "1d4a3b50846e29327dffab0dfdcee7204e650aa70a83c74d5824149fe2af3fc1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.check_and_return_test_concat_categorical.check_and_return.return.res": {"doc_hash": "c5c84e3c44814ea3bdd66812fd5ffc409d94319da36f85ff318ff73c71c1db3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.for_join_in_inner_ou_test_concat_categorical.for_join_in_inner_ou.None_4": {"doc_hash": "4a6545779b6aa18e9bdbd40b385c9940a01bad08108975c8703ebda8b16094a7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_mixed_simple_test_concat_categorical_mixed_simple.assert_eq_result_expecte": {"doc_hash": "58e7b18775dd2d3a4efb55c311f8b398a3dc5dcb18e649a90c3c23bd69a82fad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_test_append.None_8": {"doc_hash": "81639f614e45d5ee5addbe2ac4f2f7d59462be3c2285ba9e0d6aff886f7c9005"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append2_test_append2.None_7": {"doc_hash": "d6fd6989f2d2a9dd8d9df96eb52694ebc04aa20e187f9e2157acb9f7b84252a0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_categorical_test_append_categorical.for_known_in_True_False.None_5": {"doc_hash": "2b6af423918bcf161e0d646492f3f6713a8bbc28571ecddc0fe9489991c06e38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_lose_divisions_test_repartition_repeated_divisions.assert_eq_ddf2_df_set_in": {"doc_hash": "71eeec5e923fcbd9bdaec6dbe26379beca3633b4b0013c420a4a53b8acc88f22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_multi_duplicate_divisions_test_multi_duplicate_divisions.assert_eq_r1_r2_": {"doc_hash": "42fd93a1215d94370968c5bc45e52d6c8594858bf4a9ce229bbfeb6d9879a151"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_test_fuse_ave_width_": {"doc_hash": "fbce4f4df15b911629334590d05abe6649d37c5a1e507c7cc9caef5d66cae24f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_object_test_get_dummies_object.None_2.dd_get_dummies_ddf_colum": {"doc_hash": "169a38ad553299c64a4b040be5f17e287065b09a3187a94c27f763457beff852"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_test_get_dummies_sparse.None_2": {"doc_hash": "3b5a7a5eab322c695cd2c8b2f0d1294cb6a3f2e67449bfbcc2e0a990d5e37c65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_mix_test_get_dummies_sparse_mix.assert_pd_api_types_is_sp": {"doc_hash": "a2e506f1b39866331376fc970bcfff67e21d134a22a48a75b037f37b669ef409"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_dtype_test_get_dummies_dtype.None_1": {"doc_hash": "15678f1d22844501cecc10843fba622d53e42a47210827fbb62ebaa839f5c9fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_errors_test_get_dummies_errors.None_3.dd_get_dummies_ddf_x_": {"doc_hash": "26b5cbcd2c089232def5357d0ec388b1d89d03a0878f12d0633a32042d0f4b58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_test_pivot_table.None_1": {"doc_hash": "07229a8e69bcfcf0632bdc345303871ec8166d4dfa0742cfa85c1d95e4ce4e2b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_dtype_test_pivot_table_dtype.assert_eq_res_exp_": {"doc_hash": "d05815d671ac1df6786d170f10cbc853d1f324930349add522b8f521fc3116dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_index_dtype_test_pivot_table_index_dtype.assert_res_index_dtype_": {"doc_hash": "aeb8ef76490505ecc04145e04a84765ca00b8d0d67b90bd95896f69ac03a5967"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_errors_": {"doc_hash": "d98766f07604f9766bbe772b63d8b18f59bb722111f39dabb7f0a7e2300e7600"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_test_map_overlap.for_before_after_in_0_.None_1": {"doc_hash": "238610693aa2146ec9161c8111adb1c7c41fc76a0f06224dee9e2352f67a8e2d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_names_test_map_overlap_names.assert_res4__name_res_": {"doc_hash": "8722d4733449e4432119fb57983a0eef0834d0b618369dade47ab09f573e8e5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_provide_meta_test_map_overlap_provide_meta.assert_eq_res_sol_": {"doc_hash": "662a35b8588bc2549bf8da58efd62e6cc703fc53634761c94288e22d522e2b53"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_mad_rolling_method_args_check_less_precise._": {"doc_hash": "48bcb8b6341a571839a9c07b7a9f2f366f8c1d723dda7522b3826f4e510cca9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_methods_test_rolling_methods.None_1": {"doc_hash": "56c1d5ef1e6cb61922f6e99c6edaba3f8754c99461c808e607333485713924fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_raises_test_rolling_names.assert_sorted_a_rolling_2": {"doc_hash": "055afcaac0963a09bf86c65c26dfa72bf6558d2ac841ac692cf17a115cc4012d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_axis_test_rolling_axis.assert_eq_s_rolling_5_ax": {"doc_hash": "782776aa96a8f854257b7153fb9ca7907b4e9ae6c8b214f9367a333d52b35962"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_partition_size_test_rolling_partition_size.for_obj_dobj_in_df_dd.with_pytest_raises_NotImp.dobj_rolling_12_mean_c": {"doc_hash": "e1a579d80ac5b157bf149592ef9a9b2bef3307f0d1b8db491ec38a08628b72a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_agg_aggregate_test_rolling_agg_aggregate.None_4": {"doc_hash": "938459439fa07eb10c7e97f8eff6003affeec08150d99697b7cad118cc5c34f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_numba_engine_test_rolling_numba_engine.assert_eq_": {"doc_hash": "6a81a96b17168cfd9849b0d1d1b6006077e5917cca3031659cd90f12a28927a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_itertools_shuffle_func.shuffle": {"doc_hash": "a9e3a0500dc591db051ba7f0c63183f85ed16e09cf381334d929e23398ab1cdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_test_partitioning_index.None_7": {"doc_hash": "95e01189dbf1c179d30e09f03b540b9f69b9230ce5f786c3ede32d6d576c824b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_categorical_on_values_test_partitioning_index_categorical_on_values.None_1": {"doc_hash": "b5739c62acf6dd4f61298e7620fb480855ec6bcc71222634a1d88808ce1823ff"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_names_test_set_index_names.None_3": {"doc_hash": "98e07ad63eebb5915ebb99f7b5d350c993b2cf8bd855afbace60b7cf15f5f718"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_sort_test_shuffle_sort.assert_eq_ddf2_loc_2_3_": {"doc_hash": "70e6b7899bb1a58f58d1093ae730822ba9c0783bdf370da85d1617815cb38e44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_test_rearrange.for_i_in_a__partitions_dr.assert_sum_i_in_set_part_": {"doc_hash": "46353776e23972bb6e5ccff7db6174bdb100c6d064b6f5be971f11be1cbab1a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_cleanup_mock_shuffle_group_3.raise_ValueError_Mock_ex": {"doc_hash": "819f44a25fb7f2e346cd710ea57a7a465c5e640e4e332a6ee41e2d42bcb97895"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_disk_cleanup_with_exception_test_rearrange_disk_cleanup_with_exception.assert_len_os_listdir_tmp": {"doc_hash": "a445c75b72107ed42d9314df8bde6ebe657798005dd52d40442c321e95de7c7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_explicit_divisions_test_set_index_with_explicit_divisions.with_pytest_raises_ValueE.ddf_set_index_x_divisi": {"doc_hash": "bc95fcf5926e10990a102d01a364f2d1b5c7904a5e689c6b6de973df0cf59b67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_compute_test_set_index_divisions_compute.assert_len_d4_dask_len": {"doc_hash": "2f7410c575d49270308029e55d3dbc6d9bdedf825900ffec1d6d8e65392615d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_sorted_test_set_index_divisions_sorted.None_3.ddf_set_index_y_divisi": {"doc_hash": "2a9a0548e7dad60210c89a9f52609a9c48faf08419f14f22424218c3e17dba31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_consistent_divisions_test_set_index_consistent_divisions.assert_len_divisions_set_": {"doc_hash": "9a31e3640f63057e1ff92c61ad91b51d081053932895a3a8d4ec93141ed37675"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__set_index_make_part.return.pd_DataFrame_x_np_ran": {"doc_hash": "63854c08bd3670d023306adbf8345c567365707cffe5eb9bd970bdb5646cfe92"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_reduces_partitions_large_test_set_index_reduces_partitions_large.assert_1_ddf2_npartitio": {"doc_hash": "e8f1a206033bb6e260566d36e8905c76e4aeeff263037e8b8cc515ab20cfaabb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_doesnt_increase_partitions_test_set_index_detects_sorted_data.assert_len_ddf2_dask_d": {"doc_hash": "bde6996440c683c0b82409d7d158a760a343faecb8f0ce5015536d80ea3f3fec"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorts_test_set_index_sorts.assert_ddf_set_index_tim": {"doc_hash": "e44f23f50e76fc7e9f30c454e5cf7d67050c790df1bf2b91000217e9b3307f91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_test_set_index.assert_eq_d5_full_set_in": {"doc_hash": "2887ab3b91ca954dd4754e2b7e57d2e8923b31a5a51d7bf58e2ab732e3466b49"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_drop_test_set_index_drop.assert_eq_ddf_set_index_2": {"doc_hash": "4c4bc8ec37fd59351f43fb558f737bfadc33b5e1d4da447ad15df9fc330bc4a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_raises_error_on_bad_input_test_set_index_raises_error_on_bad_input.None_2": {"doc_hash": "a3a057cb02d2185f10adb90cfb7b31fdf3d65c8cd1c6a8f7a2aa59faa2b1d32d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_true_test_set_index_sorted_true.with_pytest_raises_ValueE.a_set_index_a_z_sorted_T": {"doc_hash": "079307848146ea2fd222164e142bb4f3c1998335c4ae31bdfa24cb8f87af8f73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_single_partition_test_set_index_sorted_min_max_same.assert_df2_divisions_": {"doc_hash": "f366a5626c5a8695c6614761d857541c1dba5b97b071b2e94494ad86b53052e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_empty_partition_test_set_index_empty_partition.for_conv_in_converters_.assert_assert_eq_ddf_set_": {"doc_hash": "83960d50e19132a6fb3cd5e2aa8fb5eec1f49023a2de8fed80ff7d2029218350"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_on_empty_test_set_index_on_empty.for_converter_in_converte.None_2": {"doc_hash": "89209b944e83cd5107caaddb6fdf1c0404405be2b230fcf94d7d2ee0c357ed4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_categorical_test_set_index_categorical.assert_categorical_equal_": {"doc_hash": "9bf2ce13e3e5d3456af2dfc235e2f53b38457d65fb4706c8e61b4ef3621b3edb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_divisions_test_compute_divisions.assert_b_known_divisions": {"doc_hash": "17cd7841c7445f2f8fc6efe05672a5a46480f842eaca9499836f73d8a22439a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_empty_partitions_test_empty_partitions.None_2": {"doc_hash": "44f2c7856e5f2c1d6e4df3ab47ac3824231229f5f2dc05bc50f5adcc9c84276a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_remove_nans_test_remove_nans.for_conv_none_val_in_con.for_inputs_expected_in_t.assert_remove_nans_params": {"doc_hash": "015550f57283ac393a80d356c3fb7bd98f0b1b90960b8b33a0a60b8ac84bd575"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_gh_2730_test_gh_2730.tm_assert_frame_equal_res": {"doc_hash": "d3fd6cc3e14ae3a317547ce58332f17cd754fa4198434a84939e5935a10d00ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_does_not_repeat_work_due_to_optimizations_test_set_index_errors_with_inplace_kwarg.with_pytest_raises_NotImp.ddf_set_index_a_inplac": {"doc_hash": "8b9fa48d57ee9eda51570896fab93acb9727302cda1d16eccbca84fe0ab77111"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timestamp_test_set_index_timestamp.assert_eq_df2_ddf_set_in": {"doc_hash": "efdad4eb0c114fca87a3272e80edbd8aac40420187b4e5ad17a869b11de93fc3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_with_compression_option_test_disk_shuffle_with_unknown_compression.with_dask_config_set_da.with_pytest_raises_.test_shuffle_disk_": {"doc_hash": "e3c0d4c5a65ff0230978b3ca0585086905c5fcbfb422bc1d49ae4dbf1885f356"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_check_actual_compression_test_disk_shuffle_check_actual_compression.assert_len_uncompressed_d": {"doc_hash": "95f84b05e4bbd34debdc6b39cf1808029a2eac70df746aacc9f13735cdfa51eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_pytest__BASE_UFUNCS._": {"doc_hash": "3b146931bcf3326f8d6ece41ff35a46151b156f495c311228f37f62004389da9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_test_ufunc.None_4.assert_eq_dafunc_pandas_i": {"doc_hash": "a39dc42f23f7f26ec7a5557cb35a04a41120caf5ff354924d16943e75b6190d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_array_wrap_test_ufunc_array_wrap.None_5": {"doc_hash": "2667f98c4f1edfb0b2daafd69ff1694fc60fcc99c80f7b2c0a07f5ca4c7da992"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py__UFUNCS_2ARG__UFUNCS_2ARG._": {"doc_hash": "7c3694c493fc081d6b2e09f6246e41f72e7c98cbb8022a38578862b48ed0177c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_2args_test_ufunc_with_2args.assert_eq_dafunc_pandas1_": {"doc_hash": "9ce5b68d787dc90833dee78b28493198c216b04f19fdc1efbc6333a5f6f32d64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_clip_test_clip.assert_eq_da_clip_pandas_": {"doc_hash": "1d9e4d0fc6071c7a8216d77ca79cc1cfa47903019484054a39bbb8cdf7e80557"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_ufunc_out_test_frame_ufunc_out.None_1.assert_eq_ddf_out_np_exp": {"doc_hash": "51f51f4b3f4310f2cc233a1616fd501873f85a7c3ac269053bc2580a8a17bf58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_2ufunc_out_test_frame_2ufunc_out.assert_eq_ddf_out_expect": {"doc_hash": 
"15322ea73f43e36a47db974eb8f8a314b4e0188d95bc33b0c56fb54805378252"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_mixed_types_test_mixed_types.assert_eq_dafunc_arg2_ar": {"doc_hash": "c2405024918f9ab24ef6d2cceeb3b8318441dcfc10b7386fe7485a9270719a5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_2args_with_array_test_2args_with_array.None_2": {"doc_hash": "d03d823a589954a8e5e84f71ee76fe8d94d83a3e0a28451d785616d48124df5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_reduction_test_ufunc_with_reduction.with_pytest_warns_None_.assert_eq_np_redfunc_np_u": {"doc_hash": "6ed2c6892987a35659761e8f7d1a20e08bab98815119ecee2689e0cce4705963"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_numpy_scalar_comparison_": {"doc_hash": "bfc62c17ee426ab9a7d9f1e5c8584fb7073685f8a1fc8100e031e1772bafd31c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_shard_df_on_index_test_shard_df_on_index.assert_list_result_2_ind": {"doc_hash": "c67c2aafbd2aae11e0c527063ac5f4fc728b970c236b6e80f97194ea000961bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_test_meta_nonempty.assert_df3_A_s_al": {"doc_hash": "13230b7c775ad08858c74c289c5baf5ebae768b205d3ae537a582fad040b384a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_duplicated_test_meta_nonempty_empty_categories.for_dtype_in_O_f8_.assert_res_name_s_name": {"doc_hash": "1f3a52d252c271806427c68b72d5fa398bc5b187f12b9dd9401ba5942b278862"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_matching_columns_raises_appropriate_errors_test_check_meta_typename.assert_pandas_in_str_in": {"doc_hash": "cd3895d69cf041cbf8423bfe06bd089805f4cd1d94fcf02ec0ef3866120dc870"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_bin_and_out_divs__resample_bin_and_out_divs.return.tuple_map_pd_Timestamp_n": {"doc_hash": "869b00d8282915e55ebc3dbb1f187b58e7a850dc969e0d097c13e9d548571fd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler_Resampler.__init__.self._kwargs.kwargs": {"doc_hash": "9d5dc8735fd2e2558c44badbf24ebf6a5e5599bb26831f550b4bbd6d442841ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler._agg_Resampler._agg.return.Series_graph_name_meta_": {"doc_hash": "ecf039db46bd3aa92b6c57527bbd53df94e7397dd21c87abb5a29320af6173b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler.agg_": {"doc_hash": "c648cf765f523c8a267744229e70ce9c2dc59c1167859c7be6a14a24449a1ba1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_from_itertools_import_pro_test_series_resample.assert_expected_index_1_": {"doc_hash": 
"872d507d00388c8b203a01ae5df5e6753a7393b3189e05fe70663023ccd67964"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_has_correct_fill_value_test_resample_has_correct_fill_value.assert_eq_": {"doc_hash": "109900deb1970e39bdb2e81dd060a8fb9d6e4b31caa3a5a92e7cd9d12662d04e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_test_resample_agg.assert_eq_": {"doc_hash": "25ea79888b69df3d8597310f635ab59cd761215503e4eb0d02bfaa9f20379907"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_passes_kwargs_test_resample_agg_passes_kwargs.assert_ds_resample_2h_": {"doc_hash": "161e0d8ad5fa7c66d42a51c17185f930923ef5cb18ac6ba7a82c3eaab8c5a686"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_throws_error_when_parition_index_does_not_match_index_test_resample_throws_error_when_parition_index_does_not_match_index.with_pytest_raises_ValueE.ds_resample_2M_count_": {"doc_hash": "b9f958fa14cba0a78f2884a5001afbf0fcde9ccc74f0ea17b9eb07306efdfdcc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_pads_last_division_to_avoid_off_by_one_test_resample_pads_last_division_to_avoid_off_by_one.assert_eq_actual_expecte": {"doc_hash": "a4d3fbdcf9c52da10bd44391e5446502751d287b951cfa6126cd6db7865e6088"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_does_not_evenly_divide_day_test_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"doc_hash": "98369f31a8d3d7ffe67656441f61fdb72e69c8202f2414d920ee4bb294e2a8fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_does_not_evenly_divide_day_test_series_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"doc_hash": "380b49a67165115f960604729859dd409b8c72619ca7f1448527166e94111498"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_unknown_divisions_error_test_resample_index_name.assert_ddf_resample_D_": {"doc_hash": "6eb999664cbc777e49b2a6cde283a39ace3e2a0d3ce80f5daa848ba9ed1e1dc6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_non_existent_datetime_test_series_resample_non_existent_datetime.assert_eq_result_expecte": {"doc_hash": "837c4035d99df4ad6b8ed261ac9a9d9c5e4c2396dbee520df053be43c8326c8b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_common_aggs_": {"doc_hash": "1ba7f413be6c596e54f59d663a8205dbdba81e342a91d6312305bcd239900ac2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_shard_df_on_index_shard_df_on_index.if_not_len_divisions_.else_.yield_df_iloc_indices_1_": {"doc_hash": "939ee5fe17dc2dcb257c2ac3e2cab1f720ceb854c106558b74d7b810c01494fd"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__META_TYPES__META_DESCRIPTION._": {"doc_hash": "b1ed569b73b996e2774d7cd90040d41d4f07728ac630e6f335c8ca14f9f4bb7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_insert_meta_param_description_insert_meta_param_description.return.f": {"doc_hash": "3bb20a8511c7a5646be7f7137ff2b06d72ae7f2391d480ebbbf1ff3a35a5fe92"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_raise_on_meta_error_raise_on_meta_error.try_.except_Exception_as_e_.raise_ValueError_msg_fro": {"doc_hash": "6012f379af870a306974b33083918b9b4f5da090351f10da95f366e35074b2a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_UNKNOWN_CATEGORIES_has_known_categories.raise_TypeError_Expected": {"doc_hash": "04d4d61707fca99b5f9de582984447a6104baf56705711d39f441a38df239b18"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_strip_unknown_categories_strip_unknown_categories.return.x": {"doc_hash": "666ca79a65976cb329c60c6a4bcb45f28b11db3cf3e3670a402d7d5b5c77f944"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_clear_known_categories_clear_known_categories.return.x": {"doc_hash": "235b5032acfa480897305ccfde0e24b4351a4df11466c8a41603d482cb4e38b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_meta_check_meta.raise_ValueError_": {"doc_hash": "c04c33981c1481fcc36d16c85775fb4a42dc903fbf0f607b310d79daadea21ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py____check_dask.return.dsk": {"doc_hash": "a5f7c56d81fdd1da4584c10fe505854ba9578286b42fa694ec2e5265af1284db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_graph_assert_sane_keynames.for_k_in_ddf_dask_keys_.assert_k_split_0_is": {"doc_hash": "e9874e9c2ae3f20b347306f168030e4577577ef4c0bf07621c240814b462c9f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_dtypes_assert_dask_dtypes.if_not_is_dask_collection.else_.if_hasattr_ddf__meta_dt.else_.assert_type_ddf__meta_": {"doc_hash": "b90b6f1ce40303720b3b056eb5610851fd9fad3d44c352a4a85181a2cea26ee3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_max_deps_": {"doc_hash": "b03dc3d599c0efb7d5d5fc33849aeb21daf666cf9b3e3de7d67c46d21ce08280"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_random_timeseries.return.make_timeseries_": {"doc_hash": "d4e5f83d9fde1844b1ba9968d6cc7787279ea4d85feee5ef8412f267402b5300"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py__generate_mimesis__make_mimesis.return.db_Bag_dsk_name_npartit": {"doc_hash": "30417e3c342e9f1c9a0620629d2170725855f1e64223cd24b7419b32f3d40a1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_make_people_": {"doc_hash": "d87ffc995f44f8f730cc2a9664f299542e270fa1afcc637c81d431524e4707eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_operator_finalize.return.Delayed_name_graph_": 
{"doc_hash": "02353302caf08194e2fd856cd32bdd7fdbc3d9d709d8a71a70a6c7d0d6d133c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_unpack_collections_unpack_collections.return.expr_": {"doc_hash": "d8e4a04aa68ed6f92dc21427c7f31407eb3b7cf25beeae1945a06ffd872ce849"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_to_task_dask_to_task_dask.return.expr_": {"doc_hash": "ae55ac6e4d4e2b313e7db6bf8af38f69d28ba949dd9814f484dc1f91f9cd2113"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_tokenize_delayed": {"doc_hash": "82423943e5128d6fdffc4dffeb00d3a64ddeedf0b0c80f4e2ba41e20a1fb8fcc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed._Wraps_a_function_or_ob_delayed._Wraps_a_function_or_ob": {"doc_hash": "e2eac90661aaba4591e085eac4751d7c839c4d5ca6c4119d73b311b3edb426b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_call_function_call_function.return.Delayed_name_graph_leng": {"doc_hash": "2248ff7a3086e4b56a29de4e66cfef97b4125c7f28ca8b5c1e9df323c4133665"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedLeaf_DelayedLeaf.__call__.return.call_function_": {"doc_hash": "2d500a670efd2671d543176ec6503bc266933924d07adbd4ea8af69329e18b92"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedAttr_DelayedAttr.__call__.return.call_function_": {"doc_hash": "b055f154ccaf19a40c03081ff54a55efdc15ff4774a53e1b5ad06704084f2356"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_for_op_in__": {"doc_hash": "3d72b970736e16a9e7780b2f5ff461ff079b2f89f2baad2f1d25fe06972152b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/__init__.py__": {"doc_hash": "c9c6b7b9b59269a4f84c4e055e0f087828468313f620bcb0b46c4ca563ef73d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_from_collections_import_n_Profiler.clear.self._dsk._": {"doc_hash": "dd2d3ae5418c94c1729e8dd34490677c72735097a5ae516686e4fddc6dd0d915"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_ResourceData_ResourceProfiler.visualize.return.visualize_self_kwargs_": {"doc_hash": "1af1285742768d6fc354b10b0e12e139efd6a72b8478fb677bcef5e356087821"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker__Tracker._update_pids.return._self_parent_": {"doc_hash": "9425c0f9138adc3d693640ef6a7897c50c769abd5af8fd6c25d58d3c592971ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker.run_CacheData.namedtuple_": {"doc_hash": "d677181f692ea459a2e2a28227ceba312fd1e329970126ef5ad89bd22895f260"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_CacheProfiler_": {"doc_hash": "cbbc5c0b34640cdebf4f40d111c8a456107cd5f7cd43ca02b43f3e23c1da61b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_random_unquote.return.expr": {"doc_hash": "5f5d176ea1eae9e45ec60903bdd1703328acf2f5767450017e343c9f1c5736ee"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_pprint_task_pprint_task.if_istask_task_.else_.try_.except_TypeError_.return._": {"doc_hash": "667d5f36383d20a1fe5714656348b7b53b68c29c11a20934f815e5c93b686351"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_get_colors_get_colors.return._color_lookup_n_for_n_in": {"doc_hash": "6c2cb8a895686002e3fc05f72b6615d0c28ccceb864e4c583f86251417b9397c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks_plot_tasks.hover.p_select_HoverTool_": {"doc_hash": "37d7a4efd2bbbfe2d75ef26a5e91b37a3a4a0097d0f894533ea037b60d18ab4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks.hover.tooltips_plot_tasks.return.p": {"doc_hash": "1e4746f07e35bf17b9fa607bae3c8f87d7e47ab2639dbe3eff851da71c5213fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_resources_fix_bounds.return.start_max_end_start_m": {"doc_hash": "bbf73ee46d6508f81547f9bd3ca366110461e33255021a761526dfba768ef6a7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_cache_": {"doc_hash": "a03ee1476ee5cd3b235b757f2f894557fc01da5a2b183aa1c8aa289d26c3a582"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_test_profiler.assert_prof_results_": {"doc_hash": "f2e4b703ab22a55d81a45ee638522e241838b3db4e1244634a67a1fbc78b0f8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_test_resource_profiler.assert_len_rprof_results_": {"doc_hash": "8b69c82cdc5218268075f81ded7cbe271961e423230a80ffa12a3f04ec232f6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_multiple_gets_test_resource_profiler_multiple_gets.assert_not_rprof__is_runn": {"doc_hash": "917f016c892d0b33793c16885e203c587ec6764e996eee7908dccf16fe3432d2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_test_cache_profiler.assert_CacheProfiler_metr": {"doc_hash": "d65b4c94f95faae035daca1b844fab3bc1f95d3da08c6cabe1b802207cb739dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_register_test_register.try_.finally_.prof_unregister_": {"doc_hash": "0b7e10348e11d03590dff2518337100020ecd0fe6e8d53cb980f17bcbcad86bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_unquote_test_unquote.None_2": {"doc_hash": "c0d2146c1e58fab7d13c2b1e5043cc8f0530b36037d4dcab84940a9ff46160a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_pprint_task_test_pprint_task.None_9": {"doc_hash": "d8065822071cd497583d3bedc98d8a8e5ddbe0132ad64bc86dec48ce92f7e6ee"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_plot_test_resource_profiler_plot.for_results_in_1_0.None_6": {"doc_hash": "048e6bb57b724b4a264f5cde295173101acf4a971b2455dbc903b4a2d3b90d94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_from_operator_import_add__test_no_tasks.check_bar_completed_capsy": {"doc_hash": "603bd404acb2239b635ecff0abada001198197ab63a6954bd55cd618be23a88c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_cache_test_with_cache.None_1": {"doc_hash": "01c579d87a542c9b389a93a1f41273d0d28882316548262a834b5e46e3cdc490"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_alias_": {"doc_hash": "73d2fbaa553378db30a84a25f375b73fcc11fca99eeeda6eaeaf173e4093339d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/distributed.py__flake8_noqa_": {"doc_hash": "90871846cc637323720de882deb99aa1c814f8bf8edeb162c5b667259099749d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_has_sub_tasks__UUIDPAT.re_compile_0_9a_z_8_": {"doc_hash": "5f31363e824da09a889b2f27e4962f03eff0e9cc3e22659c884dc9b546aa5558"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_label_label.return.s": {"doc_hash": "0840df533f2b9c10ed4aa8e1512ab88c93593d1e634f27639402cb9d8b232ac7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_box_label_box_label.if_isinstance_key_tuple_.else_.return._": {"doc_hash": "4db15a7f0091a536abf727c33e71eeb5ebca6ba1c78f9cbcc5f97242bcbd3e1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_to_graphviz_to_graphviz.return.g": {"doc_hash": "c3ea2c209c42014debef17e348456c16f4043450f5bd43731a4c1a18c480129a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_IPYTHON_IMAGE_FORMATS__get_display_cls.if_format_in_IPYTHON_NO_D.else_.raise_ValueError_Unknown": {"doc_hash": "24df063f9ec3016ba9d8d5dca957865ad121eec5fffff62a7145ba1b16b2aad6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_dot_graph_dot_graph.return.graphviz_to_file_g_filen": {"doc_hash": "e7150a3bddd83ec56dec4c680940a1999526b5dfdd41c630222b75949055a919"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_graphviz_to_file_": {"doc_hash": "abbd3d04eb48a7fc205a88eb51e47a5ba3aac8048bce40213ca437e3454898b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_binascii_hashers_append__hash_sha1": {"doc_hash": "31f76c24621561b46d255b021222c44a033a011931558c8eef66cff35b2d6d74"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_hash_buffer_": {"doc_hash": "0d4d47f17e4823fb49dfccb9d0c2b080e69473eec05c514c7d35e664c72aeceb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._from_collection_HighLevelGraph._from_collection.return.cls_layers_deps_": {"doc_hash": "25c6ca33ef90b63d187e21dd8a991c8a62b6a204dc7eb04941e30e1039b6ff12"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.from_collections_HighLevelGraph.from_collections.return.cls_layers_deps_": {"doc_hash": "11d93805ebccfa64c7dbdc57b77064c2a92601dc3a447a1b95233b267bc4e0f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.validate_HighLevelGraph.validate.for_k_in_dep_key1_.if_self_dependencies_k_.raise_ValueError_": {"doc_hash": "a9295ea35b9852ecba55ae970ef19f3b9ad74d6e12e485a9514642ea1666dbeb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py___os": {"doc_hash": "2d0a3b3ab26443918669a67e597ed46f8ca21525ed3c88d506ee13246d065ce3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_start_state_from_dask_start_state_from_dask.return.state": {"doc_hash": "3e87263cae13848ec305e26a4cd92e54f9ffa13166fc53276a25e7128ae5d7dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_finish_task_finish_task.return.state": {"doc_hash": "146973ae4fce1da2503325a062ed16fe60e2315691928cf48f5c91d48c349463"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_nested_get_identity.return.x": {"doc_hash": "2b6ce7e920b6e9ede854ecb9a72ccccb2612b17d25ff3dbf23cdf5be7eb4d138"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_3_get_async.dsk.dict_dsk_": {"doc_hash": "91576567cc24407362d4504e12bd7c1e4384f2044df91176085b8768b31b4d9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_get_async.with_local_callbacks_call_get_async.return.nested_get_result_state_": {"doc_hash": "0b0ffe69e53f189082295d62b5a2207d4dd91dabecd976775ac95817ae9656d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_sortkey_": {"doc_hash": "872b984619fc732fcef8c87bc815242121c67c0631b57f9cc68c077d82e8d68e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py__Remote_Exception_Han_RemoteException.__getattr__.try_.except_AttributeError_.return.getattr_self_exception_k": {"doc_hash": "2eb61d6bf8bcadecefd4415af8b5c500dc9d6696aee8af4ac0ada0227d3b2748"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_exceptions_get_context.if_sys_platform_win32.else_.return.multiprocessing_get_conte": {"doc_hash": "10f90841e5f81d03158901d1d53c82fac13357cbe4fadfe36658d91f8bed43ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_get_": {"doc_hash": "5a0ba41c492d7ef1c9a186414337f64cf9c342f2c5c384ce03af81f18dd4e5b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_math_cull.return.out_dependencies": {"doc_hash": "e03ed1165ccba99a1ff96847f40f5eb1abede71029d47a8bc47ceb179ea079a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_linear_keys_renamer_default_fused_linear_keys_renamer.if_typ_is_str_.else_.return.None": {"doc_hash": "78b47c1cf58aaffc6de1cbcfce0f9403ea6ec66c196c282dc482ba536dd05461"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear_fuse_linear.dependencies._k_set_v_for_k_v_in_de": {"doc_hash": 
"599a5f26d0d68b8d9cb1b86e62b34cb60d620d1a03ded30ea0afd984e63c6d15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear.if_rename_keys_is_True___flat_set.return.set_x_": {"doc_hash": "4860b738cd3eaf1d45590749816832479311ddd6b1c2ad93852db1fe7b47d32e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_inline.return.dsk2": {"doc_hash": "81d15664d3fe26844466ae515c75a8d807d8f72bd101b9875d14d148cef6f17e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_functions_inline_functions.return.dsk": {"doc_hash": "4bd8600a18b6abdbd9641355b6540c78f6dd26c79e3479b59f791424be23fa06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_unwrap_partial_functions_of.return.funcs": {"doc_hash": "dd9f443d8b32873136c07079f85bb02ed6307d529f418dbfbffafe53e5815137"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_keys_renamer__default.Default_token": {"doc_hash": "c4327ce98611882d192732b55183384e0ac7871f69afa0ede2f803ebbc28944e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.if_keys_is_not_None_and_n_fuse.children_stack_pop.children_stack_pop": {"doc_hash": "b84e276bbd3abba9c3374c6116defd75d782abe20596055f56c7f019006a3d62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.while_reducible__fuse.return.rv_deps": {"doc_hash": "84d32698506bdbd819153b1b2b8a2d1b97d063ff15c85fa110814bfa1290f537"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py__inplace_fuse_subgraphs__inplace_fuse_subgraphs.for_chain_in_chains_.if_rename_keys_.fused_trees_outkey_cha": {"doc_hash": "979cbcbfb9ef7b50ae8e186df6b1f3d4c7697737db9a568bcbf04fc993718d58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_SubgraphCallable_": {"doc_hash": "46aaf860e0cea9c835123e2afae4bcf31af14074f67d04d28d6bb337bd5b3147"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_r_Static_order_of_node_add": {"doc_hash": "d8ffe5e53260f48be983d166e413e1415db5458b2f6475664b59480c640f69e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics_graph_metrics.for_key_deps_in_dependen.if_not_deps_.for_child_in_dependencies.if_not_num_needed_child_.current_append_child_": {"doc_hash": "e5b5bb0d9f12e921331896f65cb4e2af45423f6a0629b8d6250f88c08e315ab1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics.while_current__graph_metrics.return.result": {"doc_hash": "841b0f89b848d4c91bcff9a34829a8d5ef93b5a5c2acf470880a38164c2cb5c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_ndependencies_ndependencies.return.num_dependencies_result": {"doc_hash": "08c6cc063799d121677887688a81f46c003b069ef94c313b8b1678b29a8819f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_from_collections_import_d_Traverser.skip.self.term.self__stack_pop_": {"doc_hash": "fb581e71372617c73cab7718e9d6dcc52ce75a9e4d1407d66a92e959f4b0c814"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Token_END.Token_end_": {"doc_hash": "8136efed77d7133a6a6e4d763e99e08fc110c0e6d9aa58dfd938e0905d41ea4f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Node_Node.patterns.return.self_1_": {"doc_hash": "e24e38c76ac9afa8de7bbb3a64166cf86d7c347a22727b2f5501e3f650e3678d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RewriteRule_RewriteRule.__repr__.return.str_self_": {"doc_hash": "3fee097fba99c42bbd367cccbbe735cd188534759db69e62a1cd787f6fbf2f97"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet_RuleSet.__init__.for_p_in_rules_.self_add_p_": {"doc_hash": "23e78d9057ae8713eb8e46b3d18893c0b4b9e5d1d7fa01e45ce3a0c8d091b962"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.add_RuleSet.add.self_rules_append_rule_": {"doc_hash": "b6fb037a14797f9083b0a3d09c273fd757fd9effb4aec3cd179ad708437f4765"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.iter_matches_RuleSet._rewrite.return.term": {"doc_hash": "c59c4841e01cbb4b1d219d1ba813c785b78d6811982d988cc1487365990754fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.rewrite_RuleSet.rewrite.return.strategies_strategy_self": {"doc_hash": "0129a64e6c0c158ef5b9e708a2110f6825caeeb2f133ffe9b2962db670664b1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__top_level__match.while_True_.None_1.except_Exception_.return": {"doc_hash": "41114f888aa5f2681e3393c50a68f0397a0532bb9f59c8d46e3294c17189bfed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__process_match_": {"doc_hash": "ba5e0bdca77afc2711452d1bd187e8e9cf7b7cd3c16b82cf2cb10f36b1075b7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_pandas_register_pandas.sizeof_pandas_multiindex.return.int_p_1000": {"doc_hash": "43c570505c6b93b5e23985c57f2c43fb8500fa8a912916de3b4a0bd43581abb3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_spmatrix_": {"doc_hash": "3298786fac88a2ca8b5d2866d54dd06d1c9b0abc9a770a0d5af98c6acc50916a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/system.py_math_": {"doc_hash": "17923f072debe7fad63c27945bae9b0fef109c1cda2992ec9696ad87ab8f7248"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_test_normalize_function.None_10": {"doc_hash": "c101a4545a0603381dcba62aa5d8051d02e87600addc01192bd52e3c4cf3d89e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_test_tokenize_numpy_datetime.tokenize_np_array_2000_": {"doc_hash": "bc894dbe580d1c56138596c92f1bb18814517ba27c2c1b5ccd1bf29652a1c8c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_scalar_test_tokenize_numpy_scalar_string_rep.try_.finally_.np_set_string_function_No": {"doc_hash": "46e64c08351b411ef50cf1983693df7cdc5edcbe20bf4579b8b34c82d58ce6fa"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_offset_test_tokenize_numpy_memmap_offset.with_open_fn_rb_as_f_.assert_tokenize_sub1_": {"doc_hash": "459373dd9a0dfa078120d6987b2b7e2326803554ad4dc08e959d9a95d28f6a65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_test_tokenize_numpy_memmap.None_2.assert_tokenize_mm_1_": {"doc_hash": "68554260f2ebd74662094ecd4b87decf9ec71ee0a1b9ca5f709fc8723b3e7201"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_no_filename_test_tokenize_numpy_ufunc_consistent.assert_tokenize_inc_t": {"doc_hash": "8680bcda023ace23f5ce959a31b2611be1b1f82308805f700bc8115aad240bf2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_test_tokenize_pandas.None_5": {"doc_hash": "1619e7c37e9fcf29b7f5a6f05f2a17a6df52e934cbafd21033eba23731f90b52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_invalid_unicode_test_tokenize_pandas_no_pickle.tokenize_df_": {"doc_hash": "5d1aacf8836ac44721d93448f17cd7eb26ff8206a687fc41293cbebee11fda67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_extension_array_test_tokenize_pandas_extension_array.for_arr_in_arrays_.assert_tokenize_arr_t": {"doc_hash": "90d3fe575d5946d614d8f4ef33edf3363cb2ec25609a924154e997c3130ba223"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_range_test_tokenize_range._Different_step": {"doc_hash": "048a464b7e48424f5e40fc56d08b8207dfacba16413c02e7c84cea8170532cd5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_array_with_nans_test_tokenize_numpy_matrix.None_2": {"doc_hash": "a2d4d064e8495bc1ea774e1befabf94c48bfdba4ed04f2fb8932155c069b831b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections.args_test_unpack_collections._Smoketest_results_that_": {"doc_hash": "7be09fe1e21fafbde3e69276ed7605164b74ecdd7b8caa2a73b33b3ce3e8e3cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_no_opt_test_compute_no_opt._See_Renamed": {"doc_hash": "e187a333f353695a8a78d0dac4509bef1183cd6bff1a1801682df0c7bf56921a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_test_persist_array.assert_len_y_dask_y_n": {"doc_hash": "f42dced34b7d37ed03be8fee90c2d5d7a9aec9debfd51c9349a43244ab5f256e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_valid_unicode_in_bytes_test_compute_with_literal.assert_compute_5_5_": {"doc_hash": "ea24323a0073a26155aa8d3f49db34a899696f9ec5f64916945ac414dee489b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_nested_test_compute_nested.assert_res_1_8": {"doc_hash": "6f72afc90809f3f5964bde808bed0b1730642c5b186e6ea56ccb2390e3673d79"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_use_cloudpickle_to_tokenize_functions_in__main___test_optimizations_keyword.None_1": {"doc_hash": "059f2e4cbfc1f14b49c97fa6180689b8c14fc8419efe2efd7cb3ef28a653caeb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_test_optimize.for_a_b_in_zip_x3_y3_.assert_dict_a_dask_di": {"doc_hash": "2f75815f1bcf3fd68776d132bdf124a37259a8bc634592e36294f76377d3720f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_nested_test_optimize_nested.assert_res_1_compute_": {"doc_hash": "bb6edf1fc411903840ea3aaa4693a6b213a9c2e24548821d595c4dbcf19ed1ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_default_imports_test_persist_literals.assert_persist_1_2_3_": {"doc_hash": "5df287cabeb865b4c61f43824f5bdd4846924e4b8675f964c3a7807afc86f58c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_nested_test_persist_nested.assert_res_1_compute_": {"doc_hash": "91d61aa4fa809c441b0476b59e334f34b086e5701f620fc880e587bba456e0df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_None_test_optimize_None.with_dask_config_set_arra.y_compute_": {"doc_hash": "0400c5af849693bcd3d8fb063782be9a3ea9ce45e13208e4ba8ce9f7b2567d9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_scheduler_keyword_test_scheduler_keyword.try_.finally_.del_named_schedulers_foo": {"doc_hash": "33bd9afd41d169b5a0bbc6b8ed02bd0278c598baafdbe93fbf23d7c2319447f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_test_cache_with_number_": {"doc_hash": "ae04d3cd4b0ce18d7248301323966f1fa3f8d8fe7a63e0900a597c8f4e1ef5c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_finish_always_called_test_finish_always_called.None_4": {"doc_hash": "1c60d77d24b67810fb14bf0c7388cd389027a684bfd839a2abc501a57dcec0be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_nested_schedulers_": {"doc_hash": "4dcc3a5963fc0c57ffe75417a8d99a07eb1211643ab0aa5b6e40e047d1947a7f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_update_test_update.assert_b_x_2_y_": {"doc_hash": "1f04410735cbde262fe80e6fb6e6401760f0a9849999b4d1f3dca365cbc6788f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_merge_test_collect_yaml_paths.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"doc_hash": "21b40fa7fc0caee6e09ce211d3c46d3d065d6c1a85edc3ff0a659168dcbc02e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_dir_no_read_permissions.try_.finally_.os_chmod_path_perm_orig_": {"doc_hash": "c59a87bd89a9b178ce5d7ca21a936a75453bdf4249d3694cceda27c0910bbaf6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_permission_errors_test_collect_yaml_permission_errors.with_no_read_permissions_.assert_config_expected": 
{"doc_hash": "27cfe7a212ec6e6d0601c7aea6a5dea34d4adb1ce87c8e644f4ec4e312bbbf3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_env_test_env.assert_res_expected": {"doc_hash": "4536571705e25a6064e23a8df3efad15ddbaea25dfdaba0530428fd9eaa05af1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_test_collect.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"doc_hash": "8e6c6ee7b1d798290d8ad16dc98adfca86b73009c30701e905ffef33f6a10a91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_env_none_test_get.with_pytest_raises_KeyErr.get_y_b_config_d_": {"doc_hash": "08ba24118fa940efec3162d87a71a0ef9c61c15cab3ea4ea53a754f18bb297d6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_test_ensure_file.assert_not_result": {"doc_hash": "d63f60c516c0fb20d6a429f2c63ce8a5d2c49a2f2c48d4d89d2058339a388f64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_test_set.assert_d_abc_x_1": {"doc_hash": "fbea8b8b2777f3fe57ec1cf3ed51da042b134a9e5c3106014b8c2fb871c86320"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_kwargs_test_set_kwargs.None_2": {"doc_hash": "b2f86495e1f624f7c145b8b8d8679d1adb06457df02065386b6f87143f98a6b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_directory_test_ensure_file_directory.assert_os_path_exists_os_": {"doc_hash": "ff5d69a9d8888cb8647217d646630b314f85fb8ac903f86616d8637b74bd3383"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_defaults_to_DASK_CONFIG_directory_test_ensure_file_defaults_to_DASK_CONFIG_directory.assert_os_path_split_fn_": {"doc_hash": "963b8982c0aa98f50c9b2575a3abd5ca19ba7302cb1352dd89e9407166c8973b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_rename_test_refresh.None_2": {"doc_hash": "49e7357208a0fe750bfc9ee625f10366e56b8003d02eccb854396e234ba0006c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_expand_environment_variables_test_env_var_canonical_name.None_1": {"doc_hash": "a25c25ba87dcb8520f306bfd75f816b6f2ba76ec842a121ce4fbe06999e53f61"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_canonical_name_test_get_set_canonical_name.None_2.None_1": {"doc_hash": "21f9f1034af5f175b15ea25d58d8092b536b6a0ef560a08b905c3c23de73f03c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_roundtrip_test_schema.jsonschema_validate_confi": {"doc_hash": "1cc18b9fcf4e9ef8847a20d284fd53877812408b884c724a48b8d888686f012f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_foo_": {"doc_hash": "498e4593e989a461a883879a4bdb6f5b562af6c769c45d4e2f5b56ed4454b69d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_has_tasks_test_has_tasks.None_5": {"doc_hash": 
"27c4dbffed6d5c2e373fb0d54e6d6cc7fcd2adbf9c8ccc6992b07982714ee320"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_preorder_traversal_test_preorder_traversal.None_2": {"doc_hash": "38b86cb05172c2cbcb00d506fa515e3766e31475572201026f326def238f97ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_TestGet_test_get_dependencies_nothing.with_pytest_raises_ValueE.get_dependencies_": {"doc_hash": "0ff5bd241734bc19d548b424254faa288314c4f34f0238422dd6a9b2eb65bce9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_dependencies_many_test_get_dependencies_task_none.assert_get_dependencies_d": {"doc_hash": "453450ce16e82300e3a939e685dcf70c11b8758d27c6b191d04375ce3b040bf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_deps_test_get_deps.assert_dependents_": {"doc_hash": "388f98506002903616c2374c8eb1e923bc6e5a4c160072a5ff9b6d5b7c78f8ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_flatten_MutateOnEq.__eq__.return.False": {"doc_hash": "2d61a2e203ceb066364b64d1eb93c41613a103ff42d6fc1c1beb7241c0b7ba0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_no_key_data_eq_test_subs_no_key_data_eq.None_1": {"doc_hash": "abd22c191606d563518ef9c0195a94eda4dce02b8176df78e18210e3dba11bad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_with_unfriendly_eq_": {"doc_hash": "ae729a2bae4e1fe0991445ea62aa4f880d81f5284b06b1321a2283c284b33c95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_to_task_dask_test_to_task_dask.assert_dask_x__dask": {"doc_hash": "ded3f3eb7ad23dff9613cad9f89edf8a2148f831d4e39396f23ae7797054778e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_test_delayed.assert_a_key_in_b_dask": {"doc_hash": "0a9be292c3d5e8389e8b552001fcb3fd725395ceb35eb9d0f4087dcac5eb20f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_with_dataclass_test_delayed_with_dataclass.assert_final_compute_": {"doc_hash": "c6cb675d44b775406245baf62f8a896276924f2027e917ef7cc128fa629e842c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_operators_test_operators.if_matmul_.assert_eval_c_d_co": {"doc_hash": "608d837a828379ff8e99a6b84d671a8a4316c91fec8edd6dc1a35b233ce3aa51"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_methods_test_np_dtype_of_delayed.assert_delayed_np_array_": {"doc_hash": "e849f706307210cadad0c80b4e29a7277eaaad326fe417a334cbe63d221880fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_visualise_warn_test_delayed_visualise_warn.None_1.z_visualise_": {"doc_hash": "6abbdbaca5bfd05f597c00894ead63ebeda4af3278ceb6d386107a3869bc2219"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_errors_test_delayed_errors.None_5": {"doc_hash": 
"97383ab834f3c07d9a0d9b68b95b4de990c5a7fc5ffef58f24cffb2fa81e4c7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_common_subexpressions_test_lists.assert_c_compute_3": {"doc_hash": "8a7c354e75333b9bd20f0b09e32121402c58c4599c5d56572de8c2638c5addc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_test_literates.assert_delayed_lit_a_": {"doc_hash": "d06230935bbe67c5d44865ddda29171dc812d1717d8c29cf7c59b258c028c8ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_keys_test_iterators.None_1": {"doc_hash": "4a651de49e41788f490a270695fafe6a1a60e182746067893bb0a12ca9e55bea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_traverse_false_test_pure.assert_myrand_key_my": {"doc_hash": "4b9a103d42218607d84316bfa5be1d380f906d64882900fcf4dc163c9b0e4ee8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_pure_global_setting_test_pure_global_setting.None_7.assert_element_element": {"doc_hash": "d60c3d22e6f3648e5d74d1bbeb494c7783fe8f90758bce3852575021ba0bd12d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_test_nout.assert_x_compute_tup": {"doc_hash": "a1dfae102043704f92ffa8d8c2f5e9b9671d99fd7371ecf3a7394f0636732be0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_custom_delayed_test_custom_delayed.assert_compute_n_x2_x_": {"doc_hash": "738bad5633d8f9237ac2130c60c9f72282775295324f94d6dfe7f14663e69cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_delayed_test_array_delayed.assert_delayed_arr_compu": {"doc_hash": "3b88a2b514ec3507cde3bb77a907ec488a10fe287ae41d080dd3edd7bb553e43"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_bag_delayed_test_array_bag_delayed.assert_out_compute_2": {"doc_hash": "700e884c85ba7a3394c96679cdb37adafa6e13a52112ca9d32942fbf59eebade"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_picklable_test_delayed_picklable.None_10": {"doc_hash": "3fae957077df9e9ae2b6ce8187c274a10ffbcb52cabb777839a00878b53be9f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_compute_forward_kwargs_identity.return.x": {"doc_hash": "68804a77743e7e205007bed5f1e0b87aeb98b443a7b8d392c019efe6e1cda9a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_name_consistent_across_instances_test_name_consistent_across_instances.assert_func_1__key_i": {"doc_hash": "fca4754191b99a4857ff0eedc4167d2832f321aff8ea69544fcd7892ed252930"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_sensitive_to_partials_test_keys_from_array._check_dsk_xs_0_dask_": {"doc_hash": "608cdd5550d08b65d64168995f3fc54bf6e5dd3b392f0462351fa1c3a5d6651a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py__Mostly_copied_from_http_test_delayed_decorator_on_method.assert_isinstance_A_addst": 
{"doc_hash": "3a01207dfd45731cdab59bc5f5dc8bc966f1df18adc6b07f16e23693a83a0ca4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_persist_nested_test_persist_nested.assert_res_2_4_5_": {"doc_hash": "e53c4b24f2866df84c4d4ebe6a60e490dabfb7a7e2f727812be1ced6ee594f9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_dataframe_test_futures_to_delayed_dataframe.with_pytest_raises_TypeEr.ddf.dd_from_delayed_1_2_": {"doc_hash": "537286e7d4bb9cfb89685467d1918bd47ef80fea69719a99922e9d1d1708af30"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_bag_test_futures_to_delayed_array.assert_eq_A_compute_np": {"doc_hash": "36c68476a15cf23ffd72a4797ce9e85e203ab3a87b79ec6047cc16cdc93e0707"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_local_get_with_distributed_active_test_to_hdf_distributed.test_to_hdf_": {"doc_hash": "2cb6f5030342c419c4cb5c3226104f0191df871cf82b208348d414f44ec09946"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_hdf_scheduler_distributed_test_to_hdf_scheduler_distributed.test_to_hdf_schedulers_No": {"doc_hash": "63aa34927608b9d51c3bb06e73d8e582072e063ef9cdf6d7ddfed02cdef09f5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_graph_test_futures_in_graph.assert_xxyy3_compute_sche": {"doc_hash": "0fbd94208d389cd9b2ab5d0fc425e06b0b9386ae4f76114c47fb641ea370f36a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_label_test_label.None_10": {"doc_hash": "24d01e798113b7f09b31fa5472b34a5e39eb07c394429ea1ef04ae795655cab6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_with_unconnected_node_test_to_graphviz_with_unconnected_node.None_4": {"doc_hash": "8b07fe228f479a560f25c1311746e6e65a59928505b695688946735bd8cfe3de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_test_dot_graph.try_.finally_.ensure_not_exists_target_": {"doc_hash": "d4bb7ef0f22ae82ffe7496ee68171bcfe9aeebc0c23aba179b448d214332f1bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_filenames_and_formats_test_filenames_and_formats.assert_isinstance_result_": {"doc_hash": "1cd1d361e0aacdd4ad25558dd445ca03e865e384bc89a9749f16a26a6d6920cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_delayed_kwargs_apply_": {"doc_hash": "162f5985a0baa38a40e10463201f5b3d96c4720a727527f2d060ed967dc8d63a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_hashing.py_pytest_": {"doc_hash": "9b28c9eec62411f43e97da4bbbc8210f164c18fb08c536edb1eec8923cecf124"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_looks_at_cache_test_start_state_with_independent_but_runnable_tasks.assert_start_state_from_d": {"doc_hash": "ac95385d64107985fe0e093957a73d75e6abbba85e89e84167941bb06f16a7f8"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_with_tasks_no_deps_test_start_state_with_tasks_no_deps.assert_state_dependents_": {"doc_hash": "1c495adc3fa53c5bcede837a6b2772421c6997cf6d36c08cf4ccbf73648b3bce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_finish_task_test_finish_task.assert_state_": {"doc_hash": "612cd966b630d143f5c8f190be74990b94f1cfd46ffc3c64a1d715c6c8512b73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_TestGetAsync_test_sort_key.assert_sorted_L_key_sort": {"doc_hash": "b19bce03df07cb14d0dc388f9098a7b63dc6efbe9410ea2d64fd8009a71022a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_callback_test_callback.get_dsk_a_start_callb": {"doc_hash": "f6c80a34ed4302010c13f52192bb1d81f8b07d5f906aa08c728e77548c47676b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_out_of_band_pickling_test_out_of_band_pickling.assert_np_all_a_a2_": {"doc_hash": "aca748449bbfc78df960a7a7d95c46a39f406a51c72ea8a69aaff6fc41ddaec0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_bad_test_optimize_graph_false.assert_len_keys_2": {"doc_hash": "e8eae128e6f43040e2e19367d1d7e1f85a76ca39984dfd428a737fbfd857ae62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_random_seeds_check_for_pytest.return._FAKE_MODULE_FOR_TEST_in": {"doc_hash": "46ab88e1c4674ed8a0e797d29a9bc4c3232ab2a382367fd4ba5f26cde2c88f3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_used_python3_posix_test_custom_context_used_python3_posix.try_.finally_.del_sys_modules_FAKE_MOD": {"doc_hash": "ca0cc36eaa40d66e240ae36606ba8433fb2b096a00dfba766e4a99666c684f72"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_get_context_using_python3_posix_test_get_context_using_python3_posix.None_1.assert_get_context_is_m": {"doc_hash": "936fba02bde24bab2fa022b63170a3d8149418caae5768206cc3f6669d0c5259"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_ignored_elsewhere_": {"doc_hash": "9588f00c0ded5755ad20b5cfca576dbdec441c29dbd586aafa02bf40fb6a39a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_test_fuse.None_5": {"doc_hash": "e6b7ac602cd25e98d721c5bc9f3acf3dd93e6c2037130d8a2a9a9aa0ce560065"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse.d_4_test_fuse.None_9": {"doc_hash": "dcc64d4cf2eaf4261081543417285cfbc61a1a16710655b87f03a7c68da37ff8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_keys_test_fuse_keys.None_3": {"doc_hash": "7586c3001052d99543dedf8a3f6fa5c1088a632a6d3674b9ad9c5f29faee6b79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_test_inline.assert_inline_d_a_inl": {"doc_hash": 
"0a65cb1790408a103ce5ed8d14d63269d0aa949e58a9e635a1d97c9271743c02"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_test_inline_ignores_curries_and_partials.assert_a_not_in_result": {"doc_hash": "5a265b0804b6cd2009a2d58a0f4e0f659142ed8fb0d163b42a71964990c212e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_non_hashable_test_inline_functions_non_hashable.assert_b_not_in_result": {"doc_hash": "600a19273c859f3d894210ded1956378d7e94fa844a912f48e24f2461213a294"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_doesnt_shrink_fast_functions_at_top_test_inline_traverses_lists.assert_result_expected": {"doc_hash": "acb47c1874b3b8db05d55d7bbe796af1299e54a3260a684769784e695e12fc01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_protects_output_keys_test_inline_cull_dependencies.inline_d2_b_depende": {"doc_hash": "e313f09373311b0667b3300116b1bed3d8823936155bf7b643c9e8f31f66a816"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input_test_fuse_reductions_single_input.d_3._": {"doc_hash": "d22a85a590b8399c4ff7573c612d918da7163761a0edf0b99f9dded118135392"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_12_test_fuse_reductions_single_input.None_22": {"doc_hash": "9231fac71f13aabcd3881d4b6ab6daa36d788cd9b4f5cc1ec8c5dcbad38facd4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_23_test_fuse_reductions_single_input.None_27": {"doc_hash": "857ac7797281830dc0381365f80a2c9d954f564b15cd714be59389121e7e3ad8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_9_test_fuse_reductions_single_input.None_34": {"doc_hash": "17810043500540132fe16aa29971333c019eccd0c3d2541c5841237a6c362874"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_35_test_fuse_reductions_single_input.None_37": {"doc_hash": "efd9aa0bce7e0b026e232faf5af070c9be168d2e4369465848ab753f1170c1b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_13_test_fuse_reductions_single_input.None_39": {"doc_hash": "43a07e337f5ad657572f1654706c026c3ae76d4bfe5d3994c2a118857ca70256"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_14_test_fuse_reductions_single_input.None_43": {"doc_hash": "bdda57ad66f4f72ae71fc1f8c3c4003a65fe32bd394d1ab17ad6a05b628268cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_16_test_fuse_reductions_single_input.None_47": {"doc_hash": "70a70bb986dbc357237ac02daf6246da94c13920d8e8a3750524ab8b5be59a00"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_18_test_fuse_reductions_single_input.None_50": {"doc_hash": "7b363e65101c988ee5261776a6896528a839c08f2c395cf04e2b321775fa03eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_51_test_fuse_reductions_single_input.None_57": {"doc_hash": "89be61853cfb41b1278fcfde8388a34eb4e809097832c57a1580bd09bdd20cb6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.d_22_test_fuse_reductions_single_input.expected_27.with_deps_": {"doc_hash": "ca6cd36000bcc9030079d2a5d0553677de7185e37b3d14e5d1aab0353cf67535"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_66_test_fuse_reductions_single_input.d_29._": {"doc_hash": "38bf72271a9e6cb8e09158cb2a5d1876a2556aed80139f6e4df0bd4117ed52cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_72_test_fuse_reductions_single_input.None_73": {"doc_hash": "e46a7921d70cb52fc729beb6aa066563d958be143698a39ad04576f404c6b385"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed_test_fuse_stressed.d._": {"doc_hash": "a1d397e9ab1b94dd0ab83ac9b1eefbdd43d40dd8d62e3496cfecbfe8927b922f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed.keys_test_fuse_stressed.assert_rv_with_deps_rv": {"doc_hash": "30e4606c0d472073715654c60134400d529f92879edc25cc486fb46a2940e5a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input_test_fuse_reductions_multiple_input.None_11": {"doc_hash": "e03d2cd4670e0ee632196c8e9001c76371b4965ec087aca6c5fb17c0b5667563"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input.None_12_test_fuse_reductions_multiple_input.None_17": {"doc_hash": "cd5014ebf5c1a4448c5e7b3fe9ab28b97e4914daaa55c6be82f3256716285b7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_func_with_kwargs_test_SubgraphCallable.assert_f2_1_2_f_1_2": {"doc_hash": "091723ae5875d9e048d1b6562168c0a40c3fa072cccf2caea8e5ec773ae97aea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_test_fuse_subgraphs.sols._": {"doc_hash": "e745c80460987fd033f758d42521750a93f8e4d7c96a823726721702dcdb8873"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs.for_inkeys_in_itertools_p_test_fuse_subgraphs.None_4": {"doc_hash": "696d042996ffba48f3017c3bae7fab07d28953009877f2d5b446440a1fc65aa3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_linear_chains_of_duplicate_deps_test_fuse_subgraphs_linear_chains_of_duplicate_deps.assert_res_sol": {"doc_hash": 
"e9528eed928dd8535709da38fa3dbc16dc69d86b825a9e803b39078c9b9fb507"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_dont_fuse_numpy_arrays_test_fuse_config.with_dask_config_set_op.assert_fuse_d_b_depen": {"doc_hash": "787cfac79e92ba86f2a57a7d3d7dab553cf5154c8b5209b0ab22f0a3e58a50ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fused_keys_max_length_": {"doc_hash": "b8c6e91999e7650c1627db571b0f7a378776ab36609b417264d64a21cb345ee5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_pytest_test_ordering_keeps_groups_together.assert_abs_o_a_1_o_": {"doc_hash": "e23fadeb00fe336b8a50ec0a623a13c63733eee6784edcf31edb9e655a78e863"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_broker_nodes_test_avoid_broker_nodes.None_3": {"doc_hash": "aa0ed2b9ded9667d53bc4a2fbfda2d97f26a90a2032f56655154b0b670a4b0dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_test_avoid_upwards_branching.assert_o_b_1_o_c_": {"doc_hash": "3d0a91bf8375de60fd6ea8fabd992a40976f3ad50472d1ccfb3a8bf4ac70f4a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_complex_test_avoid_upwards_branching_complex.assert_abs_o_d_2_o_": {"doc_hash": "bd549ced7944723534bc73ba7a89b7a28440070517611995b15f7be1aafaa5a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_deep_bases_win_over_dependents_test_deep_bases_win_over_dependents.assert_o_b_o_c_": {"doc_hash": "357ed23880c3713eda113fe37ae2b5dd12d03038513ed03782a127ae2cef1eff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_deep_test_prefer_deep.assert_o_b_o_d_": {"doc_hash": "c7ad0109d75fa6f509c072ef0d8b67f58ab415192600b67874a5098ec897db36"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_stacklimit_test_order_doesnt_fail_on_mixed_type_keys.order_x_inc_1_y": {"doc_hash": "3b7f24bf598daf3812bd1d55334c61d5c6aa4a862ad134e16338ae5da0829ab8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_gh_3055_test_gh_3055._operate_in_order": {"doc_hash": "3545d5e61051bb05980d55de6c547ee74856d44d47f4bdfff8aedd9346272670"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_type_comparisions_ok_test_prefer_short_dependents.assert_o_e_o_b_": {"doc_hash": "cb8c62a22955fffad9075f1f769129b37cf1a8b767af612f17846f969e413373"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_run_smaller_sections_test_run_smaller_sections.assert_log_expected": {"doc_hash": "d70925bbd98093b213d7bca0a2b2b473383cdaf2655240290b7daaaba3e1683a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_local_parents_of_reduction_test_local_parents_of_reduction.assert_log_expected": {"doc_hash": "1aaf40637801df268d499199501c9255faf203a2cf7ce6d27261642dacc51bcd"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_nearest_neighbor_test_nearest_neighbor.assert_o_min_b1_b2_b3_": {"doc_hash": "2cce6e085f04ea380fdef1cc35bca845cac50354fdbf010dd5d8e8da5b58b96e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_string_ordering_test_string_ordering_dependents.assert_o_b_0_a_": {"doc_hash": "f59a9bc535ef855712d717f9963385cadc9778006f1f74b218d85529784435d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_narrow_test_prefer_short_narrow.assert_o_c_1_o_c_": {"doc_hash": "1f4585908698272f5a60dcabe38e67d43c4e43d31fadc657d3dd5fb954a38853"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_ancestor_test_prefer_short_ancestor.assert_o_c_1_o_a_": {"doc_hash": "ddc9414387ac7e1a9251187b61865d1c179060b046fa88e468a96e88f5829599"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_map_overlap_test_map_overlap.assert_o_b_1_o_e_": {"doc_hash": "328cce5d782f1aece71f069c4ab306e3a703ad10c593e9c63295131a59547eec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_use_structure_not_keys_test_use_structure_not_keys.if_Bs_0_3_.else_.assert_Bs_1_3_5_7_": {"doc_hash": "bbe9e1c19779e8cfbecd8ca4dd22d828e04b04b51bb03b5bdfa3510924d51f9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_dont_run_all_dependents_too_early_test_dont_run_all_dependents_too_early.assert_expected_actual": {"doc_hash": "1ef5c90e3ff2d80f2a635cb7122065a04505bf4e56af38bc9b275cfd7cdf7ee1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_many_branches_use_ndependencies_test_many_branches_use_ndependencies.assert_o_c_1_o_a_": {"doc_hash": "0217ed553afd3aa961694ec847a81aace7ca27e3157534275622e07fc9baca37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_cycle_test_order_empty.assert_order_": {"doc_hash": "a1cbc8bd129468e380e89cacd583f70903b738cfcc1255c9c1f90f33261fedbf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_switching_dependents_test_switching_dependents.assert_o_a_5_o_e_": {"doc_hash": "48cd0f2e5827210334c2957a7b0f5f68f178a3a2ece2b9d2d2e2cd440f78b0f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_from_dask_rewrite_import__test_args.assert_args_1_2_3_": {"doc_hash": "1abd2e4f9ccfa4e4dc955b2469628ad13262ff023f496b76712da43b2e508aa1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_traverser_test_traverser.assert_list_t2_add_": {"doc_hash": "9a16791aec3529c77b68e7c0ef1bf392576e71892fc3b74027fdf8627e1baa8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_vars_rule6.RewriteRule_list_x_": {"doc_hash": "ec5290f4e0ad43bcf8207b77d9e80872d539668777c2deb79a8fb5429e08e187"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRule_test_RewriteRule.assert_rule5__varlist_": {"doc_hash": 
"019245b1f0ce4ce32e4931de5b492bbd39aef3b23fb6540367483bd8bf7496fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRuleSubs_test_RuleSet.assert_rs_rules_rules": {"doc_hash": "96a959fc9181f2a966f7ac2e298584c40ef509b0891218f5d42849d9b6305941"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_matches_test_matches.assert_len_matches_0": {"doc_hash": "0431cd8a176d77cb942e79d27990c36544d7b54448563806b099b73a93642d31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_rewrite_": {"doc_hash": "9bcd6b237394e1128f2f42621bc9f034851bae3b24495718da7fc8ec34bbf678"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_sys_test_numpy_0_strided.assert_sizeof_x_8": {"doc_hash": "2bf1243aef15245411f9235868507e97892de0ea69104c294e1ee71cc9e4536b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_test_pandas.None_6": {"doc_hash": "18c5b26bd4a2ffb28903edb86d0d92166980b4d25fb280d4e3ff23f95bc3d7d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_multiindex_test_pandas_repeated_column.assert_sizeof_df_x_x": {"doc_hash": "64ad1e4680fea9842fb8ffa8c6f49952400dfbdff83ec311561376411554f8bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_sparse_matrix_test_sparse_matrix.assert_sizeof_sp_tolil_": {"doc_hash": "4d900f2face5b92dd585b34389ea7561b4b73ba96b79d1df3815559b338c3844"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_serires_object_dtype_test_dataframe_object_dtype.assert_sizeof_s_100000": {"doc_hash": "cb462f15d7804fae66440e384790f464549727c670edc61480e4dc1565449580"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_empty_test_empty.assert_sizeof_empty_index": {"doc_hash": "031586606cbeac6806d39b943872ded2573d62d91c7ce9fb561571f190ca4d1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pyarrow_table_": {"doc_hash": "3da3ecaa572048726db1908f985ee44825803d2d5e17f4f32247cf79ceabcfb4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_system.py_builtins_": {"doc_hash": "14b381ec659f69d5c9169ccf271b61fd1f1eccb5f105955c84b497e02616c677"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_threaded_within_thread_test_threaded_within_thread.while_threading_active_co.assert_time_start_5": {"doc_hash": "89c5984a535edab4f5b62d5806b814dada74fd53998578faf58a320b50c9989f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_dont_spawn_too_many_threads_test_thread_safety.assert_L_1_20": {"doc_hash": "819b79504d3b6cad4985c25159c1fdd88cc40b34cc0b2f00e24a67651016de9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_interrupt_": {"doc_hash": "6c12a4287ed5cd0189ca1a1e557a977068c5cb6b5833c095cbeaf6168abf9661"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_getargspec_test_getargspec.assert_getargspec_MyType_": {"doc_hash": "2335e0184fb31027f20067c727a8368a3f1c696e0879873b650bed31f5c120ad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_takes_multiple_arguments_test_takes_multiple_arguments.None_7": {"doc_hash": "521325015b6ec9731660cca72932528ed0639d7e8f127d13b282893f2e6335ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_test_dispatch.assert_foo___doc___f__": {"doc_hash": "8d0acdc99fb034d7e70eced77875d5cff498a737384dcf084647186a775a1081"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_kwargs_test_dispatch_variadic_on_first_argument.assert_foo_1_0_2_0_": {"doc_hash": "372df096fb5d9a0cf281fe65322a9c8bb875bfe4f454d2c2e292d51383f1393b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_test_dispatch_lazy.assert_foo_1_1": {"doc_hash": "d13f5cee190c52e60cb16a945f7653cef960571f06688812af9fb805a85be44d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_random_state_data_test_random_state_data.None_1.assert_s1_s2_all_": {"doc_hash": "4cffeed891af73ac9537f3bea171c5dc15cdc25633c9de9541792218836b20b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_memory_repr_test_method_caller.assert_count_in_repr_me": {"doc_hash": "a6e1ef05d67e6c8f5e59a3739c7a3b4e77009f376e2db8d516e12eaf2470e756"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_skip_doctest_test_skip_doctest.assert_res_expected": {"doc_hash": "cdea1416d156e484176a66ace470b16df0c1847c5a0f3dd73d8d439680492f9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_extra_titles_test_asciitable.assert_res_": {"doc_hash": "357060ca9378cd58270706e7084cbec93570a06f56010140c6947953d25f4203"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_test_SerializableLock.None_4.for_y_in_b_b2_b3_.with_y_.with_x_.pass": {"doc_hash": "058416eabc107817d87b00ea18b72020eddb178ee612577289d8a9e03af22fbc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_name_collision_test_funcname_numpy_vectorize.None_1": {"doc_hash": "862140b0f24d0adec3f44d3399a6dff530b9df97efeb051386147ee329c84903"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ndeepmap_test_ndeepmap.assert_ndeepmap_3_inc_L": {"doc_hash": "89e502df51e93986205c785f4c8099b503fa04799365d8f77725689c6b55bd9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ensure_dict_test_has_keyword.None_4": {"doc_hash": "79b14d3f9893dc68ffa431578353022a2cc9883ce2ac53b1d672afd09510ffad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_test_derived_from.assert_extra_docstring": {"doc_hash": "7e7adc5dcef1a0684aa47dab5a4ca7e7d7dc154080d75305db24d1baaf51e46e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_bytes_test_parse_bytes.assert_parse_bytes_5GB_": {"doc_hash": "5685270c70abbf95c44e532fb815aed00a10bce378cead95b38c7e00a4ce053c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_timedelta_test_parse_timedelta.assert_parse_timedelta_1_": {"doc_hash": "28cf7c3e2842ab62a28e515d7f868e1c16c2bacea833a088ff95d843f8f6114f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py___pack_exception.return.e_sys_exc_info_2_": {"doc_hash": "9935b805110cba47dc724bfc015371b87734936f8d60337ebbd2ce052a005ac7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py_get_": {"doc_hash": "fced364befc2aa5279232c9e60e5c24c6d13b0e7167cdb65f81a846200e0119c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_deepmap_deepmap.if_isinstance_seqs_0_l.else_.return.func_seqs_": {"doc_hash": "b40aade22bd829b2f41d4c78956aacf7b6e702e899f264ac8760c4cdf0330d70"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetexts_concrete.return.seq": {"doc_hash": "29c0b3a2f78646d770e516696452dd1bb1d2a941ca295e8836dba30cc4318d1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_pseudorandom_pseudorandom.return.out": {"doc_hash": "cf58f910afa1198efe0d3b0471a8470e3058510743e546b38ba8cae40735f746"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_random_state_data_random_state_data.return.l": {"doc_hash": "694d05aaab692e72175f51aa4074caabb6c17792d3f16b61cb0289aabe296159"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_integer_getargspec.if_isinstance_func_type_.else_.return.inspect_getfullargspec_fu": {"doc_hash": "f92c310397b27c38f8c3f9b9c1727125658ace0181d8da73ad8231445dd565ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_takes_multiple_arguments_takes_multiple_arguments.return.len_spec_args_ndefault": {"doc_hash": "03056ca80e238a53246cbca7eb9002198e7cf09adbf81f3d19b159268bc931b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_named_args_Dispatch.register_lazy.return.wrapper_func_if_func_is_": {"doc_hash": "9541dd55d9b90f59f4c5086d5918cd6dd1a08309a45447fe81c6bad9c9f2c641"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_Dispatch.dispatch_Dispatch.__doc__.try_.except_TypeError_.return._Single_Dispatch_for_s_": {"doc_hash": "5f2c61e79c8b20add1b94db8ac2c6e011b85d77664632d0d4576f56ecbefeb0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_not_exists_skip_doctest.return._n_join__skip_doctest_": {"doc_hash": "909143ffbc94d024b44789ee576e9bfe43d4c82037d996b740ee5a5a4d53b8ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_extra_titles_extra_titles.return._n_join_lines_": {"doc_hash": "972ef85d5459c395ac7c2b180d23d3e4f6aa59ea41f1f1be0fb5d8fcdf6b16a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignore_warning_ignore_warning.return.doc": {"doc_hash": "e9375482fe27a344704cc1a2f12d38708085be005eda67e8c28501f4ed4171b3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_unsupported_arguments_unsupported_arguments.return._n_join_lines_": {"doc_hash": "8b63510e4b124e140650fa4e8eeed5a34f8f26bc849e4e7230de7fb42e8631a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__derived_from__derived_from.return.doc": {"doc_hash": "de58eeee007396a421f9928177f96b9d593753bbdefb97a061cb48c9a566d1d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from.wrapper_derived_from.return.wrapper": {"doc_hash": "05f5393082889ba3fd9ada983c561cd9302617fc8f2df8e7b3a26a65e2ecca96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_funcname_funcname.try_.except_AttributeError_.return.str_func_50_": {"doc_hash": "5b4dadc7ec906ece6a05575a0fed3646430cb83a023de3ab2e8e74d99201348c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_asciitable_asciitable.return._n_join_bar_header_b": {"doc_hash": "b41737cfcd53ca1ee9d34fcde249575665374a69ea7c04689ca15f9cafedb63c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_put_lines_methodcaller.__repr__.__str__": {"doc_hash": "6f8d0a4c9f854ecdf4aa09118851c9dee7cb9ca4728436ae43d415f852bf3196"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_itemgetter_M.MethodCache_": {"doc_hash": "514bf788cfe3b09a88a0141f38362f493f40af57dfe9c98e607606f8db68043d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_SerializableLock_SerializableLock.__repr__.__str__": {"doc_hash": "08b32ee9e695d6e43e6f01b13d458fae7ec0b27a7c8319e4a9e4e18ca0b3ccd8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_natural_sort_key_factors.return.set_functools_reduce_list": {"doc_hash": "143f3d82630ca6abc4e41e024f9ab4d69c5bef00ffef9c0f04a375da57f4f5bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_bytes_parse_bytes.return.int_result_": {"doc_hash": "c62f2b5c10454d3334702e83c668bdb31cfd3bb717471ae648ecd88887a06fcc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_byte_sizes_byte_sizes_update_k_1_": {"doc_hash": "e8bc03a11e408f6faed8dd8b2cd7840d525abb4a2a50d59fabf8e98d5d17ff07"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_format_time.return._2f_us_n_1e6_": {"doc_hash": "22e701d7ab5b5dfc36831dabfbb7ba6cc2da89e94c6ae7eba74279cb93595730"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_timedelta_sizes_timedelta_sizes_update_k": {"doc_hash": "ecd9e2398a29878fed762a152caa4ed9982fe19ba0be7edddbc319d43745c00a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_timedelta_parse_timedelta.return.result": {"doc_hash": "e1cb18be40dddeff80d1ecbc81c6a8003dd0e1814ce9cfec0f863611dfa7c1bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_has_keyword_hex_pattern.re_compile_a_f_": {"doc_hash": "54cd2a5240e7b5e388ab0d2cafdd1cca647635cdf3f02bd63eac31deca76fa5a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_list_top_level_GetFunctionTestMixin.test_get_with_list_top_level.assert_self_get_d_f_": {"doc_hash": "13669813930e18f97fee61ec1410babc84f8b3232a92737da6d14f5172e9e92c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_crosstalk_crosstalk.return.d_x_height_1_i_": {"doc_hash": "12fc078c16c06e3beff0a5b73c727194ee0a1773a17e9c5b398997e4adc684d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/setup.py__usr_bin_env_python_": {"doc_hash": "851edc5f2e468593c41e9a9c9b5e08ece7a5a57094f372e3c3873e5d89ed99b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py__Version_0_16_get_root.return.root": {"doc_hash": "21653266f74c86abe85af4ee25d836a2c81a87fefe13c9e88fa6bb9fde3196eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_config_from_root_get_config_from_root.return.cfg": {"doc_hash": "7fb1993372c3af9c182d4191beb3cb5b4cdae71176b6ecbf4ac395b784fdd245"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_NotThisMethod_register_vcs_handler.return.decorate": {"doc_hash": "43bef215b2433ff65353b118c8a163851a0e04c2280c904d860ac36ac0926449"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_run_command_run_command.return.stdout": {"doc_hash": "2e991c2c28f04f49cf83a9dbee0ae1cfbc1f80ce234795a2f0125e038e743f8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_get_keywords_git_get_keywords.return.keywords": {"doc_hash": "3316280692618c677d5aa8901dc62fc3f189ae37d48e118022ede4683954ef01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_versions_from_keywords_git_versions_from_keywords.return._version_0_unknown_": {"doc_hash": "4679d674c0e8d65ff1411878d80b6d4b445ccf104384c2b2ae527debab34a341"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"doc_hash": "f1e6ef95c42f54a2b585f9ba794b72144071f5992ec8a611aebe089457b2ee48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_vcs_install_do_vcs_install.run_command_GITS_add_": {"doc_hash": "1ba0570a9eb06c5d66fbb27b5a63222e58e92c39b3042f87657d22cfa55103ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_versions_from_parentdir_versions_from_parentdir.return._version_dirname_len_p": {"doc_hash": "019abc54e0c5d0037fdc73017eb449f4f36b174fcd53f2ddec66612636e3488b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_SHORT_VERSION_PY_versions_from_file.return.json_loads_mo_group_1_": {"doc_hash": "c665c7edac429ab58f1002f87b7ed7a19cc5e8332cec9641a96ab069cf4de0b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_write_to_version_file_plus_or_dot.return._": {"doc_hash": "fe8050f92fdeaefeb6db1a26eea765793cc0e32671a4b687c377140986314354"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_render_pep440_pre.return.rendered": {"doc_hash": "62f8dd7acee0867457d7b07d3e6edcf3260a2a189611ef33381ac724528d4433"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_post_render_pep440_post.return.rendered": {"doc_hash": "118807a5c2a97d74d8fe5f796e9018ac18aadcc9b3182b6181ecd25e8dcbd0d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_old_render_pep440_old.return.rendered": {"doc_hash": "6c6b4eb402371d591eeb0abb50ada6698ebe88b10543f867cefca40fd086f508"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_render_git_describe.return.rendered": {"doc_hash": "a3d9b39392875e0334af8c64b46fe88c38f8d51c8b168839e4ae228db30b0daf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_long_render_git_describe_long.return.rendered": {"doc_hash": "cf56b90f4c7b7ac99479cdeafeebaf306a0ab42713bb536dffdfe85f0a8b1c00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_VersioneerBadRootError._The_project_root_direc": {"doc_hash": "2b30b66ceb0b36870dcb6210c247df913c36eacc6b4774a674a3d9084ef0138f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_versions_get_versions.return._version_0_unknown_": {"doc_hash": "9988fb0669308b1e2cf4052238548fdd26f5482f359484637de2875d682cae3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_version_get_cmdclass.from_distutils_core_impor": {"doc_hash": "b46b8ebb3921cc678793c9ac073ce2ccfa012b4f230b5a81c900c42f0a97cf9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_version_get_cmdclass.cmd_version.run.if_vers_error_.print_error_s_vers": {"doc_hash": "690b93737766ad4fb1256933abef4a8b7507e8aa3ff75a426d910969a64d4b3f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_version_cmd_ver_get_cmdclass.if_setuptools_in_sys_mo.else_.from_distutils_command_bu": {"doc_hash": "d2515232f0cacf396d1c894d38fa2cf264319056056cc5e8fd23bfc1b8d8028d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_build_py_get_cmdclass.cmd_build_py.run.if_cfg_versionfile_build_.write_to_version_file_tar": {"doc_hash": "a3e61fb1c123a544a0866cae1590acbd50ccfa6687b2bde1c15463c1eb266a0c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_build_py_cmd_bu_get_cmdclass.None_3.else_.from_distutils_command_sd": {"doc_hash": "b1065450584f405e4c58ab87b443417917886b3a0bb0edbf1ef2bdf1b52de9f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_sdist_get_cmdclass.return.cmds": {"doc_hash": "8282207f276c680678f0dbfda07ca1696ba9602de8b75e07d62c92859164fa4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_CONFIG_ERROR_INIT_PY_SNIPPET._": {"doc_hash": "d06ca46b62d1ae35674a46018618ee9c434fbfd9d1553dd0feba30be65ed1f96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_setup_do_setup.return.0": {"doc_hash": "dafa8d948287bbf959a0eb0fec748627094aba6b07cb77a7ef8f10a1ccc40ecf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_scan_setup_py_": {"doc_hash": 
"f5115215df36da299eab13a45e7bd03aac9f6b37b4275df1f509c308dfdbce03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_np_register_chunk_type._HANDLED_CHUNK_TYPES_appe": {"doc_hash": "aba9adbecfb8abb81316767d31df88c43c1ab720cdcfa47d4d914d380fdd4d3f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__should_delegate_check_if_handled_given_other.return.wrapper": {"doc_hash": "2497344ac9555bcf6332a58690a415c6cdee2e9ba6e7dc3a7c404f69cec21929"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_arg_normalize_arg.if_is_dask_collection_x_.else_.return.x": {"doc_hash": "43155acde636f11f7af9b1388b85e83bf8a9cfe1db9dd69c02ba08a483495c63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__pass_extra_kwargs_map_blocks": {"doc_hash": "e349936726be745b734884c57aa04b1f19ddbd26472eeae53e79787208cca24c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._Map_a_function_across__map_blocks._Map_a_function_across_": {"doc_hash": "840eba2c7701b27331177c19327f32c64c4b13bfdd1a0075dcfa468ecd6f6722"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.None_11_map_blocks.None_11.extra_names_append_block": {"doc_hash": "bda300357eb1ce95e39f1205f4b4aa994647761c1d1bea8be4297cb7f60340a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumsum_Array.cumsum.return.cumsum_self_axis_dtype_": {"doc_hash": "4935593e08ee2e45515920af95a94061edff285e41314a035e2d5386eb08260a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumprod_Array.cumprod.return.cumprod_self_axis_dtype": {"doc_hash": "bc9ecd5d76374ded4d1f51b79ea4f6c001a09e74e4b52c2e93442413b544f86a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.squeeze_Array.clip.return.clip_self_min_max_": {"doc_hash": "c1ba40e8fcbafba1a277e30807855986808eaf70cfd98f10593e7c36a5af4f7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_npy_stack_from_npy_stack.return.Array_dsk_name_chunks_": {"doc_hash": "73af496b4f74ee71de19c3140a3a80da12ec7088ab66e500fe1ec62f23e9b383"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_empty_like_empty_like.return.empty_": {"doc_hash": "ef7b9a57a53e7bb64ace4ce4a237f3b3fd7d8a04bc90311accd4dd342f9bc91d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_ones_like_ones_like.return.ones_": {"doc_hash": "2eaf8f35bb5cc34430fba44166f8c3f5bd92748bbd4b8f47e51bf04fed563526"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_zeros_like_zeros_like.return.zeros_": {"doc_hash": "124f18e83e1dc1fba32efca6c78cd63adbdb31d7c1bfec09d7d51db760386b44"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_optimize.return.optimize_slices_dsk_": {"doc_hash": "cd52a0b7e0f807928741ef607ba0a73c45adff555475f4d47c9313937e0fbf0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_add_dummy_padding_map_overlap": {"doc_hash": 
"612b8abe941f06e60865c50502124065bde45ce8def336185d9c061fcfca7159"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Map_a_function_over_bl_map_overlap._Map_a_function_over_bl": {"doc_hash": "38ab6fc848abf242287b04d94c15fe6a5e2214f38f1780b86c05a328f519065b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_chunks__get_chunks.return.tuple_chunks_": {"doc_hash": "372e26eb008e854b5a53648e44fd5f16b4776ba97d5dffb45f3445d2d298e1bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__balance_chunksizes_": {"doc_hash": "2e872e3321b9892cfaba3caf61fa70f46a93a47f308aa3c3f7d1ef1580f4c737"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__prefixscan_combine__prefixscan_first.return.func_x_axis_axis_dtype_": {"doc_hash": "5642efaf6ac9cc027cb75412af57fcdb1736928864aa55fc0a7b4a1774cd3996"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch_prefixscan_blelloch.level.0": {"doc_hash": "b7fb94f4d5e5bc18235e1ef42f8e0be61f35d251185dd9de5bb952256828c76d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch.if_n_vals_2__prefixscan_blelloch.return.handle_out_out_result_": {"doc_hash": "c924e405627651bf6ae738d58ec5d6ede7eadd3c8407422c0e4887d1657fd0e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction_cumreduction.for_ind_in_indices_.dsk_name_ind_m_n": {"doc_hash": "8046a6a7ea53f13ee7f95976abf663e5e493524729d04cf141e6ba9f796ef822"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction.for_i_in_range_1_n__cumreduction.return.handle_out_out_result_": {"doc_hash": "c045ff583cd01e3f37a9bf0b38ff6db92e9b79ca4266a7646d08111e2103cdb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__cumsum_merge__cumprod_merge.return.a_b": {"doc_hash": "52a74e4ab75aa7dd92a2e8aefeb798b653f24006d7371565323c798d3cdb65f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumsum_cumsum.return.cumreduction_": {"doc_hash": "351f11747e255c52ac880ebadd4b81705ca02c225b0484c05e15394601b4c691"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumprod_cumprod.return.cumreduction_": {"doc_hash": "17b771f61f09e3189e3f46d0f19cbe281f089db64cf500147cb1a1b075e584ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_chunk_type_test_direct_deferral_wrapping_override.assert_eq_res_2_np_ara": {"doc_hash": "5123ca2dcd7d6c0b853e40a2f90e11c18ca3e3881afa5f209e6d7b8c94b317f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalarThatUnderstandsArrayOps_UnknownScalarThatUnderstandsArrayOps.__array_ufunc__.return.UnknownScalarThatUndersta": {"doc_hash": "59ce974e17c9bd879227f783fed799a50e07950b9e3d20f01ed799aaf8ca1d38"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_delegation_unknown_scalar_that_understands_arr_ops_test_delegation_unknown_scalar_that_understands_arr_ops.assert_type_np_multiply_a": {"doc_hash": "3b51896941b3ff6c34e771c4f8e5b26aa6b966a7d15dc0b5d225f288d5f4747a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalar_": {"doc_hash": "9ed5704e001f0ea35d97176552bdfe08f0e3aee778f39ef406dd66c5c9679d84"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lstsq_test_lstsq.None_6": {"doc_hash": "62ceec5fb464870c839488d0c97d00b58b941810e12a6a863fb1cd1e4787f06c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_correction_test_svd_flip_correction.None_1": {"doc_hash": "67710b70cdda87f5ca574a1f965b7522990900fea789d9edb335de5e1ce43d27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_sign_test_svd_flip_sign.assert_eq_v_y_T_": {"doc_hash": "16baae31b26f9fa34f61cd66d19445fc1c253a3d9de48a3a4662b9e0b5d073cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_disable_lowlevel_fusion_test_disable_lowlevel_fusion.with_dask_config_set_op.assert_eq_y_1_3_": {"doc_hash": "a94dcd1efa8ee92fd4a6b80abbdd373220d3cc38f273062f59c69cca50199e8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false.assert_z2_shape_10_": {"doc_hash": "6679a1bf474d97bb5b2b3c21e8e1c46394848e8e5a56082aa9b4f3c58548ec09"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_deprecated_signature_test_map_overlap_deprecated_signature.None_2.assert_y_shape_3_": {"doc_hash": "b565d2c41a37348fa4583fe305861171978bce5f07e880898860fea71e95aac1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_heterogeneous_test_plan_rechunk_heterogeneous.None_4": {"doc_hash": "8591fd2eac255483334076f50b1bb0cd31943501bed59bad8768f27973e8d11a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_test_rechunk_bad_keys.assert_100_in_str_info": {"doc_hash": "6324a4711e1907a87fdaeff11fcb46b241133df043077753555a3a3b9fd6b9e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_test_balance_chunks_unchanged.assert_balanced_chunks_0_": {"doc_hash": "d71bb9e395d91e66180b38c02b1184c1bce35baaaee7f3a8c7385a6937d8c926"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_small_test_balance_small.None_3": {"doc_hash": "42b5e2038625331085bbe7c92d744499fb99e22c98b1aae49f894d062f0d171e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_n_chunks_size_test_balance_raises.x_rechunk_chunks_arr_len_": {"doc_hash": 
"bcc5eca8c9e72e772ce4a330a5d63c3ab258586175bd3c205d9a3113ff3db640"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_2d_test_balance_different_inputs.assert_balanced_chunks_1_": {"doc_hash": "b2e8828ff34e6631cd87b1c277730719a1ccef55337901eae0914a3870796e10"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_test_median.assert_eq_": {"doc_hash": "cc105ed6f5db2cca5b2d80c8dfc77c661f3c7bc6e6a2ae21115249502798ad91"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_unknown_sizes_test_reshape_unknown_sizes.None_1.A_reshape_60_1_1_": {"doc_hash": "2cc45ef107777dd432f297009f61f4d1db2025443c3b4ae7af12c222398d3298"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_chunked_no_merge_test_reshape_all_chunked_no_merge.assert_eq_result_base_re": {"doc_hash": "b14bcb58ee1314562663166b9fbf60c404ea0efdbb776b537d220424f9847120"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_test_getitem_avoids_large_chunks.with_dask_config_set_ar.None_2.assert_result_chunks_": {"doc_hash": "947c8f1b7e833c632d6027042b05a9002151418683d89c42ef9f996ec5bd93f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_missing_test_getitem_avoids_large_chunks_missing.with_dask_config_set_ar.assert_eq_result_expecte": {"doc_hash": "0f798bc0fbe41b0927fc585345c8c4785957d947d0e19389a55f742a4f2a459e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_avoids_large_chunks_test_take_avoids_large_chunks.with_dask_config_set_ar.None_11": {"doc_hash": "40e36d560d51c1176b212c7dc0589bbab0936c4f6cb09a7882c780a8a3a7a8ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_uses_config_test_take_uses_config.with_dask_config_set_ar.assert_len_dsk_4": {"doc_hash": "69fd456a10d72b38a7bb4ddfea39a4e06d0ac59da6809e4f1a37b68a67e68127"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_draw_sizes_test_draw_sizes.assert_b_c_5": {"doc_hash": "65295bbc5fb2f8bd2a0988d3885de43ced97f71a04ef9a11bfb856717bc223f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_too_many_lines_fills_sides_darker_": {"doc_hash": "a4ee4f854a87e61871ef292f566d16c4ee524189d0cd9c9e5a5ad00208117798"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_gc_test_bag_groupby_normal_hash.assert_even_0_2_4_": {"doc_hash": "11f87df43cf0f547f6447635781c756971628f75b8a70eb452e3258c5483fe25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._dict_Blockwise._dict.return.self__cached_dict_dsk_": {"doc_hash": "0b52ee6d318133139f2ba8d79850c7e9e45800166816b01e7fae98a171248d97"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_find_all_possible_keys_find_all_possible_keys.return.ret": {"doc_hash": 
"1338ed02e37aabcf8765dbd0571eeaf948824ea96be208a8a38959d3b751c2d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.nlargest_Series.isin.return.super_isin_values_": {"doc_hash": "a15d320ae2d1bb2afb33edc7ec114605f0bf5891cb18b186385f9fb38aaa65fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_list__build_agg_args_list.return.dict_": {"doc_hash": "0a2d635ccc4ca59d9b19028725441fcef6352c1d7db46780c2f267b268cd0864"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy_SeriesGroupBy.__init__.super___init___df_by_b": {"doc_hash": "ff65b0ae55ac57b2baffa2be5a7ef77645d4d27a443b3818739ebcbfb81820bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py__Implementation_of_Hype_compute_first_bit.return.33_bits_sum_axis_1_": {"doc_hash": "c1173253d7e27c51fa855b37a69d39d3e62803ba46aa8a49f5a4a7a6f660a526"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__meta_from_array__meta_from_array.return.meta__constructor_data_c": {"doc_hash": "ae10bbe36444859dc2bea57a5cbda7d5201fb882ee72c9cf143aafa94fd0cbdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql.if_not_isinstance_uri_st_": {"doc_hash": "2c7be6e12257eadba650160f7ac6343e508658dc532b76125e87bcf5ad136355"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_with_null_partition_test_divisions_with_null_partition.assert_ddf_read_divisions": {"doc_hash": "99bb17dd5569a102ce88c80357e3971a73e5868efa24a6035198830aff95b8ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_passing_engine_as_uri_raises_helpful_error_test_passing_engine_as_uri_raises_helpful_error.with_tmpfile_as_f_.with_pytest_raises_ValueE.ddf_to_sql_test_engine": {"doc_hash": "eadbb84bdd1fefed74deed21f49cb924f9bda130abd8b2d236e002043a4eef3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_meta_test_meta_no_head_rows.None_1": {"doc_hash": "f584e1542ba8914decb77c8f9036d4e7fbc9e1b2ea8e9920082f714aaa1152de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_meta_no_head_rows_test_datetimes.with_tmpfile_as_f_.assert_eq_data_map_partit": {"doc_hash": "a93cedd3ad64904d2bda199967c8b163c172055831596694123788a803b0a5e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_disk__noop.return.x": {"doc_hash": "1d889f9e71feac6df004dacb620944507599bcf52e40c68947679f7e122031aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks_rearrange_by_column_tasks.max_branch.max_branch_or_32": {"doc_hash": "c6644431e368ac6a145dfd3f811253acf2609438c392450a6ccb498b1c23ab41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks.if_npartitions_or_df_npa_rearrange_by_column_tasks.return.df2": {"doc_hash": "9bd3d444a728ee35661880f36bb14d6658622406a16bbb9a64dcb93bb44f4f67"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_groupby_concat_cudf_test_groupby_concat_cudf.assert_eq_res_dd_compute_": {"doc_hash": "22022662bed3ba09da76932056b67e1dfa300065f92abf24963b26585b08bed1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_columns_missing_from_left_test_categorical_merge_with_columns_missing_from_left.assert_assert_eq_expected": {"doc_hash": "4bae9d35b02a06fb413528baaa50becdc13cc5145f461a7f2f1fe54269d22b26"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts.assert_assert_eq_expected": {"doc_hash": "ac931ac42c5f1b99a784f173554eef628c9a6e496e8295f501553589207788cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_test_shuffle_hlg_layer.assert_dsk_dict_culled_": {"doc_hash": "bb04c532c128c4ec0fc7abdd812916a98e8cbeecf9b1e37cc41aa588bedb850c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._toposort_layers_HighLevelGraph._toposort_layers.return.ret": {"doc_hash": "110f207607e92a49e60c4c4dbefa4e714becefde295afa99c1d1c644b158f6ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_HighLevelGraph.cull.return.HighLevelGraph_ret_layers": {"doc_hash": "dc1b6d2f23d9a94c21dfd816f0497448704193d1b6f2dfff5160a2f94750a0ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_with_numpy_test_SubgraphCallable_with_numpy.assert_f1_f4": {"doc_hash": "7f9eb9e1aa534bb0ad2a32cf553c6f981d8d11659ab93f8d11f1638d570e2443"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_dense_dense.return.d_x_height_1_i_": {"doc_hash": "80af90ca973990b37a33dfc86273c305c24283747bb31eb0e7ceec742922793c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_np_": {"doc_hash": "fcff97454fc8c3818fa8c260464cd1e2deeaa78a93cd0b5ee5682baf2b2fb3b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_ensure_minimum_chunksize_ensure_minimum_chunksize.return.tuple_output_": {"doc_hash": "f9673e024a7fc70fa5d6dafe11a2b00cc7699cd06857a4ec9f7f27a30aa2db24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_greater_than_smallest_chunk_combines_chunks_test_depth_greater_than_dim.with_pytest_raises_ValueE.overlap_darr_depth_depth": {"doc_hash": "052542a2aaaf2befab7a04e0aa6028286d124683781b7cb1522804d8105e6f06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_none_boundaries_test_none_boundaries.assert_eq_exp_res_": {"doc_hash": "b2a702c7dfc0c0507fcfc1af342445fd225e8f6886eb41db6628c24557ae6eee"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_rechunks_array_if_needed_test_map_overlap_rechunks_array_along_multiple_dims_if_needed.assert_all_all_c_2_for": {"doc_hash": "d4b20a6b9053c9cca777f4a76c8f0d2360b645f2c79a4326a4ddea030e7ee230"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_chunk_read_file.with_fo_as_f_.return.list_reader_f_": {"doc_hash": "2edfe75657b76c9e2289372289e716286d59aaef6691dd8abfc62dc8861ee36f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupyx_register_cupyx.tensordot_lookup_register": {"doc_hash": "192fc32a267a6229378fc0b4ff32fe9dd43b0a8868a92e5a24ef29d8e9eac7b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_sparse_register_scipy_sparse.tensordot_lookup_register": {"doc_hash": "5848d19ffbbf011f2cc8295c68129338bd5029e281f4fd305c620e3486058d6f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__tensordot_scipy_sparse_": {"doc_hash": "f934a3fee3f0f0e767194d224c9f6385f8db80b90cee0a83d7125b89a53cf215"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__reverse_lstsq.return.x_residuals_rank_s": {"doc_hash": "1ddd810923178a7899614be40654d687463467e847010da1b965a11b5054f39c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_tensordot_tensordot.if_concatenate_.else_.return.intermediate_sum_axis_lef": {"doc_hash": "09df4fee3cdb4947b39e110b210e15cad774fa4605bb163b6cff906f34b5a848"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unravel_index_unravel_index.return.unraveled_indices": {"doc_hash": "17557d8fc58d25a1d6c38f31f430d09d4fc9aec045d03f69a97163c3d1e74862"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_optimize_blockwise_annotations_test_optimize_blockwise_annotations.None_3": {"doc_hash": "aae98ef2fd4c0920d933da9d03a471912dcd8b0a8cde6e07bf66ba26c13413d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_compute_test_svd_compressed_compute.assert_eq_v_vv_": {"doc_hash": "dc1f6dd93f67b57b953bd873582d1f9e787a60786c20688c166b7756e66b57f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_test_svd_compressed._v_must_be_orthonormal": {"doc_hash": "3bd4cc31f426d6a2c4342fcd7ea4e671031b5a26d051a5180f815e37ba077ae5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_dtype_preservation_test_svd_dtype_preservation.assert_u_dtype_s_dtype": {"doc_hash": "8d0df05a67f018df18016ed91d7e878af3d97243d24dea106b2a7e5c5035ba23"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_deterministic_test_svd_compressed_deterministic.assert_all_da_compute_u_": {"doc_hash": "a14511e15daeeefdc44779c4320e7df1d67817c694a14899d29ecedd9ab4ee9d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_double_dependencies_test_fuse_roots.assert_eq_zz_z_": {"doc_hash": "5001ccb92299f21fcfe271163bdbfb6e7ef48892e12446c4fbe03f6ba418b9a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_test_overlap.u_depth.np_uint16_2_1_": {"doc_hash": "e7689085a271c2266e3550b9ea5b4dc5db383fb4267f3478726ec7bf8162bb1e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap.u_depth_5_test_overlap.None_5": {"doc_hash": "b25a8f01c3004ff3397eede2b1c85ba65ccf77a8c221b01954f7147d6a54ecdf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_not_chunked_merge_test_reshape_all_not_chunked_merge.assert_eq_result_base_re": {"doc_hash": "d31fbb6ea72155ccb33c84abeaab0f68d42cd0a0117729de3e8a2aa09263e64c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_merge_chunks_": {"doc_hash": "f9222e24a5dcf57cdffd0ffdb7372720b1e44ea7e0065c77a119059234b9fe41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_method_test_unravel_index_empty.assert_len_d_indices_": {"doc_hash": "ed29659c2f99075ffe1129ed21a9a2a23cc3b9930696a7560e34b5dba8524173"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_test_ravel_multi_index.assert_eq_": {"doc_hash": "4df5e8305830487102aa59c7a4ac7c79c294ce30bf0aa21dcbb84b50ae4431e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_nan_raises_test_nan_raises.with_pytest_raises_NotImp.func_None_nargs_na": {"doc_hash": "d6223b625cf7c885c4edce87827cfb1f627c2b36a1a5bd3312851590fa307ca4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_sample_k_equal_bag_size_with_unbalanced_partitions_test_sample_return_bag.assert_isinstance_random_": {"doc_hash": "a80eb867ea03311f1f87da4ccfbfdd1b412de3b7872d5ac4be2a35ea131fac45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_partitions_are_coerced_to_lists_": {"doc_hash": "8bf4f70734c6e1f7fafc06cb91944840ac139632a6e1d5d73be5d3bedf617b2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_annotate_annotate.with_config_set_new_annot.yield": {"doc_hash": "d5d1f8c458d3c7dd70ae8f656f1110fbf02bc1f04e18326ea8fa5d5b5b227271"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_is_dask_collection_DaskMethodsMixin.visualize.return.visualize_": {"doc_hash": "6c16d90537e08d5f8ec834321d408a3b5c51859f120ef61a05de2993b8ba7e0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.get_output_keys_Blockwise.is_materialized.return.hasattr_self__cached_di": {"doc_hash": "d80d25fd260f06ddf77956f88be7497625892fee31591d8c34fd1669d1aa11c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._cull_dependencies_Blockwise._cull_dependencies.return.key_deps": {"doc_hash": 
"e571183a2d6baa55b3cc9e68fafb7daedc9a1cc6cb080fefd4087035699754fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__get_coord_mapping_make_blockwise_graph": {"doc_hash": "971091a61c0ffcf5a924fdb34c477ac8b511592f6fa3558257c93a5c6cdfd492"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__make_dims_": {"doc_hash": "8c4deb59e9cf9acea90733b94506cd5e2546272ff991e56520b757deb8dbc3e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_check_deprecations_check_deprecations.if_key_in_deprecations_.else_.return.key": {"doc_hash": "3f9effa0374b3ea8d07361e4bd7ff48f7da75379cd795b3e07f53aedb980bba9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_serialize_": {"doc_hash": "7eb4945820edfe7dbd84cec2d71fd10c92f2c4d53fafe89d7ec86ef4aa50f24c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.skew__Frame.skew.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "d201db26e4f3a9ee96b04a4ae5d09946619f9caedd0980b9934a2040da2020bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._skew_1d__Frame._skew_1d.return.new_dd_object_": {"doc_hash": "e97c31c1b93a661b843109d7b36a3b6bc857dcc45e0317bd9427ee18db6704a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._skew_numeric__Frame._skew_numeric.return.new_dd_object_": {"doc_hash": "c1b60f6d47fe55bcecfcb545913fcb7a1bacfb15feb68a943adc6a4f67d1527f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.drop_DataFrame.drop.raise_NotImplementedError": {"doc_hash": "cd1a752826cd0519ee0ee6308e5e106a29b3ac1dfa30761d11b9985b506c00de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.aggregate_SeriesGroupBy.unique.return.self__aca_agg_": {"doc_hash": "54f474d1021430a33cde7ac253e5e9a66310df80d8431d8ae2475d9e473022f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__unique_aggregate_": {"doc_hash": "5e339e8db2db6811a792bb7d016fa4ea79844b3e4d4261d5d6ef4a09f76a579a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas_read_pandas.if_compression_not_in_com.raise_NotImplementedError": {"doc_hash": "8daae204f58334c212f5bd416070317c7cd93f5ca607e4a9fb28ae0a14ed4e52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas_from_pandas._": {"doc_hash": "4816e5f804d9cb33f0e09333ada8c238fac7e10b24c1f6ad77faf27749c28b08"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas.if_isinstance_getattr_dat_from_pandas.return.new_dd_object_dsk_name_": {"doc_hash": "6f6dc8fea6bc0e3aca15f74c55e9db3c2efdf30442b486071a2b86aceddc4e40"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py____append_row_groups.try_.except_RuntimeError_as_er.if_requires_equal_schema.else_.raise_err": {"doc_hash": "2136a10a323f59d456a2839de1cbae6259a57424953606bce0d7112794cef81b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__write_partitioned__write_partitioned.return.md_list": {"doc_hash": "e10f04a519e83170e42c32b1ff55684685b32bb0d6ea7043f1f24d62893d49e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__index_in_schema__index_in_schema.if_index_and_schema_is_no.else_._No_index_to_check": {"doc_hash": "6bfc0e4bcb48d1cdb5d9a25566a1c9cb6a4d57a3cba479d92e9813acc73ea79b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_PartitionObj_PartitionObj.__init__.self.keys.sorted_keys_": {"doc_hash": "cb8a5d959a5b8f7f3c8c7e6e02be6da6b4c67e868d23799b753c0ac7251f4c33"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__get_rg_statistics__get_rg_statistics.if_subset_stats_supported.else_.return.row_group_statistics": {"doc_hash": "3e082043215f9504e67565af4dd762970638b6622ae769d631f666ae201c8342"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__need_fragments__need_fragments.return.bool_filtered_cols_part": {"doc_hash": "5e37316795e1d751fe4a97a0bf152fe59163df00b2ec30292f7c1cca0c6f28d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.read_partition_ArrowDatasetEngine.read_partition.index_in_columns_and_parts.set_df_index_names_issub": {"doc_hash": "5dae66a80356eba877849e32430e23d296475175d53cc5d42fe8abf8559c111c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.read_partition.if_not_index__ArrowDatasetEngine.read_partition.return.df": {"doc_hash": "1884fca729fbb2c88e24e52903cf248ef5b4003bcff41a59c29c6c6758962a62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._pandas_to_arrow_table_ArrowDatasetEngine.write_partition.if_return_metadata_.else_.return._": {"doc_hash": "7be006d32180d9e9f6f8478dd2efe8078cbd172b4f13d4eb74d49c344d93fb8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.write_metadata_ArrowDatasetEngine.None_3": {"doc_hash": "0e863d7e87ce121554b890a863fe037df83e7d8ce15650f291e88aaaf0b077e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_None_7__get_dataset_object.return.dataset_base_fns": {"doc_hash": "47edea390fe9bfe19626f35d8921eb34f514a9b36e0a7df92000c61571bda68f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._gather_metadata.if_dataset_metadata__ArrowLegacyEngine._gather_metadata.if_dataset_metadata_.else_.return._": {"doc_hash": "32b45c63ed1f3f1de8e8ef2a831593ddd0da0c35b09f1a2779708a9f886d8f31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.collect_file_metadata_Engine.collect_file_metadata.raise_NotImplementedError": {"doc_hash": "fbc17aff8eb79500002ed1a9e9618b0de0c25fe6809d90e1227ea897c66d71d9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.aggregate_metadata_Engine.aggregate_metadata.raise_NotImplementedError": {"doc_hash": "d86453942cb47bb133164811f79386c880526fd46a32b122420425a587752a8d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_multiple_partitions_per_file_test_read_csv_include_path_column_with_multiple_partitions_per_file.with_filetexts_files_mod.None_4": {"doc_hash": "c5a2f2076e710b98adcc7cc9cae2cd35c1c90a3b022cd44182a1b35fa2e1cfa3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_compression_test_read_csv_compression.with_filetexts_renamed_fi.assert_eq_": {"doc_hash": "127700329445e27284b2392410b720a6f335be4f269d67c0b9b86a58400b9848"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_engine_write_read_engines.return.pytest_mark_parametrize_": {"doc_hash": "a8187f5cb9e98dfabbe6725ee97e68c848c9291c73c343691f30b214ef8817a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pyarrow_fastparquet_msg_if_.else_.fp_pandas_xfail.write_read_engines_": {"doc_hash": "52e2398df3439c5664d9d1bf8767939c297e521d044d5ebb18333535716e8cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_local_test_local.for_column_in_df_columns_.assert_data_column_o": {"doc_hash": "faa414dbb77a0aebc77a1028d4d807a1ecd3dde9cec56aed2abb5286e3a198bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categorical_test_categorical.assert_df_x_ddf2_x_co": {"doc_hash": "906751dc826661cb348f59b97e27012cca96a4fb6c5e71b32bd2ee45c3a63fac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_unnamed_index_test_categories_unnamed_index.assert_eq_ddf_index_ddf2": {"doc_hash": "b719a958626ec63b98dfe6b0d758e06b89acf24ae8777d12daf02990c60be248"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filtering_pyarrow_dataset_test_filtering_pyarrow_dataset.assert_eq_df_ddf2_comput": {"doc_hash": "8c9d8a5d24089a04115440882a12aea53629acc84f802113d7ae395d744c06a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_no_meta_test_read_glob_yes_meta.assert_eq_ddf_ddf2_chec": {"doc_hash": "dddfb62fe428bf18b4a3dfa72db667f3a8831c8981ff7a85febffcb39efba543"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_multi_test_getitem_optimization_multi.assert_eq_a3_b3_": {"doc_hash": "4149c12ba8097ed6bb885f3f79231c488140a06a8edb4e4a6da492ac323c7f25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_getitem_skip_when_getting_read_parquet_test_read_parquet_getitem_skip_when_getting_read_parquet.assert_subgraph_columns_": {"doc_hash": "1af8ef90554c6470b2aadb85ae4427deef8502a6c54fa867fc4b4fb2501081ec"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_multi_partition_none_index_false_test_multi_partition_none_index_false.assert_eq_ddf1_ddf2_": {"doc_hash": "b94d81595d61da26d0d96afbbbe0a5734a16479f87d4f41187f09cc98487333d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_simple_test_pyarrow_dataset_simple.assert_eq_ddf_read_df_": {"doc_hash": "9674b87934c156176969c24dc26267c16c170c12b2fc1ac6923ddde054806fe0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_partitioned_test_pyarrow_dataset_partitioned.if_test_filter_.else_.assert_eq_ddf_read_df_": {"doc_hash": "cc4f95ad63d9e29897d22a822af17db09d9e23e1233c220a84ca466a06834ae0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_test_parquet_pyarrow_write_empty_metadata.assert_pandas_metadata_ge": {"doc_hash": "49d81c37cdb8ce9103a3c08513f985ad9bb41f81fef0531145babaf124c94ce9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_append_test_parquet_pyarrow_write_empty_metadata_append.df2_to_parquet_": {"doc_hash": "08408adbc0b4c5400de1ff9813e798689e12de28d3185999605d002f569a97d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_create_metadata_file_test_create_metadata_file.assert_fmd_num_row_groups": {"doc_hash": "01a091dfab8badf90a46afaf51bfbaaaf3210e7e11407af702599a082d424840"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_write_overwrite_is_true_test_read_write_overwrite_is_true.assert_len_files_ddf2": {"doc_hash": "207ca7efd55fbff436745787c4f5961afe55e10a07a8c94f66565c4189acf7c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_write_partition_on_overwrite_is_true_test_read_write_partition_on_overwrite_is_true.assert_len_files2_len_": {"doc_hash": "e0a100daa1afe15ce42dba87f61fd96d4c7bba668a898e6eda8eb7b489972e95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_overwrite_raises_test_to_parquet_overwrite_raises.None_1.dd_to_parquet_ddf_tmpdir": {"doc_hash": "416727bde12e431c5f47891219e2e2e5b24714f8d14dee74ace6d73fb6d0d813"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_dir_filter_test_dir_filter.assert_all": {"doc_hash": "01452a675cf148b8d37f59a3cb4368ab4533c457b006e319da338143491be385"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_decimal_dtype_test_roundtrip_decimal_dtype.assert_eq_ddf1_ddf2_che": {"doc_hash": "96d26a4d83bc2768519adb47e1b2324b4939e5d267ea4b0ff2244ca530613522"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_padded_get_unsorted_columns.return.order": {"doc_hash": "973e3d40b90c2f9f6e3629fcecc533db615306c13a5afb07d5a8ff5f99f9b37f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_sum_intna_test_divmod.None_3": {"doc_hash": "381d3146faf88592815355f0f16ab248a1c6e69ef8d3a0483ebb1739774df9e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_with_agg_test_groupby_dropna_with_agg.assert_eq_expected_actua": {"doc_hash": "3ce35d5887536964a1202640c791b7ce9e492fee43cb81d1dc9723582066698c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_observed_with_agg_test_rounding_negative_var.assert_eq_ddf_groupby_id": {"doc_hash": "94d97034b01ddd95dad5b4a40e88c37f86d82ed931a1c840d2d087226dd6af8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_multiindex_test_groupby_split_out_multiindex.assert_eq_ddf_result_ddf": {"doc_hash": "daa6ef0b3cc04e2fcba03bcf903c05a1296b766744804229e272be11a0b82c7a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_true_split_out_test_groupby_sort_true_split_out.with_pytest_raises_NotImp.M_sum_ddf_groupby_x_so": {"doc_hash": "cb7c28eda3c9010c50c256e65e2f9621ab1d5545bebffa789e67d997059a5792"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categorical_observed_test_groupby_aggregate_categorical_observed.assert_eq_": {"doc_hash": "329554e375adccb428731490845b640b24670bd77f5e6c855c3e35232241ce73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_timestamp_str_test_getitem_timestamp_str.assert_eq_df_2011_2015": {"doc_hash": "d4285d03337f09be2336cfc8b6e5660aaafc93eb00543d69f94fa78bd2dc9667"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_period_str_test_loc_period_str.None_5": {"doc_hash": "7de4f82cf1b1fe9f36757efec09773256e6d2bcf75419e988fe732d2cbd12971"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_cov_test_rolling_cov.None_1": {"doc_hash": "3d314e9a914c6e929e997397a01613220ce591871813463a47c2caec39849ee1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timezone_test_set_index_timezone.if_PANDAS_GT_120_.else_.with_pytest_raises_TypeEr.d2_divisions_0_s2badt": {"doc_hash": "ec749278d6877e878e9757ec3966d7d087b5aef00af13e38d0bfe160df61ecda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_npartitions_test_set_index_npartitions.assert_output_npartitions": {"doc_hash": "aa11a3a5d3129b1e4eb6d348a9f4e69b771ea667218dd5e893bd78bd95c2c366"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_datetime_precision_test_set_index_datetime_precision.assert_eq_ddf_df_set_ind": {"doc_hash": "66d01122cfc4def1b00662835d48590cec91b52713f3827c7b31ee8aede0133a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_overlap_test_set_index_overlap_2.assert_ddf2_npartitions_": {"doc_hash": "20b5a1e34d4775d06a2a11e28c0ba56cabaac8c1d44a55693fdb9bcec107a28d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_is_dataframe_like_test_is_dataframe_like.assert_is_index_like_wrap": {"doc_hash": "300dab3d16b98234aac1bda1706084b04b78dd85c92df333eb5841d413d0cb0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar_ProgressBar._timer_func.while_self__running_.time_sleep_self__dt_": {"doc_hash": "c2a3115f33eae90bd939a09a2654ac9f27a2b4ae978517dd39c1dc123537f20e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar._update_bar_": {"doc_hash": "97bccb740e126756de98a8afbd3c6c278b777f23c4f36950952df6fb7c002dc7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_None_2_order.init_stack._": {"doc_hash": "b138b5445d9b49595ca37622048f9f65c8569b55da5277f70485bae662f363e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order._initial_stack_key_cho_order.dependents_key.return._": {"doc_hash": "99c2359c1aa62e9b335a47e817a80b4a4ff05030bbdeef0c9247879047310237"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.dependencies_key_order.dependencies_key.return._": {"doc_hash": "76f2e446cea34e9b48fce530824c2f1f58044e2c2d458692c9b2e589a13ccee5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_schema_is_complete_test_schema_is_complete.test_matches_config_sche": {"doc_hash": "bbe2baeed07308e0001e3fde107c2533e58b917ec3e975859fff2e883641dbd4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_deprecations_test_get_override_with.with_dask_config_set_fo.None_7": {"doc_hash": "893199b936d9e47b0bd6a4c2f5bcbcd9243a4aa9a7cd6309195fcd49c04fd760"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_pytest_test_with_get.None_4": {"doc_hash": "f3e6a2200d3ce5f8585d7056b2840603170f991789e29ff13dc8a9763d419748"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_zarr_distributed_roundtrip_test_local_scheduler.asyncio_get_event_loop_": {"doc_hash": "4976fe2533f71e20a5304c53840fb64078b84b85f591190459ed8c19ac863dc5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_os_test_basic.assert_all_isinstance_lay": {"doc_hash": "2f22ed054ef54cd14ac4c619d099e345477607416cbe6032a14362f2af4afe9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_with_equal_dependents_test_order_with_equal_dependents.None_7.for_i_in_range_len_abc_.assert_o_x_5_i_1_": {"doc_hash": "b840f269199f96a9f814e32f6ff9bf6e44fb7ed16de6b35308f066c39360ca54"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_terminal_node_backtrack_test_terminal_node_backtrack.assert_o_a_2_o_": {"doc_hash": "5dcc07c586317b5c37d930b9e60e4f61e7b7cfd716f6e3e5417263dd0c9303b2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_is_arraylike_test_iter_chunks.None_2": {"doc_hash": "a78ce85d6b8f8337c5ac055e0f3286d524dd33c16307ce21775a1d1098e73e50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_ago_format_time_ago.return._Just_now_": {"doc_hash": "2c1addf7c89e6bd205e9a7a303b185b7af1cfb84a7809583243e9cb8f94798df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_key_split_key_split.try_.except_Exception_.return._Other_": {"doc_hash": "c3fa85d026ea8b8ac114d70f27efefe8e0a09f6007c130a1a0bf9b369e4424a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_stringify_stringify.return.obj": {"doc_hash": "ce20802b7806e35d45532f4cf5cc231eb755399a44bff904eeb388fb3c6891b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_stringify_collection_keys_stringify_collection_keys.return.obj": {"doc_hash": "21e38ec229c56e4df02d0cfe01b893a58b3fb9e34cfac62ae8415714b4d7a524"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_getter.return.c": {"doc_hash": "1df4e798a1cbeff45588c636c9470ad9930a60b4cd4851ae7e5c714719dd2f12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_nofancy_getter_inline.return.getter_a_b_asarray_asar": {"doc_hash": "5c6f4446127fb060893cead1d6a67ada5c1daec236df568dbb4a970b8b341822"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_round_to_from_array": {"doc_hash": "d2a477b1d293353025fe1543b0d508d3bd27cd8199f25b31d09d0b8fae11a29f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array._Create_dask_array_from_from_array._Create_dask_array_from": {"doc_hash": "af6bf3f95f1af234823b86da51e20edae3c879a63a0e36a87263b31bff1a88ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__bincount_agg_bincount.return.output": {"doc_hash": "a2e543782996bdf9e094a350d54f6802cec52792de814edc309a53a8d756fef4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_aligned_coarsen_chunks_aligned_coarsen_chunks.return.tuple_new_chunks_": {"doc_hash": "b233797600275070975e8d7f6cacf221a75d47c766d06d35f2714d4fcad92541"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_delete_append.return.concatenate_arr_values_": {"doc_hash": "6fb2dbd24f6f38925cfbaafed430cc1bd6213902158218f349acc71f9f8c67ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__average__average.if_returned_.else_.return.avg": {"doc_hash": "778b9ee6c8ad7c32b0da2e53ce06e83cbb5eac7c789c28e612e9fdf76111ec9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_average_": {"doc_hash": "3779fc1532ab26f19e0c5ddfbe410409df579cbcaf19216d33e6d2d48ccf472d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_test_chunked_transpose_plus_one.assert_eq_concatenate3_ou": {"doc_hash": "7d46a3d9cf7b14e19933ebf7ad852b62fb2f34064bdf923de3bc1ad14ed0c4d5"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_dimensions_works_with_singleton_dimensions_test_stack_rechunk.assert_eq_z_np_stack_x_": {"doc_hash": "bb69b669238993a127c56fcb838ba059967b54181cfa4125c36568d4b16ccdb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tri_test_tri.assert_eq_da_tri_N_M_k_": {"doc_hash": "fd4bd18b140ce8f4e803bcd91cbbcea11672f102dd303a08722f319f21f9a17d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_test_ravel.assert_eq_np_ravel_x_da": {"doc_hash": "3d452f97671b29a5be064a8ca07fdb34a4061f9491a4d3a8175c32c882d5b3b0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_1D_no_op_test_ravel_with_array_like.None_3": {"doc_hash": "18e92fb88a57344b0274fad0b8c9e251b714f4ddd0fd53d62bbc4a3dd71e37a4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_with_excess_test_coarsen_with_excess.assert_eq_": {"doc_hash": "2b085796cb11751969e465e06f216587bc2a4bb62016489002e049e71d184cd2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_bad_chunks_test_coarsen_bad_chunks.assert_eq_": {"doc_hash": "0291c5c4b878324596f7756b3d549a5ba78d49fa91fcd4954adcf5849a88b6da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_aligned_coarsen_chunks_test_aligned_coarsen_chunks.if_any_remainders_sum_.assert_any_remainders_1_": {"doc_hash": "87c90fbfc12f207a1c8b9f73dbf4594c4e205712a3d7c03b8c5eb1b0de014970"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_append_test_append.with_pytest_raises_ValueE.da_append_a_0_10_a": {"doc_hash": "9af1ea5035f283ff93042cd6a75b86c4624f0d955d49319eec98714b52d0dfbe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_multi_insert_test_delete.None_1.da_delete_a_3_axis_3": {"doc_hash": "9bbe512262003cf9d6a16feeae4b556aaf396843a7e633627a3f21f2632f1b78"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_result_type_test_result_type.None_10": {"doc_hash": "7e8a0d05dcea5be8c04b45e1c631ea85e26ca47495e96b9404965dc1780b4db3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_average_test_iscomplexobj.None_1": {"doc_hash": "cb89393276db49888874921c3a5516ee3fce1d7d4342ff51fe4f0fee2f1c23aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_.None_1": {"doc_hash": "35aabbf3860e9ebe65b5a18c3478809bc5bc416b468b5546ace521b1f1e52f11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_ndims_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"doc_hash": "7323ad40706935af871835541a7fb1cc421a03de8b43de43da558af9bf8c3966"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_triu_indices_": {"doc_hash": 
"2893a3b5bba99a4876458b5feeb6f6c454c607ea9d6b424ab0ce39f0478f0024"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__array_like_safe__array_like_safe.try_.except_TypeError_.return.np_func_a_kwargs_": {"doc_hash": "ef991a7d1501063e26b2220f3b6654f6340ffb5f3add5c4c9c52b2dc558d9c88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_array_safe_array_safe.return._array_like_safe_np_array": {"doc_hash": "e8f766970774d5268b5889b5ce2420b99b0f98f252c029a7d05d520fc7021e0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_asarray_safe_asarray_safe.return._array_like_safe_np_asarr": {"doc_hash": "3b05b2cca43b78504fe9ac3bf4e698471bdcec53df7e91bdc1f974629916f153"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_asanyarray_safe_asanyarray_safe.return._array_like_safe_np_asany": {"doc_hash": "0985645e9a5ccad0c8140bd0ca908c2f0f0547ed125bf3cd5a647f7ddb73aa27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_releases_element_references_as_soon_as_possible_test_map_releases_element_references_as_soon_as_possible.try_.finally_.gc_enable_": {"doc_hash": "0eccfa689f1e3c49eeaabc590557289999b60fd01670574fb60e3b0525ee68db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bagged_array_delayed_test_dask_layers.assert_i_dask_dependencie": {"doc_hash": "755eb798feae6f87cbd61f87ce1e14e8eb98bf0b023b0e3e66cb9f9968e52c3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_scheduler_get_scheduler.return.None": {"doc_hash": "ee972d15b96870fc593bed66ae2450e47e93e569bd6308f57014c3b4c2355683"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_wait_get_collection_names.return._get_name_from_key_k_for": {"doc_hash": "8f6c59d103eb1e9ea4a4823a31faea063e373dcb356b0303bda45d063f47d4f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_name_from_key_get_name_from_key.raise_TypeError_f_Expecte": {"doc_hash": "53cb81ccda9f49c4e67dc15c6a88679982992d1948c3e1bba086cc8890e46117"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_replace_name_in_key_replace_name_in_key.raise_TypeError_f_Expecte": {"doc_hash": "8720ae4102c83b87d26b56a8d4dfb58666f142a0fda3bd25b8cd221de595365d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_clone_key_": {"doc_hash": "1f665b7ad45b773823586e9e9a6854338c236d5c97f627ba44d0d0c2ceca6728"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_unpack___Blockwise.__dask_distributed_unpack__.return._dsk_layer_dsk_deps_": {"doc_hash": "97f4d7bec2a2c3fb851ac3dce1e13fc13562fd122b951b1bff8069954e36f364"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._cull_Blockwise.cull.if_prod_out_size_iter_.else_.return.self_culled_deps": {"doc_hash": "3f6cc6e1a8713fabad4cacd0850cfc8e957a31cbedc3971b09307c4f5c05d028"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar_Scalar.key.return._self__name_0_": {"doc_hash": 
"71155de85223e8caca9e7ad7152185c099fbb5a12e9c6030f23efde1d0cdd7aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar._get_unary_operator_Scalar._get_binary_operator.return.lambda_self_other__scal": {"doc_hash": "25c31b094d741c80ac26d5d773f67d2d16e5924f16b2a3f7d5586bfee5691ee4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._process_metadata_ArrowLegacyEngine._process_metadata.return.parts_stats_common_kwar": {"doc_hash": "bcd5f2a5a0ad328bc053fd88e706bd2bf68edfda86933b1755f17cbc6f827ba6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._organize_row_groups.for_rg_row_group_in_enum_FastParquetEngine._organize_row_groups.return._": {"doc_hash": "c61c785930adf11e99452df76d1737acef3a70c846a62015d0159d9c113a6e0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._make_part_FastParquetEngine._make_part.return.part": {"doc_hash": "941e99a94aea860b98a9850a15329dcf1554f4a1779f9cc529190b737e782583"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata_FastParquetEngine.read_metadata.return._meta_stats_parts_inde": {"doc_hash": "9c6a4caff2a77aa508304fb2ba91559b3ec30ed0a2fe552ef2a406707c3f490c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths.path_parts_list__flatten_filters.return._": {"doc_hash": "c286f015e62fbb54049b30f3244ed55149274db7262da57a0b9b07b533b22e27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__aggregate_stats__aggregate_stats.if_len_file_row_group_sta.else_.return.s": {"doc_hash": "3a5f4d8a58b52ab5144977b28e3f590edafdaaba905e7ce4eea561d83a7bdc85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_test_split_row_groups.None_3": {"doc_hash": "7fee0723ca31da28eb74d8be60abd724bc662f9c565536d13350388ce8781920"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_test_split_row_groups_int.assert_ddf2_npartitions_": {"doc_hash": "ff9dfd2f14ca0ce56b190bb383e6bb2bc71cff0f92febd9ec6d061751076e56d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_filter_test_split_row_groups_filter.assert_eq_": {"doc_hash": "0251398535c3be718139484351c61e0c0622edc29a2e450cefe389c664254fac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_filter_partitioned_test_pyarrow_dataset_filter_partitioned.assert_eq_": {"doc_hash": "1340c41aac788dfe6caed74ed4abe8ee3ae6f178199a0b6c030ce8cc8afeccbc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_axis_test_series_round.assert_eq_s_round_ps_r": {"doc_hash": "cbb683a4f2c72132445fe72b557c1fb8fda2614a9f8ea33fa904a9d88273a73c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_test_repartition_npartitions.assert_all_map_len_parts": {"doc_hash": "a7806a52b0f37d0d8823913fe6d6b2d5643d1bf71f291e8c312b5cc1ab482366"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_errors_test_map_overlap_errors.with_pytest_raises_TypeEr.ddf_map_overlap_shifted_s": {"doc_hash": "9a7d7bfae14eb33e78af5d1e4b08b3b140d38f0f153eb2d2d67466f2a10df421"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_apply_and_enforce_message_test_apply_and_enforce_message.None_1.apply_and_enforce__func_f": {"doc_hash": "0fe2d3e9add98f2de2be12761901ad7b9fefde57dc4e7ec516045bd8ec19edcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__setattr___Delayed.__setattr__.try_.except_AttributeError_.raise_TypeError_Delayed_": {"doc_hash": "06d1fef61861b2d858aeedf7b8265bef8d594c9fd323b1f4c134cbfe4b1834df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__setitem___Delayed._get_unary_operator._get_binary_operator": {"doc_hash": "3fb6ba47a42975be95b8e3c29b3484eb924bf2d3bb0c828ed7ec314d04268eda"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__can_apply_blockwise__can_apply_blockwise.None_2.except_ImportError_.return.False": {"doc_hash": "52561ab8e3611096f0131ea8a3196bd4b892228c57dac5a5307eadf0bbde8d64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__build_map_layer__build_map_layer.if__can_apply_blockwise_c.else_.return.MaterializedLayer_": {"doc_hash": "ccf03654b1cef50126c933a7837f7bfce4cf4e00627350264b60ea6ce0fda1e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__bind_one__bind_one.return.rebuild_": {"doc_hash": "be09c9874244066ccc650bbfc1bdac0211b096d4ce7c21a02eed7c2891dc8585"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_clone_clone.return.out_0_if_len_collections": {"doc_hash": "39cfd8904f184c889e4e02cf826152ca4fbbe7cb8d3b5c571c97c8fe6e803568"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_wait_on_wait_on.blocker.checkpoint_collections_": {"doc_hash": "c101203634282d573640799dd66fe8385de87d69d2703291640eba36704d0905"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_wait_on.block_one_wait_on.return.out_0_if_len_collections": {"doc_hash": "b62eb575b70bae3af696595af1096a16b4518a855b580f1ecebc9e86a57a8097"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_chunks_": {"doc_hash": "e9fb8700da4ef90924ccc2ddd3d7f5b3583f2434d267641c2ac126f512f9fc1c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_annotations_unpack___Layer.__dask_distributed_annotations_unpack__.annotations_update_expand": {"doc_hash": "e556f82987b44e3b698086f2d70d8ebda9fa968de0bf708f93a9d2d1eb86e20b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.clone_Layer.clone.is_leaf": {"doc_hash": "9e97670461d13455d45777c10443cdecb0950b9c5606cc07de17c74640d2273a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.clone.clone_value_Layer.clone.return.MaterializedLayer_dsk_new": {"doc_hash": "6baa65589873244a3d3f31cacfe4dc5da1f78b4d06d6107d375ae7219f63bf34"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph_HighLevelGraph.dependencies": {"doc_hash": "2fc2630b8d2565273736ee7a3ad81a7b1627252faa5f0080d97c991866f695ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.key_dependencies_HighLevelGraph.__init__.self.layers._": {"doc_hash": "ec1a2fc5afecf4baa1c6d7200d5c43b0efefad69d95245e68946123d23363b83"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__getitem___HighLevelGraph.__getitem__.raise_KeyError_key_": {"doc_hash": "3b6fb407660e1067f1cddfe5b8b2681db0bef35d0db967bc4eee7e6e9f56359b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_layers_HighLevelGraph.cull_layers.return.HighLevelGraph_ret_layers": {"doc_hash": "f46e294d7e39f9e721d3b8ab7b821c89a57d89af9c13df641452259bef55516c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_name_from_key_test_get_name_from_key.None_2.get_name_from_key_1_": {"doc_hash": "0fbf4fa0ea0c8a4d5830b4215ddffa992491c5469f017a2e86d2c5a674219aab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_replace_name_in_keys_test_replace_name_in_keys.None_2.replace_name_in_key_1_": {"doc_hash": "14cd7487e6b96c74a2fbd36fd1389e94a3f9ae60cedab637567bcb5d65ce3ca1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_Tuple_Tuple._rebuild.return.Tuple_dsk_keys_": {"doc_hash": "b5caf3a59cac1ce5b6b9ec1cfc47281aba63b697dacfcdb266890d9877b376a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_custom_collection_test_custom_collection.assert_z3_compute_7": {"doc_hash": "19a1fad1e9f264d18eff01a278ade83d6056d5666e0569a7c1634813bf63092b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_array_rename_test_persist_array_rename.da_utils_assert_eq_b_1_": {"doc_hash": "f7c55a5f9abe43eca2f611e3238cae4416628a6905ce6fa29c8858083d076c67"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_test_compute_dataframe.dd_utils_assert_eq_out2_": {"doc_hash": "e57133059ca371f1caa1b1af22ac1591c78e11da8f0785ec2cb57210ab7346e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_dataframe_test_persist_dataframe.dd_utils_assert_eq_ddf2_": {"doc_hash": "72282efe10bdcb986c5a8a46d5285f9fdad0047d242024f2f97043b75886f15a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_series_test_persist_series.dd_utils_assert_eq_dds2_": {"doc_hash": 
"ee41fab8b72481697412a3c5b9fd59843d6f2577cefc508a928add791acb7127"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_scalar_test_persist_scalar.dd_utils_assert_eq_dds2_": {"doc_hash": "17d9658a50a66601c5dce40bd0098863deb9c7611e817f1be65a9be99077c027"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_dataframe_rename_test_persist_dataframe_rename.dd_utils_assert_eq_ddf2_": {"doc_hash": "ccf83b6c9c7d2a719e31a8d80674ff8c4f1e8402294bf4f09c96fda396e818f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_series_rename_test_persist_series_rename.dd_utils_assert_eq_dds2_": {"doc_hash": "7343b9f3881a8d5463bf02fad64aaa584fd114dc5237193ca172b03059824739"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_scalar_rename_test_persist_scalar_rename.dd_utils_assert_eq_dds2_": {"doc_hash": "e7f94b72d016eefc876f473d113902d9376663237cf5442377469d896f98ed17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_dataframe_test_compute_array_dataframe.dd_utils_assert_eq_df_out": {"doc_hash": "6590bbca08f6475cfa620b242925693849a44cf859f6842f9178da26e12acc94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_test_persist_delayed_custom_key.assert_dict_dp_dask_": {"doc_hash": "7a8d0fb2ffcd47c40bfafa425cbfbc1ce0f92ad6fce768894473b9551be2709d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_rename_test_persist_delayed_rename.assert_dict_dp_dask_": {"doc_hash": "751a6f00e00ff8a58760713973d016c27a14276d2ec46794dbfa0cd3f506ece3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayedleaf_test_persist_array_bag.assert_list_b_list_bb": {"doc_hash": "304a6844dc44c4aab712477b52ff9bb146374d1312a927003899288f0c80f454"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_bag_test_persist_item.db_utils_assert_eq_a_b_": {"doc_hash": "b0096009227499da90a909c2ec39b71f2ebae63b9b58f43f085693388cb849af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_bag_rename_test_persist_bag_rename.db_utils_assert_eq_b_4_": {"doc_hash": "9bf31dbf19176acd3a6c49d7fad51194547b74ffe0553980d36d83b55925f12a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_globals_test_optimize_globals.None_1.assert_eq_xx_np_ones_10": {"doc_hash": "8141dd479bf0ce46753f837c4527554e81ac765602f8b6ce448a37f36326de99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_attribute_of_attribute_test_cloudpickle.assert_d_compute_3": {"doc_hash": "69f13ac0328919c502e1ef5836565296b5835e8f323935ca829796e6515445f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_fused_blockwise_dataframe_merge_test_fused_blockwise_dataframe_merge.dd_utils_assert_eq_": {"doc_hash": "0a2cc6ba8ea2613dcf8384e6e865463504c15d4675d4158e1d335a58b123fe6c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_annotations_blockwise_unpack_test_annotations_blockwise_unpack.assert_eq_z_np_ones_10_": {"doc_hash": "1a94a213d4f2c2398763ca6f0636179a18476fef73019a372ad938fc91746f5f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_combo_of_layer_types_test_combo_of_layer_types.assert_res_21": {"doc_hash": "4309793373b3b02f3e6857d4d781ea2c6a44dcc70bd1ca12734abfd3032a4cee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_random_NodeCounter.f.return.x": {"doc_hash": "463d93743e46443fcb65eb0e24624eac04d19b0ba39273aaa9573154fc831009"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_collections_with_node_counters_collections_with_node_counters.return.colls_cnt": {"doc_hash": "c4d712c0054504764e2e1b40fbb9b4fc4ddaeb458e90c89b7ecc56d1e494782d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_demo_tuples_demo_tuples.return.Tuple_dsk1_list_dsk1_": {"doc_hash": "d4c0a127d9b090d94949fec07cd41948caf787c0eec6d19021552055eefb4d94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_checkpoint_test_wait_on_many.assert_cnt_n_5": {"doc_hash": "d63cc5fa4fcf0029c7a02ad0d3f7e09e4226027cbf348e3f6e7d96acd8607cae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_wait_on_collections_test_wait_on_collections.None_9": {"doc_hash": "a3b0a40f9817a290a92c9931bf2d46d067139e157636087797457c9e74029666"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_clone_test_clone.None_7": {"doc_hash": "386abdfe7b9d762ad5ff0096f1c16c3b4301d23ad6b933d1f068a7bd411c86c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_keys_values_items_to_dict_methods_test_keys_values_items_to_dict_methods.assert_hg_to_dict_di": {"doc_hash": "11656870c3a38bac01d446f99fff645ca41bf2ab57d22bde6ab0cfe71c150593"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_getitem_test_getitem.for_k_in_Unhashable_.with_pytest_raises_TypeEr.hg_k_": {"doc_hash": "63c7c58871e84293babe52d7db833c0ab6162bfb1f0aecf64358163c12459de6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_copy_test_cull.assert_dict_culled_by_y_": {"doc_hash": "4d05d7bce08f25edb0318db9144fbfecfc6d45b235385ae777a59fb24fb84cba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_cull_layers_test_cull_layers.for_k_in_culled_layers_.assert_culled_dependencie": {"doc_hash": "27b07a7bd7b7715d5ec08b5575a5f6ea6c1096adcccdff8c5c1fe21d0112a7c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_annot_map_fn_test_single_annotation.assert_dask_config_get_a": {"doc_hash": "6ec04f7ea75bdb0b21e6bc18f9dde58b6aebbe99fb5bdf268ebb4b3bab0af521"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_works_with_highlevel_graph_test_works_with_highlevel_graph.assert_res_x_1": {"doc_hash": "0a5d7f2a725f1f01972721b3103e259ec9519efcf2bf8c60d469916f5c544d6f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_scheduler_lock_ensure_dict.return.result": {"doc_hash": "1136e71ff40caa3453e2e04ecca05b2cf2e0ce18dca522ce34a08865402dc515"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_fuse_slice_implements.return.decorator": {"doc_hash": "1a11a27c09b9a0131321ce21e60bd912a55d7d496484a9cbb48182442b9546d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array_Array.__slots__._dask___name__cache": {"doc_hash": "53701e44399f7f47d5c716f4a666a66f14f0aad3a6f5f11cd36d13a5dcb807bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__bool___Array.__index__.return.self__scalarfunc_operator": {"doc_hash": "dc0ea0fb3d1a67908f8f9f72e3f9c62d40eb9f861956e5788eed02787de9f3a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__setitem___Array.__setitem__.self._chunks.y_chunks": {"doc_hash": "9cbb545bb0c752f790e869067fdbc3bb028afdc33c74d0d95404c0389c5b252c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/image.py_os_": {"doc_hash": "d75a6dabfdb973daba28c2904dfaf01925713408ff8f765a059d6018c87697e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/lib/__init__.py__": {"doc_hash": "7eb97a3d4e356bf1d3ccf60ad8288b2a0fb2fc978550e6362ebcf5e57b8b422d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/lib/stride_tricks.py__": {"doc_hash": "3d676e36fb909263842c13cc8209afd53f9b2bdd0dd49172e028a2228e36f37b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd.if_nb_0_nb_1_1___solve_triangular_lower.return.solve_triangular_safe_a_": {"doc_hash": "7fb83b0435509e024a832d8a0817e3d1e0f91a9a55051778426cde629b7df868"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_inv_cholesky.if_lower_.else_.return.u": {"doc_hash": "d43b6efd5f76a7eaca4419d722a60fdb8aff4673650e4e6225184a4201fd5745"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_warnings_try_.except_TypeError_.ma_divide.np_ma_core__DomainedBinar": {"doc_hash": "3e0305d2ae72cba5350045333ae84a0541e23dcfb9a7a3155cc725dc8fedafdd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_adapted__rollaxis.return.a_transpose_axes_": {"doc_hash": "0eceeb42979a77ce74e058c15dfd8fae71395c23d3109be2221a8b7e6e03990c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_if__numpy_120__": {"doc_hash": "540c53f927cabf823bac2b1e239a84e47680e2e6325f71db584e8cffd57f3797"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_coerce_depth_coerce_boundary.return.boundary": {"doc_hash": "54c41838a5d841dc49c0fe09e7105bff096200734a71913f1156d9eabb9fa87f"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_sliding_window_view_": {"doc_hash": "737c9b54c76bb16776016208d909e37b5f186954758f335ef123a8ccaa6a00fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap_RandomState._wrap.return.Array_graph_name_chunks": {"doc_hash": "0addd021a00f677e57dab6f7e409e3b207d53258429833bd24b8d083c8a36245"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py___empty": {"doc_hash": "2adcbd180e4e5e35049567465f0bcf280d21c0e007a08598d274d4930ada79c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanprod_nanprod.return.reduction_": {"doc_hash": "b42893d12b19a1a3766fca51f02b22f56108c24bbe191fbfdbd270c2ba8c5936"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nancumsum_nancumsum.return.cumreduction_": {"doc_hash": "485459543b63e17b3417b174b757bf782f4d9ec07368835706580e6a8b6b8b6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nancumprod_nancumprod.return.cumreduction_": {"doc_hash": "46d42aa18ebfe5101676fb476758751fbf6814e71eba3908e3c67e6fd967e44f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmean_nanmean.return.reduction_": {"doc_hash": "88483557f0afb21a118defc6af421a48e8b4c0a97e75fbaac0f5ff8739ca7bed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__sqrt_safe_sqrt.return._sqrt_a_": {"doc_hash": "3cf7efb94c0e0092b44ac00893ee2c388c6939bba2a30ad8af1953881b188fa4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__arg_combine__arg_combine.return.arg_vals": {"doc_hash": "23b38017f052071981a1a079dfd105ceed50d22a83c8331c3d92af43f6d800cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_flip_fliplr.return.flip_m_1_": {"doc_hash": "accdc97428aee428d5a0b500df57ba12a768b3cb72dbdce44099e4d02afb87e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_rot90_rot90.if_k_1_.else_.return.flip_transpose_m_axes_li": {"doc_hash": "a95693738815357dd6006ccc4c9badb3b44e040fdacf62bb11c8d4e43cf73668"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd._Blocked_variant_of_fu_histogramdd._Blocked_variant_of_fu": {"doc_hash": "e9bdf0f4556eef9312de0f1eea68a4a6aaebb6ca1442a25a229dc38c1aeb63ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_bisect__sanitize_index_element.if_isinstance_ind_Number.else_.raise_TypeError_Invalid_": {"doc_hash": "e8daac76bbad9a9519b781c8d10169c7894d65f3a906a4ddb4123ac22e5aefad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_concatenate_array_chunks_concatenate_array_chunks.return.Array_graph_name_chunks": {"doc_hash": "2800a77ba52ff8dcd4ee02d569fc656571ecdbf518f1e51178fb2339e5a8868b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array_setitem_array._Master_function_for_ar": {"doc_hash": "55dcde362f82d135bd552c79489bb43785d3d7635b154a3ed31ffadb2de55557"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.block_index_from_1d_index_setitem_array.block_index_from_1d_index.return.i": {"doc_hash": "67a37e9ccf5ea89f0963d4a9050d5a249a7f900ade6ad352e398052c99e9c18f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.block_index_shape_from_1d_bool_index_setitem_array.block_index_shape_from_1d_bool_index.return.np_sum_index_loc0_loc1_": {"doc_hash": "94c291f656ba140738598d72e4d09fba08c3104011ed138cacb870de718513f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.n_preceeding_from_1d_bool_index_setitem_array.n_preceeding_from_1d_bool_index.return.np_sum_index_loc0_": {"doc_hash": "47189035e9acf4630f4e84413890427fcc74c532e81b18eb827ae1b6325ec6ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.value_indices_from_1d_int_index_setitem_array.value_indices_from_1d_int_index.return.i": {"doc_hash": "0984404f6f9c35f1a858c1decbce27164c158aa1713f7fdef88a568dc082a3c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.flatten_setitem_array.non_broadcast_dimensions._": {"doc_hash": "a69c2c14257bb37ee8d931d899166a1853fe961cd3bdf90ae6255be809652ce8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.for_in_key_locations_in__setitem_array.return.dsk": {"doc_hash": "bfeb86bdb4d7a2f5416c62ab9ba21f11bc5b8b4d75744a3b108c19efbf881022"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_": {"doc_hash": "1fe7e6cb4d11fc98a849e4c967671527712faa344177a65847a0b4b3c62d3eb9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosis_kurtosis.if_fisher_.else_.return.vals": {"doc_hash": "4d46459daadb9546bbdd3cb85a0c776950204e838bb36e63ab477ab29ac725d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_with_full_slice_test_h5py_tokenize.with_tmpfile_hdf5_as_f.with_tmpfile_hdf5_as_f.assert_tokenize_x1_to": {"doc_hash": "7ab60582a23f0619767880fe36b73beada8bb110a74330ca0e28d0a779fe28d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_zero_slice_dtypes_test_normalize_chunks_auto_3d.None_1": {"doc_hash": "59799b8ee427570963279c3bd249958ff49cf1f7a2f3a57a3ffe0d5d0c6b6feb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_np_test_array_function_dask.assert_eq_res_y_res_x_": {"doc_hash": "da58c2d62f604321ee3279aaaf7cae23402ca104e27875fc240c3067e10a1bd9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_numpy_arg_test_blockwise_numpy_arg.assert_eq_x_np_arange_10": {"doc_hash": "aa931f55215d6fd6366d65998f129f1d17dffa499f13b92c676bd8c3cb3d6b16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_np_test__parse_gufunc_signature.None_3._parse_gufunc_signature_": {"doc_hash": "0ca69eb2ff1bfc5f4b20ccc261dd21e74d27a6bc348682d1ad2ffcbb927e8b83"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_with_meta_test_apply_gufunc_with_meta.assert_eq_expected_1_re": {"doc_hash": "dfc012d199fc8469103e57129c9a632990e7f48a6ad61f1af9669752ef16d6cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_as_gufunc_with_meta_": {"doc_hash": "3621865e5bc806fc76f4237f89fa5dbcf0ae4d4d54ae76e27569e7b1d132650a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_os_random_images.with_tmpdir_as_dirname_.yield_os_path_join_dirnam": {"doc_hash": "261eda3e333739c7d99d3bcb1b8d2e1505fef9cfa08a86062cef73dfbb0b248b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_cholesky_test_cholesky.None_1": {"doc_hash": "6b2c38f02f815a082d1d3eab5b04604bbc18aef4f4524b5b2f0cfb876c32ec1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_np_test_basic.assert_eq_result_expecte": {"doc_hash": "3be66e86bbd859f20ce05033c856303b791156f7c570b2d6e6ae3cdd47ddeabb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_array_creation_blockwise_fusion_test_array_creation_blockwise_fusion.assert_eq_a_np_full_3_3": {"doc_hash": "b59e67c5031a7773b0b672e966a22689580106df0f3829894f3a83aca2014202"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_test_map_overlap_multiarray._are_not_somehow_shifted": {"doc_hash": "fcb97376de557a0c145fbd97220ca2673cc413adec5fbab18fbc4ae99bc963f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_defaults_test_map_overlap_multiarray_defaults.assert_eq_z_sum_20_0_": {"doc_hash": "39a119b86169fa0203ef4b002e30522202f24f27687df2a25eedcf208d2a2347"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_block_broadcast_test_map_overlap_multiarray_block_broadcast.assert_eq_z_sum_4_0_": {"doc_hash": "2bad4361b1ccea3ae0385783bc219c05a83dc2781eb85520af5a92170d957985"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_ensure_minimum_chunksize_test_ensure_minimum_chunksize_raises_error.with_pytest_raises_ValueE.ensure_minimum_chunksize_": {"doc_hash": "bc6e9d6830d87bfa686ef9869c16d53ce4f897a6b4726cd6b452ed59da3f42f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_sliding_window_view_test_sliding_window_view.assert_eq_expected_actua": {"doc_hash": "1cb545243bd7d7e7def556b5a1586b1b5ff39a8d14fc63a3de0996ab711f932e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_sliding_window_errors_": {"doc_hash": "a0041eb282e07d0e6f16c67af45f2d3eb38d95c5e7d8aaaf1f9fd7eb969d90fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_warnings_from_dask_utils_import_fu": {"doc_hash": "dacddf30dbf567012b1e2130409285cecb2625c6f88c1f51154f2c3c36752e4e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_os_test_numel.None_1.for_sub_in_itertools_comb.assert_eq_": {"doc_hash": "900454cb3c9634817787bd901b62beaa0801072363c6cfc31e97a9bc2a110cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_np_test_reshape_rechunk.assert_np_prod_list_map_l": {"doc_hash": "6d21daf96705fd1cb527db99ee63f723e525c9241e9126f7d689e0b8c4b715e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_rot90_test_rot90.try_.else_.if_len_axes_2_or_axes.else_.for_k_in_range_3_9_.assert_eq_np_r_da_r_": {"doc_hash": "59ccd1fc6a8836ed0daa705e6e3c421796c8c43ba85ec2464b2d2dde0108dc00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_test_bincount._can_bincount_result_be_": {"doc_hash": "49764e151633592cd51edefb331f7f54619489724efaf5b07eef39cb53e629ff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_alternative_bins_range_test_histogramdd_alternative_bins_range.assert_same_keys_da_histo": {"doc_hash": "254a3c3edcea285aa72307cb0eb44a86b072929d0ee58aded39c2f0b4137eb12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_density_test_histogramdd_density.assert_same_keys_da_histo": {"doc_hash": "e5ad4075a1cb93801fe6709d855cd580f0123ce1107bd9afb9d78ad1fa98fd00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_bins_or_range_test_histogramdd_raises_incompat_bins_or_range.None_2.da_histogramdd_data_bins": {"doc_hash": "98c3bac1a1205613651ab0cbb4697fe3c590ed5641be33397fd23a8e840e36bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_cov_test_cov.with_pytest_raises_ValueE.da_cov_d_ddof_1_5_": {"doc_hash": "1e4a375a94294794b69243a54273763a97f0e1ad087c0779d34dbb04c93366b2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_power_divergence_invalid_test_skew_single_return_type.assert_isinstance_result_": {"doc_hash": "29f0c3fba28422ce001aff9aed4c9b0c1a86fc153bf0d34af4021873fde7ac8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_kurtosis_single_return_type_": {"doc_hash": "b6eee6b2db4a7325280b340f13cbce6469b4c98afe5a2570100fcc9fac3c6cae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_xml.etree.ElementTree_test_basic.None_6": {"doc_hash": "cc598d3f84a1d107352054458613c0fd6737673aa6d4fdfbf67dae0c449b1368"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_pytest_test_asanyarray.assert_eq_y_y_": {"doc_hash": "c24373e489fbf18f400a1d7f1f7e428d9ba61fdd5835918984c735869cd819d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_test_asarray_xarray_intersphinx_workaround_": {"doc_hash": "b0843a3fa1814a5b30ce8dd29e3f91569ba4877a32426679fad8d6e9ed4efe8c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_from_functools_import_par___array_wrap__.return.x___array_wrap___numpy_uf": {"doc_hash": "0e61e2aa14b67ace004d1cadeb91d9c58896247c752d9d05a80e993592908c09"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__check_chunks__check_chunks.return.x": {"doc_hash": "34f7f57c2d3243a06d8a102edd3bc14e46518e9d0f51bd879e2097101e46f77f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_safe_wraps__dtype_of.try_.except_AttributeError_.return.np_asanyarray_a_dtype": {"doc_hash": "84588818b93cd6f90d2edafeed7b6b026e6720080da220463a2de3d030fce914"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_arange_safe_arange_safe.if_like_is_None_.else_.try_.except_TypeError_.return.np_arange_args_kwargs": {"doc_hash": "92b845fcdbcf8f5a2ab4852c5b306d67e8011a9f42ce01d4dc356e77f9c8cea6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_svd_flip_svd_flip.return.u_v": {"doc_hash": "1c328931610fd8a83c24dc909530a864136a667bf19a7c1886da4dc8faa4d875"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_shape_as_first_arg_wrap_func_shape_as_first_arg.return.Array_graph_name_chunks": {"doc_hash": "983af33c5b8b2d54f19bde5119611d125adc7f663b6e96a950e8994cd8a39d24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro_to_avro.files.open_files_": {"doc_hash": "7982951b74a20920145715691e4f1dc4b9963775f3f47d42b79990691e6fe5f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro.name_to_avro.if_compute_.else_.return.out_to_delayed_": {"doc_hash": "194dc6b457bee3e9c1cd34eaf8929dc9bccc70837ac9b55b1bb959b6098f6317"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_temporary_directory_test_temporary_directory.with_ProcessPoolExecutor_.with_dask_config_set_temp.assert_any_fn_endswith_": {"doc_hash": "3ac153dad7d0692cf23eddea8ee9d343eaecbb2561a998d9df167d143a22c99d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_from_functools_import_par_fmt_bs_enc_path._": {"doc_hash": "262c6e1922d8ab47b6fb9b359bad1c9a2ba811f4e11265f4069f891d09063470"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_unicode_no_collection_test_files_per_partition.with_filetexts_files3_.with_dask_config_set_sc.assert_len_b_unique_paths": {"doc_hash": "00f1ac7c5a728a073fa7cf4de5f74c13ee412f9092f4c4e077dc086b5b2eed1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_errors_": {"doc_hash": "8d6e41cc036145524e8793f97dbfef2e65c9ccfed1661b4542dc13eae11f417b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_read_text.if_blocksize_is_None__read_text.return.blocks": {"doc_hash": "26186157f70bf133a1b13bee524c4b2af2b92a25c05ce8efb86855ec259328e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_file_to_blocks_attach_path.for_p_in_block_.yield_p_path_": {"doc_hash": "05633cddab7fac3f21a3cd6c045661815e6a43a2ffcf19ab27c70ab35554ad8d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_decode_": {"doc_hash": "fbc549c337e5acdc504e07d67745832f61650f49c90e13fcfad400ee06972f62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas_register_pandas.normalize_series.return._": {"doc_hash": "e87e25c954e634be12f56bd44b27fa9220b6bd07b67639daf5dfe41abd9260f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas.normalize_dataframe_register_pandas.normalize_period_dtype.return.normalize_token_dtype_nam": {"doc_hash": "9c5e8d0ecf0d3e5cd1ced7eea182dc7f8bb85d1ca5840826e590705f4d76e3ad"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep_BlockwiseDep.get.try_.except_KeyError_.return.default": {"doc_hash": "9f01826e0bd970780b7db49116d84670bbc009eb5db612e83e742f890eeb2aec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep.__dask_distributed_pack___BlockwiseDep.__dask_distributed_pack__.raise_NotImplementedError": {"doc_hash": "59c3ded8e1da855725720702cb4904583e599b3df63784e98e6e94dd92238950"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep.__dask_distributed_unpack___BlockwiseDep.__repr__.return.f_type_self___name___": {"doc_hash": "26c2d59407edda458af8f9cadf6708a3e1a89b05cb08cca5c4543e5f00fc9eb5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDepDict_BlockwiseDepDict.__dask_distributed_unpack__.return.cls_state_": {"doc_hash": "43930b73b3700ee39255b55da5b787367e697084d6baf1c6aa23eca7aa0869f8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_subs_blockwise_token.return.prefix_d_i": {"doc_hash": "53471f63981b436406b0864280605caf2cc3dd8ec793c7e379a9148fda7ce8bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise_Blockwise.output_blocks": {"doc_hash": "d9795559ec4eb6665649be67f3ff3c1c3db384d6a43aa3e2440517520b75d9ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.clone_Blockwise.clone.return._": {"doc_hash": "3b81be9107d62696acb977c63de272f38f7beb47e36438a99071b55b9f2a36f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.if_numblocks_is_None__make_blockwise_graph._Create_argument_lists": {"doc_hash": "edc5d2a8843cb8847beed03c8a4cbfba308e5491d1a952e12b3d2228cbad2008"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.for_out_coords_in_output__make_blockwise_graph.None_7.else_.return.dsk": {"doc_hash": "0b92a344c18b19546b402825dd3ce198fb37724f21d4653b61656786c9c7059d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_copy_read_bytes.if_blocksize_is_not_None_.blocksize.int_blocksize_": {"doc_hash": "b51a573629cc7e6e4375c5654775f56a13167f9f2e242f9bdfa7f7a88d2fbf04"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_test_read_text.with_ProcessPoolExecutor_.assert_result_a_b": {"doc_hash": "5d5c8f80c45510aaf674d10d62ba7b65bb467ba8d0471ff4ecdae6aba388a7b6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_bag_": {"doc_hash": "c89e85611f9ded84b8b654205e064e0436e058df768bf3486e20055a05ddda6a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_parse_sample_bytes_test_with_urls.with_filetexts_files_mod.assert_sum_map_len_value": {"doc_hash": "5d10a9cb8ecf4a6df6520931601a0cfab7c115839f8e6a115fb4097b93bb9ea7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/utils.py_bz2_": {"doc_hash": "cfdc60bf7e1e63a5b168f2be9084356b4c62de55e71b765ee590c559f19c36c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_sys_Cache._pretask.self_starttimes_key_de": {"doc_hash": "54fe5b9285a65216022b1fc2eb07cef067e6a503a90e2cfd8db004f0c04d0b51"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.product__Frame.min.return.self__reduction_agg_": {"doc_hash": "58d0e92486a1d742ee0843fc203ed607bef98a489b7cfcbfac89293fb916a363"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.kurtosis__Frame.kurtosis.if_axis_1_.else_.return.handle_out_out_result_": {"doc_hash": "c610ce5e95d17d82f2b6fad90818fc9d273a96c6575721970d7388e5d4e9aa4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._kurtosis_1d__Frame._kurtosis_1d.return.new_dd_object_": {"doc_hash": "99870bde5f52d6a7003bacdc033021f9061fd74cdbc8b6a36456ef9be5c5e8fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._kurtosis_numeric__Frame._kurtosis_numeric.return.new_dd_object_": {"doc_hash": "a612e08f29f1003478b080e52e34928fe1cd2c5d88a9b3255e5a00e356e1a41e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.astype__Frame.astype.return.self_map_partitions_": {"doc_hash": "bad2c09147e013ed30b1cc8fd3719fd233ebf24e1398e6fc8b4e13c76d0adef1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.sort_values_DataFrame.sort_values.return.sort_values_": {"doc_hash": "bb9418c930b99c8a52a7e231e95c956bd96fe891e1e95246e1886105b8eed1e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__map_freq_to_period_start__map_freq_to_period_start.try_.except_AttributeError_.return.freq": {"doc_hash": "3ff4be55f2604e9307a677003808735f44f50694272a10512fd2590a22573c72"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py_bisect__IndexerBase._make_meta.if_cindexer_is_None_.else_.return.self__meta_indexer_cin": {"doc_hash": "513f70b99433817482d9b951a020eb4823f7d923abdad5ab6e83002d5232113e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partitions_of_index_values__coerce_loc_index.return.o": {"doc_hash": "8b6375bf3ecd851262d96d7139709496df283355d7683adb22c03857ef61c215"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__maybe_partial_time_string_": {"doc_hash": "f23b3a17ac618c6b4c21b7643949ab0ae64493669fc511e002ce4a48cffd5ef0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/__init__.py_demo_": {"doc_hash": "739b95885b6dd169229afcaea2e99c89c5e4451579b48bc528f79858abc63be8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper.__call___CSVFunctionWrapper.__call__.return.df": {"doc_hash": "af9bfb8147e757a15cc4614e7366c6ae84fa4917d8466c800c71f40052c777d5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_text_blocks_to_pandas_text_blocks_to_pandas.return.new_dd_object_graph_name": {"doc_hash": "98712de768e578cf11d0cd31c36ff3b6298188b48c0b67f9f18a839db49a8028"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_block_mask_AUTO_BLOCKSIZE._infer_block_size_": {"doc_hash": "6da397de6137de498e7dbebdb76046b04ea4fab9c4fdb1e2516a97ef84ca6772"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_MakeTimeseriesPart_MakeTimeseriesPart.__call__.return.make_timeseries_part_": {"doc_hash": "962d48722c0b5bf9bc1a570c70139e746aeccdb7be3c00764a2b08d1d97e513c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_os__pd_to_hdf.return.None": {"doc_hash": "f842e37d39cf550c50eb62aacced91094b0342822e6875696ace71a3187f7e1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_os_lock.Lock_": {"doc_hash": "0c43483955c8b1316a8c09e8adadbfcbb583e6c031f0f3021edd60642fb2889b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_create_metadata_file_create_metadata_file.paths_root_dir_fns__s": {"doc_hash": "1c1bf3358165fa81a0e52f3d3097bad53ebf0ecd4bf39cd6300bc25404488fe7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_None_2_sorted_columns.return.out": {"doc_hash": "bd5119c3d86cc72ffb03c79536abd6bb5411ea24f1b72ad808c18023cbc45f4a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__sort_and_analyze_paths__analyze_paths._join_path._scrub.return.p": {"doc_hash": "b2e6754730ef0f551b98f5fdc40631c82bc334fec7458c57ef02df7d67e6ba13"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_no_args_test_make_timeseries_blockwise.assert_isinstance_layers_": {"doc_hash": "bb08ff9f2046b67730140637e46ac2b3d1b98939343aba705b6f308e7755ca12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_os_test_to_hdf.None_3.tm_assert_frame_equal_df_": {"doc_hash": "2e10a65753da8f86994920d17b72c2ed138685b1bab4af9b8c64acaa12dc1b1d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_files_test_to_hdf_multiple_files.with_tmpfile_h5_as_fn_.assert_eq_df_out_": {"doc_hash": "68055e5424780c28e897977d761c03b21abb1c7b671e9095b10fb74f4f8ff89f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_test_to_records._TODO_make_check_type_p": {"doc_hash": "5c42afbab730c3e5c83d28e4c32371e76a1d9e6dd069a34e23256dfc0e1e2d44"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_glob_ddf.dd_from_pandas_df_nparti": {"doc_hash": "4ed714138545b4656a9efd7fc79f7aa89f6ee01936e8b8b847af11fcae3a138c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_dict_column_test_append_dict_column.assert_eq_expect_result_": {"doc_hash": "1d15e5c7224105ea935e6d8b648edbc43ddfd8a262e552e6fd9e7be06537c535"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_dir_nometa_test_read_dir_nometa.if_statistics_is_False_or.else_.assert_ddf2_divisions_": {"doc_hash": "919427c55238dc193d445d17a1477db56e3e44ca64399494dfeddb69b134ae4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_statistics_nometa_test_statistics_nometa.assert_ddf2_divisions_": {"doc_hash": "a64f907aedcf6685c443b1a4ee0bd8368a5e04c7f8fd3546d49131e3cc216b22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_blockwise_parquet_annotations_test_blockwise_parquet_annotations.assert_layer_annotations_": {"doc_hash": "a117b42350517c3cfd806089803a65379cb6ad426ca1fd26981aa5a155cb6678"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_column_overlap_test_partitioned_column_overlap.if_write_cols_part_.else_.with_pytest_raises_ValueE.dd_read_parquet_path_eng": {"doc_hash": "3223d558a895d25939236a722ea8fa7486234835d9a9bcf8fc5b87c99e8086e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_rename_columns_test_roundtrip_rename_columns.assert_eq_df1_ddf2_compu": {"doc_hash": "e5e22bff7770c31e15e903b8c6f6886687fa2c09c268c83e5750e4f65daa0121"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_io_db.with_tmpfile_as_f_.yield_uri": {"doc_hash": "05453d9816629f34613f628903a5d0df0d4578f7d996c8fa5b3f5dfecb05abf8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py_json__get_pyarrow_dtypes.return.dtypes": {"doc_hash": "9d41a1f554f97feb7255316216eb9f20351990669591ef31092d172c628612e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py___asciitable": {"doc_hash": "3162d113c46ccc42a82e84849c12df508639b87e665e87f665b8de525d5b7a82"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat.if_not_isinstance_dfs_li_concat.if_axis_1_.else_.if_all_df_known_divisions.else_.return.stack_partitions_": {"doc_hash": "0b59db2b685dff3601f8edb6b28eee4c5891f735f88e8e34430db3fd4f2a3780"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__contains_index_name__contains_index_name.if_isinstance_columns_or_.else_.return._is_index_level_reference": {"doc_hash": "e26dac62e603b79d7c55d98ceb4050b1cf17eec384a29bd33a3103f10902d912"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__select_columns_or_index__select_columns_or_index.return.selected_df": {"doc_hash": "d1a391feb4717fa0ed82fd92c9c5bab3cd697dbacf95e603b100ca600f74a2ac"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__split_partition__merge_chunk_wrapper.return.merge_chunk_": {"doc_hash": "8d3533f16efe6b25abe4df22a4d5ad371f23cc84081416c24fa8017dc96c15a7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_broadcast_join_broadcast_join._is_NOT_broadcasted_": {"doc_hash": "b5abce3044b536af3027bb503c66f7a32b8a261e30fa1315790354f8dae9c0e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_sys_get_dummies._": {"doc_hash": "115b0c7c28b8f863bc14459db3288eb9f3e69cbc92735581bfc1395e4f55557a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py__calculate_divisions__calculate_divisions.return.divisions_mins_maxes": {"doc_hash": "558a5569b51d31f5baff267bcc94b286b336e1a11f3196c143c2f62d7db02282"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_sort_values_sort_values.return.df": {"doc_hash": "8e2628c8c07e79270dfdfe9b1be30531823517cd0b73f261f7312c3448d9d5c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_barrier_collect.with_ensure_cleanup_on_ex.return.res_if_len_res_0_else_": {"doc_hash": "d3dbe003919feeab59c4d2251cd65311b9c3c50e8f14b95f9f58b4dbb2c3b57b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partitions_pre_set_partitions_pre.return.partitions": {"doc_hash": "ff20e18b7c2a6c13ffbd1b06a68934b5ea4459ddb7879ffb3567c64c9cef8c39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_test_str_accessor_cat.for_o_in_foo_foo_.with_pytest_raises_TypeEr.ddf_str_col_str_cat_o_": {"doc_hash": "3c0e9ce1d0dcce992a255717c26d6f771cb840c7d68204aa6aa2582d582e2112"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_warnings_test_arithmetics._Arithmetics": {"doc_hash": "e3f5591fde61e84309f423bdcf5f4038612000a82589be30f125d8f7ba5c4c7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.for_dds_pds_in__test_reductions.for_dds_pds_in_.None_19": {"doc_hash": "2a16b8dbe8af5ca5f5ac34d755fc521c244e318cf0a0d4e4faa6805931ed6ca9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reduction_series_invalid_axis_test_reduction_series_invalid_axis.for_axis_in_1_columns_.for_s_in_ddf1_a_pdf1_a_.None_9": {"doc_hash": "943bdee0e2627d76d9a8f6c868ba4cf5f8c2eb7a3100747e6c0c936fd00f58b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_non_numeric_dtypes_test_reductions_non_numeric_dtypes.None_18": {"doc_hash": "c3d417ee8fcd9dd2f72fabdc23a6d979f9f1a56ae2e0db7e02752d4fab8d0441"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.for_axis_in_0_1_index_test_reductions_frame.assert_dask_graph_ddf1_me": {"doc_hash": "c5d97912e2f2d03509c49c4fbaac74ee031eec64720f80e97a304e22c1ad880a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.None_31_test_reductions_frame.None_40": {"doc_hash": "19d43f83bd8c4c5e21c7783a1129a889d534de05eb51f87fb3ae8907640cd189"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan.None_1_test_reductions_frame_nan.None_1.None_23": {"doc_hash": "0deffe27d50f9af5fa2afd7cf677bf1772facdf5258642f9e588ff6dee2e4488"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_test_embarrassingly_parallel_operations.assert_len_a_sample_frac_": {"doc_hash": "2c88a14a9f5cbc910db052a661b671c749a941193af92acd4f395d8af190a7bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_from_textwrap_import_dede_test_repr_meta_mutation.None_1": {"doc_hash": "e92aa6379db112dc089f753581edaa94c1cf92bc302fbe6252b1c24356ae77ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_np_test_basic.assert_abs_approx_exact": {"doc_hash": "42a5e1cb8db7a33ccb3cb8b8b3f572938883d0921370935d04e814c6eb58391c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_np_if_PANDAS_GT_110_.CHECK_FREQ_check_freq_": {"doc_hash": "51335449ec7e232579f709e7105ce8a3e454e5c716bf56e3d7b8ad6e88718b2f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_np_df_left.return.pd_DataFrame_dict_idx_idx": {"doc_hash": "710bc594a54c75e68d3e239333a6c74778f058fb7b9a685aaed3e9147a085dab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_double_bcast_left_test_merge_known_to_double_bcast_left.result_head_1_": {"doc_hash": "97be3665a2edcf8685e23dc45b89a9a6498d71770666b92ed2546a64ec488a8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_column_with_nulls_": {"doc_hash": "868202d6c4485c57ca5ae4eded3026240419010664713666f904b313a7876c0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_ignore_order_test_concat_ignore_order.assert_eq_result_expecte": {"doc_hash": "ff6f519d24f57e0ef11061448bd424783ad234aa7b62a7f6811e4e527ee3a30a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_does_not_raise_setting_with_copy_warning_test_categorical_merge_does_not_raise_setting_with_copy_warning.assert_eq_df1_join_df2_": {"doc_hash": "74abdb66c0b0cacb07427712215325f94b27a7abaf73e7bf9c0c74d371ee9700"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_numeric.py_np_": {"doc_hash": "00536267af4a1d84d16ecf187c3a33c15a9c0465ba383e976b3b634efbd897b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_pd_dfs.list_dsk_values_": {"doc_hash": "e457302da84bc8b31fe6b653730b3373914d5d13456acae69f2267de8d88cecc"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_test_set_index_interpolate.assert_d2_divisions_3_": {"doc_hash": "0e3e428d4cdf55053fa83c20b0387701c7cda68c73cb4b9d0cd7a0859a6addf2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_int_test_set_index_interpolate_int.assert_all_np_issubdtype_": {"doc_hash": "a8516b228ee0233f3cb331e9eaad6249f07447cc04210657160e8703f09fb847"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_large_uint_test_set_index_interpolate_large_uint.assert_set_d1_divisions_": {"doc_hash": "d95d72f00517b9b357b5202cb17e7c40d63acb5ca5a5a83afa9b3a58d231f4f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_serialize_test_shuffle_hlg_layer_serialize.for_layer_in_dsk_layers_v.assert_layer_roundtrip_ke": {"doc_hash": "6d929b4d98e6380b61d6cef521d156d8af33fc99788ceadc606f5048cd53d731"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_visualize_visualize.return.p": {"doc_hash": "e4cb55952b3431fcb8607c1fd5045d54cf9a82028d569920e9ff160bd97cd622"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_checkpoint_checkpoint.if_len_collections_1_.else_.return.delayed_chunks_checkpoint": {"doc_hash": "c437015c37355a087084a86df1363a25cac80a9abf163316bb90684db9f7af0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__checkpoint_one__checkpoint_one.return.Delayed_name_dsk_": {"doc_hash": "9d167e5230ac0ada0a509aea44c6c46131908e8bcb5b250578470b1c0f2e6dca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.get_dependencies_Layer.__dask_distributed_annotations_pack__.return.packed": {"doc_hash": "cc5810ca38a4948f9f86ec1c074706d48fee2473c816a100ee8c34a8ec825204"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_external_keys_HighLevelGraph.get_all_external_keys.try_.except_AttributeError_.return.keys": {"doc_hash": "f7494ba8193071ffaed5417291df9c8515a24fdcbfa07a3a5fe3dfe2b1c232d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.items_HighLevelGraph.get_all_dependencies.return.self_key_dependencies": {"doc_hash": "3dbd227489dae85932eeb12b745c9804a39a1861320912ccbf8c1bfd444c232e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__dask_distributed_pack___HighLevelGraph.__dask_distributed_pack__.return._layers_layers_": {"doc_hash": "64000e92715428f38f07391eac8a1f39f6385a9165bb9f7edd73fb6caa82feef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer.__reduce___SimpleShuffleLayer.__dask_distributed_pack__.return._": {"doc_hash": "8b7036449533edcb704d0ff307d3b713ef05d34b0d640fe824243d716a8358b2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer.__dask_distributed_unpack___SimpleShuffleLayer.__dask_distributed_unpack__.return._dsk_toolz_valmap_dump": {"doc_hash": "1fc9f883d1030f5582d2b47f8959b40e3e06e1371afdac433534f4a1122f15a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._construct_graph_SimpleShuffleLayer._construct_graph.return.dsk": {"doc_hash": "1db49ea26bf44219e37ea4f3376c5f98d3cf7193a8ce56299a48fed1a599595e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer_ShuffleLayer.__dask_distributed_pack__.return.ret": {"doc_hash": "0af997f47666410312c69c7b5972c79bccae2ec0cef2ca72988605114397957c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer._cull_dependencies_ShuffleLayer._cull.return.ShuffleLayer_": {"doc_hash": "1f57367ced0c9c5d0859542946e01b41cff891d6c623fc7ae06071fb3c871c50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer._construct_graph_ShuffleLayer._construct_graph.return.dsk": {"doc_hash": "1ecc5aad1bd2839e978b644b4ea8553ad3e46a8d0c1ad02812f717b4afbc5279"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer_BroadcastJoinLayer.__len__.return.len_self__dict_": {"doc_hash": "14c043ef49c51a62a944c8ab71e4d503b2e5b066adad047d788e7bb65b599aaf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer.__dask_distributed_pack___BroadcastJoinLayer.__dask_distributed_pack__.return._": {"doc_hash": "df4e3db532a4e01ac0ccc5a7f961a444d7a2fe51e223751140a814850b5beb94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer.__dask_distributed_unpack___BroadcastJoinLayer.__dask_distributed_unpack__.return._dsk_toolz_valmap_dump": {"doc_hash": "f8f6639f61a9d89eb970e2860b349c134f02e13493038de71403b5fb27e80b13"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._keys_to_parts_BroadcastJoinLayer._broadcast_plan.if_self_lhs_npartitions_.else_.return._": {"doc_hash": "58180d167fad062e265eb2d7dc2c0b2915f95e5274ad90361f1829ba6a3f8d75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._cull_dependencies_BroadcastJoinLayer._cull_dependencies.return.deps": {"doc_hash": "70ef55395373d231101b34a51ae6f339f19e4515efe2a5480547da2e6ef622ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._cull_BroadcastJoinLayer.cull.if_parts_out_set_self_.else_.return.self_culled_deps": {"doc_hash": "b351fd56f6f5df67f852cf2eec8b2c1de8886b41498bf7520f6613a934e211e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._construct_graph_BroadcastJoinLayer._construct_graph.return.dsk": {"doc_hash": "16f097cd682f716d3ece63d58d7118c1c908a2cc7f75d55017ef86779809845e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameIOLayer_DataFrameIOLayer.__init__.super___init___": {"doc_hash": "50913bc7e056ae8b38fd650440e829dfb8afab2aebce48d919432adf5247f4c3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_2_execute_task.return.key_result_failed": {"doc_hash": "bf7d88b7bf183dec85189a73ff4c39b4b3024289df03a691e53952dbd57fe5c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_batch_execute_tasks_release_data.if_delete_.del_state_cache_key_": {"doc_hash": "da9aa2c8dbe575eba07d2423af1eee87c14cdf83a0a45d2bfeab801a620a53a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py__Synchronous_concrete__get_apply_async.return.get_async_": {"doc_hash": "9820f621ff8fa87f92e70de760b5d721ba5ff6afc39b8e47c53cc419a86d037e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_fuse.if_config_get_optimizati.return.dsk_dependencies": {"doc_hash": "71eb38bcd804c002348b37a3e78298fb7ec8d5faec7d47b453fd522f2e00a9ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_from_operator_import_add_test_cache.assert_not_Callback_activ": {"doc_hash": "cd606053bac37e8dd590ceaa829fbfba03e2483905dae101f1e8cafded7d65e7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_from_dask_callbacks_impor_test_start_state_callback.assert_flag_0_is_True": {"doc_hash": "787662aac197710e3d2f89b5af1443e39f0c9b2ba887300f551f9e97222df589"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_pickle_test_istask.assert_not_istask_f_sum_": {"doc_hash": "a5a050ac418c6685aa291073dc71fb870f64fcdeff029a0eccda6945dbe073f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_datasets.py_pytest_": {"doc_hash": "112e1f817ed1d461eb358b1e8fc5410ed9949241d4e774e3e24ca399d8cb5736"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_pickle_Tuple.__dask_postcompute__.return.tuple_": {"doc_hash": "eef09b1d28a3b84bc1a53009dc4bcf6f7a1d47944bf3f21a2f7f105fe8ed0a64"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_with_tasks_test_kwargs.None_5": {"doc_hash": "b16e37c3aec3a8dbfabd0cdfe9be39a9e0b48451f14ee061f0678c298eb316fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_pytest_test_persist.assert_y2_key_in_a_data_o": {"doc_hash": "76d7302280f45bb8cde55de1984965b07661507b351e0f03958ba687173411c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_array_creation_test_blockwise_array_creation.with_dask_config_set_op.da_assert_eq_darr_narr_": {"doc_hash": "7d4e6943c2f1448b54497b87250ce61da06737a4dcba316bf5d6a321a4966af6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_dataframe_io_test_blockwise_dataframe_io.with_dask_config_set_op.dd_assert_eq_ddf_df_che": {"doc_hash": "d4ba7ebfe5778356d4452df5e27a88b050908c334393a2e6040e2ce43c11a9ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_numpy_args_test_blockwise_numpy_args.assert_res_1000": {"doc_hash": "3ffacc2c442f9e3a579289c6a919f530eea1c36294e7cce63087bc8a7d6196bc"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_numpy_kwargs_test_blockwise_numpy_kwargs.assert_res_1000": {"doc_hash": "b52756448561d67672b1471ef3c8f4ac3f4023b570449c56690e772efca93607"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_partition_info_test_map_partitions_partition_info.assert_res_1_number": {"doc_hash": "129e86780519e7bf0a883266f123c87d66068d80d48a4b20213e1a2aa80454fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_subgraphs_test_futures_in_subgraphs.ddf_4.await_c_submit_dd_categor": {"doc_hash": "ed31972c864206c5d1445dad628401bd31f5b4ee36674a490faaedbcaa30fff3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_copy_test_task_label.assert_task_label_add_": {"doc_hash": "c7dc004de354d6dc63610a042508ca870fbd2e41ec9139c9d4b49e91615860f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_assert_no_common_keys_assert_no_common_keys.if_omit_is_not_None_.else_.if_layers_.assert_not_dsk1_dependenc": {"doc_hash": "a82574860979f68d61f128904ce5092ac8e4ca458e0461f27e6818d449420014"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_assert_did_not_materialize_h2.object_": {"doc_hash": "e33b0cf11a3f8a1ff00bd21065227ee296f5882b00920b0b8fe3ef000e229581"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_test_bind.assert_cnt_n_9": {"doc_hash": "758170c50f534d5c512c2cfcf10a0ef0b018939e943cf5e771d62b2f45e034e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_clone_collections_test_bind_clone_collections.assert_cnt_n_8_or_func": {"doc_hash": "66a8bd92282d62c312367b0b7e77e78fdf95a6fee7865bdce483beb721ed7f0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_clone_collections.db_utils_assert_eq_b3c_b_test_bind_clone_collections.assert_cnt_n_36_or_fun": {"doc_hash": "e4cff58cec2760834eae49fe9014d33127120a241402b6b5d7290e2b55d992da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_split_every_test_split_every.assert_t4_compute_schedul": {"doc_hash": "75f55ea8bb0422c1f3e8658a9acfcaac6153d8ac048ed7c2f49b84896663e3ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_split_every_invalid_": {"doc_hash": "f98b7969de5bc8c269ff894abaf89b810bd883b28dddca4d40c9e2039bd729b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_blockwise_cull_test_blockwise_cull.for_name_layer_in_dsk_cu.None_3": {"doc_hash": "cc9f87973a02309e8e39b7e3ff55f2918094f8ac61d59ef43d76a8c955145d0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__pq_pyarrow__pq_pyarrow.if_pa_ds_.else_.return.ddf1": {"doc_hash": "a4893c83406764225048ca3414252de244fb3cdfaa459758bedb540f3b2368d3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__pq_fastparquet__read_csv.return.dd_read_csv_os_path_join_": {"doc_hash": "89da9636f3853b78858f28164a84c703517199ca13d5ffc765b42f52f6a85d48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_scheduler_highlevel_graph_unpack_import_": {"doc_hash": "9f8a36821a5e10be2282fbe63b104fd79f29793ca60b19b3e61a6007ba8f533d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_multiprocessing_test_pickle_locals.assert_b_unrelated_functi": {"doc_hash": "ecb5c44a0a58f413d4e331621faefc1acba06b2e72fb357fd716165f8d2e3099"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_os_test_reuse_pool.with_pool_typ_CPU_COUNT_.with_dask_config_set_pool.None_1": {"doc_hash": "6b19ccad222c41fc00d2c16b099ba7028cf64527978c6ab5d936393632291239"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_pool_kwarg_test_pool_kwarg.with_pool_typ_3_as_pool_.assert_get_dsk_x_pool": {"doc_hash": "98f7a8934a67f70c114bbd4d08fa34fbeb761bbc7174c316a4acc46ca07d509b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_datetime_from_dask_utils_test_impo": {"doc_hash": "15bf4b9fd6d2821efaf3f379fbd90a72043801cf6797878dc7ec5b8109201a52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_stringify_test_stringify.assert_dsk_y_1_1_0": {"doc_hash": "e10f463827acc4b6819637f798cba0bf6e5fd14ce5fbe9f43db0aab7d3be27b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_OperatorMethodMixin_partial_by_order.return.function_args2_kwargs": {"doc_hash": "4b021ca748aee38c739f07f03fd9c5a97cf362aa34e7e92a566e13eb532a1fcf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_arraylike_is_arraylike.return.bool_": {"doc_hash": "a796c1234325877ff530976fc4613ddac7cccf1ad83300dc6e0139c929e7115c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_bytes_format_bytes.return.f_n_B_": {"doc_hash": "6564ad876d97d87ae20dc188f4a0f37377b03d1f515503e233c50d7274f8c876"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_from_random_import_randin_trivial.return.d_x_height_1_i_": {"doc_hash": "42c2e77d1099c84b1031f77dd21b879f8b062eaf1b81fdba8502bc7064326bd7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/__init__.py__": {"doc_hash": "15df1371cf035f7eb51fcb8dfac9ff98b7990407d56131c55300e3fac0dd2b3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__concatenate__concatenate.return.out": {"doc_hash": "cb48270bf529d63e398394091cdddc9ea1eb2d12cd506cf8ac7cbcc2b098812c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__tensordot__tensordot.return.res_reshape_olda_oldb_": {"doc_hash": "ee28d80c287b3a0efaa1d23cfb68593e19a7f2229a93a839d4e26eb0995190d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupy_register_cupy._cupy_einsum.return.cupy_einsum_args_kwar": {"doc_hash": 
"1851fef8c6154702a0427ff97bc5fbcc2f0b125acfc6bc2853cd8a64cf3352f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks_unify_chunks.arrays._": {"doc_hash": "881d2e0076999ceba1b33ab85b6aeff87560f3242e97dbcf20fc4f421110ff4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks.for_a_i_in_arginds__unpack_singleton.return.x": {"doc_hash": "4cf0cc1d7841b4e7575fc9f1489aef646dc26b297f8957a891c391779757a475"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_deepfirst_shapelist.if_type_a_is_list_.else_.return._": {"doc_hash": "f99389ea0ce0356f386044ffba060852fdbf8a4717c1434b4257b1d4941243a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_itertools_empty": {"doc_hash": "57c2e89a98b06c7d8cbbf2ebe462347acaeb7f9ffa2319e516909a3404755f9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tri_tri.return.m": {"doc_hash": "bfbf265be57f4575517cec12ee7db012273cbeff3b45aeab12228b4965e41dfe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_fromfunction_fromfunction.return.res": {"doc_hash": "b7c9dd44f659fc6b3b40ae2eabe083d0b6520e3ae1d0b7ca8cd036883b145131"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/dispatch.py__": {"doc_hash": "c12c304c4a12aff74f878b6f926e8429eccfd7cd1ddbf1a0105d4fc315a2517d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.for_dim_sizes_in_dimsize_apply_gufunc.leaf_arrs._": {"doc_hash": "a28535baf59ba0068444e733c4788f262c55ae6853cc2bb32ab0404b99f29aee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.for_i_ocd_oax_meta_i_apply_gufunc._Undo_from_above": {"doc_hash": "c4c187af3a493ac3a311688202b3932f7be900acc0efe89c9440cb6e461c221d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_from_functools_import_wra_masked_outside.return.x_map_blocks_np_ma_masked": {"doc_hash": "ee9801931ea86a56e2fe597e2d3d14b35216d04c0c8dd5b99c55cef82d5a03ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_warnings__overlap_internal_chunks.return.chunks": {"doc_hash": "6c222e808b7105b8e795b71c9a0ac285de67bd7730f8413aa758033c16e6b573"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_internal_trim_overlap.return.trim_internal_x_axes_axe": {"doc_hash": "6c20c133916106ecbd125b7415c8a71b4d7cf1e7cbde05bd9c4545668cc7e345"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_trim_internal_trim_internal.return.map_blocks_": {"doc_hash": "1f839a625a2dc079382b5c81432ed67a1b11b9de09679ccfdc8c525c30dcc732"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.beta_RandomState.with_contextlib_suppress_.choice.return.Array_graph_name_chunks": {"doc_hash": "8bb0184efeab947876b26f860481905c92775bdadbae4835e6043aeb4c5c8cdd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__tensordot__tensordot.return.x": {"doc_hash": 
"2db8d139016a591c544b30d310dbbd60eb3cff65eac1977c205d9392ea33931f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_digitize__searchsorted_block.return.res_np_newaxis_": {"doc_hash": "3c715398662ac256c3c618acf8b0bbdab3b8be8408e6f14b64dfdc1544db9657"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_searchsorted_searchsorted.return.out": {"doc_hash": "e91142138ab8281bc366c757bb642f694ce000f5a1fb6359aec408e26d13c8db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__TODO_dask_linspace_doe__block_hist.return.np_histogram_x_bins_ran": {"doc_hash": "683c90a2483e1c4bf2efa30f4aafb4ffcd7fb12d2ba84ce94e653367b6549c2d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__block_histogramdd_rect__block_histogramdd_rect.return.np_histogramdd_sample_bi": {"doc_hash": "711f3816dc78935257d22cc53937dbe859cb180777494b1e012915abb48adc1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__block_histogramdd_multiarg_histogramdd": {"doc_hash": "35be6a398cd33c337b1e2451f32d45777f1781022444fbd170034d91cd40452b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd._logic_used_in_numpy_his_histogramdd._sequence_of_pairs_": {"doc_hash": "adb34481bdc4e91cc35f3ed5d7611fd002e8b15a62468fb3d80907b010256df0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd.if_range_is_not_None__histogramdd.n.mapped_sum_axis_0_": {"doc_hash": "ec156afab18dbc08c90472904cfdff1d80a6e64f70faece9bb8d5053298891e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd.if_density__histogramdd.return.n_asarray_entry_for_en": {"doc_hash": "2f75f844d93e2533e682b45399c03997111bb9d61fca4956f3f7450cef0a414d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ravel_multi_index_ravel_multi_index.return.index_stack_map_blocks_": {"doc_hash": "4351c883b418dff2cbc7aaa6c44bd75ed24231f6711f84a1ce92f7b33e2e2d42"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__int_piecewise__select.return.np_select_condlist_choic": {"doc_hash": "02f76a18c63cb1d05fc59d4557219d72156358859dba4c0a52972e747b17b4bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_select_select.return.blockwise_": {"doc_hash": "a3d1d6f15d6479fd2f8b30b4d53d3df694732daae0b03eb2d7599eb38f97ff58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_unknown_chunksizes_test_block_simple_column_wise.assert_eq_expected_resul": {"doc_hash": "0bf051e148582ad1bea04ccdf101a3bf1e52a2f58dd7487e27a2f0f4599bd1ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_with_1d_arrays_row_wise_test_block_nested.assert_eq_expected_resul": {"doc_hash": "8f95517815a032bbc29dfcc65ac4fccd95d9795f96dadf8c9921a3bb21eb7218"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_args_validation_test__validate_normalize_axes_01.assert_o_0_": {"doc_hash": 
"2706fb3a4ca585c089686ed08f3eedca661f7ee7b9ceae8def40686299e07999"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_many_outputs_test_apply_gufunc_output_dtypes_string_many_outputs.assert_min_compute_shap": {"doc_hash": "29e88261bfc9fda8828a53df3f8cbed683ddcfd5c467acb330e0a93b0076efe9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_mixed_inputs_test_gufunc_mixed_inputs_vectorize.assert_eq_x_np_full_8_": {"doc_hash": "004f605773f31ac7ec0379021314e65f43cbb9e8fcef1f77c0e47b63203b42e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_test_as_gufunc.assert_valy_shape_10_": {"doc_hash": "baa93f1f5001e54f46813d4a9f25693504ac2bc9e61c89f11fe35adc07f1f582"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_broadcasting_loopdims_test_apply_gufunc_broadcasting_loopdims.assert_z_compute_shape_": {"doc_hash": "29f95813e385c84658a211bb42446ea4e41815fa4dd229698212c33a500dd143"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_pytest_test_overlap_internal.assert_same_keys_overlap_": {"doc_hash": "1a6219a538c41286ff2fab7d1f1a291fd49c5667758a2c9ba651fb96d53eef35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_contextlib_test_array.assert_isinstance_y_da_A": {"doc_hash": "69ac0c0d71a6b5a2dbd1029ff97835f268d04832743798a3efc79db76de6089a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_searchsorted_test_searchsorted.assert_eq_out_np_searchs": {"doc_hash": "c1287aeca59f2ef41141e93c44736be68c1b291dd44a5caa5d2e3ab8920cb1af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_searchsorted_sorter_not_implemented_test_histogram.assert_same_keys_da_histo": {"doc_hash": "bc6f55b42bbe9fa8dee8c2d2044a7e1977597879776a20c306fbac35bc85814d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_seq_of_arrays_test_histogramdd_seq_of_arrays.assert_eq_a1_a3_": {"doc_hash": "26e906ab370f57eb9fad94e789e04f74c83bc2f8c2809a67a323dca60e30f6e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_weighted_test_histogramdd_weighted.None_3": {"doc_hash": "2b61fb5b030595a80a645d27a23e64bab90633d7bf335ee3c3373adca4c535a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_weighted_density_test_histogramdd_weighted_density.assert_eq_a1_a3_": {"doc_hash": "72ac220d907ec114a8817423df0577133f6b13a828353c57d4b8441d823b2592"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_sample_chunks_test_histogramdd_raises_incompat_multiarg_chunks.with_pytest_raises_.da_histogramdd_x_y_z_": {"doc_hash": "d5c6bdd9fe68b7b54f1cfa8f7ac7e2f3de8d8b92e95bf81a7cc0f58b15086561"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_weight_chunks_test_histogramdd_raises_incompat_weight_chunks.None_1.da_histogramdd_z_bins_3": {"doc_hash": "5e6ad026c6e21f0a65e6431720c27ac4dccaea772a0746aaa3a1e27290b115c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raise_normed_and_density_test_histogramdd_raise_incompat_shape.None_1.da_histogramdd_data_bins": {"doc_hash": "b6ee5964d6422bafcdaf82c73cda09cb476297a940423d700c31092685074e8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_edges_test_histogramdd_edges.None_1.assert_eq_ib1_ib2_": {"doc_hash": "7b1d96e5c49e5606aec5377b9e844d8f26c66d1bfe81134b64faab5224e56f90"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_test_select.assert_eq_np_select_condi": {"doc_hash": "49edd60edc73fee3af3faff326f734388eda40a2c8c6ea5359ca593757b83d25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_multidimension_test_select_multidimension.assert_eq_res_y_res_x_": {"doc_hash": "97f74199d175656054bbe5e788eabc97d5c7bd974c956bf240c9c8843160134f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_return_dtype_test_select_broadcasting.assert_eq_np_select_True": {"doc_hash": "0daf29c3f35f05fbd44c99acdcf770c94fb458d254982461125dd9e3c0e1b368"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_unknown_shape_test_ravel_multi_index_unknown_shape.assert_eq_": {"doc_hash": "0342cb589c8ad42e2679b00419849a0a214409f3951859948c495b8ed42abd21"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_unknown_shape_fails_test_ravel_multi_index_unknown_shape_fails.with_pytest_raises_ValueE.da_ravel_multi_index_mult": {"doc_hash": "6293ef1e3515bd73d0bc87d0f4fa94ae22e3054b2149a2c2cb5a94b893779d45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_delayed_dims_test_ravel_multi_index_non_int_dtype.with_pytest_raises_TypeEr.da_ravel_multi_index_": {"doc_hash": "03bbe4166e390340a3f92a0515b304779dd491419f50fb0709b97ee82fea5b28"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_divmod_test_divmod.None_7": {"doc_hash": "2f457847ab2f49ecfa28e537c2e41e1a38637796bf4efe8f73e4c394d43954f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_pack___Blockwise.__dask_distributed_pack__._All_blockwise_tasks_wil": {"doc_hash": "ae8a9de48934188f9d0380de69e1ec5eb2126de7bd5dbfec3d688cb8f02cc5d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_pack__.global_dependencies_s_Blockwise.__dask_distributed_pack__.return._": {"doc_hash": "cde1f27174cad2da349db6554a8017fadbca14840ede7554e108871dd291b7a9"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py__nonempty_series__nonempty_series.return.out": {"doc_hash": "a7fbdbe9b340ad1adca3c9ffc375624b31813e97fb8be827b2b345942289d63f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_union_categoricals_pandas_hash_object_pandas.return.pd_util_hash_pandas_objec": {"doc_hash": "5c210dd1ab0be54f16bf43cd20059c2cf5376b3bc63e1ce0dfa69e3a0c0fee9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_ShuffleGroupResult_ShuffleGroupResult.__sizeof__.return.total_size": {"doc_hash": "13cdcc00846609480ae56291ed8344d64d146bc4ccf18cd0cd80445edbaac300"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_group_split_pandas_group_split_pandas.return.ShuffleGroupResult_zip_ra": {"doc_hash": "be362f70a209d6cc84f4adadb1636ff63e6159af5f624422214a297c07411710"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_pandas_concat_pandas._Concatenate_the_partiti": {"doc_hash": "79602fcb58c282da08ddaeb51f2a0bb50ad8eb44a2e0cce5574817bf0bd389ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_pandas.if__concat_pandas.return.out": {"doc_hash": "58540020ec2eeb4948b12d16c9e76286db952f3081fdba4aa79349fecc5ca553"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.add_prefix__Frame.any.return.self__reduction_agg_": {"doc_hash": "35c4c6cfec2229f402e2a2d385c894a9f43e0c21eff80b9bd106cd011e76d1b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py___union_categoricals_dispatch.Dispatch_union_categoric": {"doc_hash": "ee5d387375d75ad2a6bd859dcfa9f2187bc7518f170d4b771c72dd98bc4c1131"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py_concat_tolist.return.func_obj_": {"doc_hash": "74edf14f93c2fa6483e51eb355e4231e7c540243d042e57f6bfe79e97cdf476c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py_make_meta_": {"doc_hash": "4d40f8a66a4b0771d0bd8f2ebfd99651fc30dab92132c4019434433ed0d7a729"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper_CSVFunctionWrapper.__init__.self.kwargs.kwargs": {"doc_hash": "ed25cd68cf10283398466115d0b856b5d6b731e963e987b24e8db8d9c1fb41be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper.project_columns_CSVFunctionWrapper.project_columns.return.CSVFunctionWrapper_": {"doc_hash": "6a3b53e409d61ee2816c21e25178e65bc9a49a19229d1d1fe13df740619d0939"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_np_make._": {"doc_hash": "5d414050a7ee9959edb3112404f32b4e2be155ae3e784c1ddc7a7d36ee291646"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_HDFFunctionWrapper_HDFFunctionWrapper.__call__.return.result": {"doc_hash": "3a2f4301a3b5977d6479efdfe429a67d7eed6709ef5fa9b7f12981b2569ced69"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf.None_7_read_hdf.return.new_dd_object_graph_name": {"doc_hash": "395efdac65398b3f026591009f3ee95503846b8323a66930d1a6d9579afdab30"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__build_parts__build_parts.return.parts_global_divisions_o": {"doc_hash": "607e616093c6dc752cd55be122e6a0af131392569c271a62ed1d8e00ca63e7c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__one_path_one_key__one_path_one_key.return._": {"doc_hash": "380800324f676705a2e91d1e9dbdb78b8195613bf3b59078e720e6253467cc7f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__expand_key__expand_key.return.keys": {"doc_hash": "e8e04dccf7231058ae008ebd899888a7ee997400f1d19e52a26793b3e0f6f92e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__get_keys_stops_divisions_": {"doc_hash": "5705a9006d8d22d4b89ccd8cf395088d1a180cdba1d70d0d14499118d5808c50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_json_del__pa_version": {"doc_hash": "99172fb323e60c56aefda52ac0d6d28c593bf061dec54d97f445ed67846df37f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._get_dataset_offset_ArrowDatasetEngine._get_dataset_offset.return.fmd_i_offset_append": {"doc_hash": "612d92b5eae0b68619b26b3831a618ccd77903c9e391a22cba2d69af60cc2177"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.initialize_write_ArrowDatasetEngine.initialize_write._Inspect_the_intial_meta": {"doc_hash": "5881addeaaf071ca80134e65e0375bd9321f416256379b3696c85db413974510"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.initialize_write.if_append__ArrowDatasetEngine.initialize_write.return.fmd_schema_i_offset": {"doc_hash": "1f7035e4bb2fb53b19341235fc0eeb8b4121c3cfa554ede9483f30671f261328"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._get_dataset_offset_": {"doc_hash": "fb3d992c25b3cbb38b818b58bde5c2e4445ec61b233df4fa644eaa1361d2baa7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__read_sql_chunk__read_sql_chunk.if_len_df_0_.else_.return.df_astype_meta_dtypes_to_": {"doc_hash": "5cd61a60fff573643c1345408b33efe3d229c3f8ace99a1ed7540a7cbec9068f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_raises_on_no_files_test_csv_with_integer_names.with_filetext_alice_1_nb.assert_list_df_columns_": {"doc_hash": "2f0d10a8a10aff5f22703d74cf65bc1e4f1055c8cc11ea0c0b4494d1db3a192d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_line_ending_test_to_csv_line_ending.assert_raw_in_expected": {"doc_hash": "3add9e8f49b6e48571ebf2d759f83ad42e06c2a5fa6f124f404d5db868bcba58"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_block_mask_test_reading_empty_csv_files_with_path.with_tmpdir_as_tdir_.assert_eq_result_df_che": {"doc_hash": "f0eca8a48404bbdbf477eaedc3a578362e09e17f5dd3443e918064a1c09a6d05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_groupby_get_group_test_csv_getitem_column_order.assert_eq_df1_columns_d": {"doc_hash": "3072aacec956e68f78b6c8b498200f636525670e8709d90d2fab3f25a2f5c91c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_nodes_test_to_hdf_multiple_nodes.None_4.assert_subgraph_columns_": {"doc_hash": "22f95db391376adad9405440e6fce272e767aee88e4afe621ad0a82e37c1faa4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_test_roundtrip.if_.pytest_xfail_reason_fast": {"doc_hash": "625b2730cb84ed3d96e4e359e40e579c789fd1a4faff510834c5a3e0ebdef036"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip.None_2_test_roundtrip.oe.write_kwargs_pop_object_": {"doc_hash": "5a418a01e3120a1863a423c9e894f87671170e93d2baa822a68f30041f8dbd0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip.if_oe_and_engine_fast_test_roundtrip.if_str_ddf2_dtypes_get_x.else_.assert_eq_ddf_ddf2_": {"doc_hash": "dd043cbf4dea47cf8876a8f0c4f9912ff969db3f00120cad783380a93b84ec56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_inconsistent_schema_pyarrow_test_read_inconsistent_schema_pyarrow.None_3": {"doc_hash": "accffd0f5b68fab95c43cd2ec21ae4b06e8d3c54aebaa6fbb2c9afa9c1af2b79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_blockwise_parquet_test_optimize_blockwise_parquet.None_2": {"doc_hash": "ce6ba6bd8946f18ec4ed22d6db72649caf9f2e3a3ad5aa31b8bae9c60a9c15b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_single_column_test_single_column.with_tmpfile_as_f_.assert_eq_test_data_pd_d": {"doc_hash": "1aa1ca0bb9ac5f780331b552fff412ac65322c740adbe37f220d14665b82891b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_broadcast_join.if_lhs_npartitions_rhs__broadcast_join.return.new_dd_object_graph_name": {"doc_hash": "74ff25f5c00be7e8eaeb4f4b7d5e59217cb231c608459a7a75b73124c957833d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__recursive_pairwise_outer_join_": {"doc_hash": "0040c3923a29caf404a50dfea06b51acaf2dda1e4702dd6920fb1ab56d319c81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_with_empty_test_merge_asof_with_empty.None_2": {"doc_hash": "2f48393faa60084449cd508599cdcd61d81682b2840102d9808ea61a26d73c38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_large_to_small_test_merge_tasks_large_to_small.assert_eq_": {"doc_hash": 
"71d0579f8948c25c65b230c23fa65a9e3d0815d75aadabf989e12df49a3f829e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_pairwise_rejects_unsupported_join_types_test_pairwise_rejects_unsupported_join_types.e_match_merge_multi_only": {"doc_hash": "60ea8b2bbfc3c327041da98db62432feaa3ce968d6c004e26fb58e9e8f713608"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_pairwise_merge_results_in_identical_output_df_": {"doc_hash": "545c28bd8bf5a9360b75c4a0f01e26298490126fb084c5bb724ca80bcd6a1cf1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_np_shifted_sum.return.df_a_b_c": {"doc_hash": "552922784130fc917696b0a2ff5174492ebc2275644bc08e85c47483f691e4e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_uint64index_test_meta_nonempty_scalar.assert_meta_pd_Timesta": {"doc_hash": "ff011475b96060b401f605f6f8f523d1449bb7ae1d589d2afd694405e570537e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_raise_on_meta_error_test_raise_on_meta_error.None_1.else_.assert_False_should_hav": {"doc_hash": "eeb3ef45f3d7c23d02876f9ee1ff26774889bafe443361e007d132e6f6d34a1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__empty_series_is_index_like.return.dask_is_index_like_s_": {"doc_hash": "b2d3954857649b8ce9e4ecb12db85d1edd0b3e45cf1f81d7e627a285a9da0025"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_works_under_error_test_profiler_works_under_error.assert_len_prof_results_": {"doc_hash": "97fb7f37615e24b5ed11122782a05e958ad83bce48788a58c37191f412c06959"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_two_gets_test_two_gets.assert_len_prof_results_": {"doc_hash": "a0d27502008351fbf433d3965a200ebd8e75d28e042e2e7b7842a3ab81e8aca9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer_Layer.get_output_keys._this_implementation_wil": {"doc_hash": "0d8940052d57cf99df7cfdda6113f297ba80544f141f083bbf4f77042647ad51"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_MaterializedLayer_MaterializedLayer.get_output_keys.return.self_keys_": {"doc_hash": "f63f8538e35359aa2b463c87217e84f701f07692c703f8cfb33d395c2f030f48"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayOverlapLayer_ArrayOverlapLayer.get_output_keys._FIXME_this_implementat": {"doc_hash": "65a1722e7c87e712a6266af9c6c5e2ec0113929bf27c9fce20188bdd512a6204"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayOverlapLayer._dask_keys_ArrayOverlapLayer._dask_keys.return.result": {"doc_hash": "dca3b9f91bebfa71c079d94b8005dbb4bf8d5d9b6aa5eb38fe6eb6fabf2ad226"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayOverlapLayer._construct_graph_ArrayOverlapLayer.__dask_distributed_unpack__.return.cls_state__construct_g": {"doc_hash": "e847e9920e2b4459ac33632262c8d200a00a4e23674941cd4b3cc4c6c4b8e440"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py__expand_keys_around_center__expand_keys_around_center.return.result": {"doc_hash": "f64be1187bbf7d0fa04f8caf73c4d895f16d2ae42cbe2b4db26e4cd4be0832fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_reshapelist_fractional_slice.if_all_ind_slice_None_.else_.return._operator_getitem_rounde": {"doc_hash": "7331078a9c12acfdee100afd7a3c5b107f50e9fea0e6672c6977eeb604ad0ec0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._keys_to_parts_SimpleShuffleLayer._cull_dependencies.return.deps": {"doc_hash": "29cd9eba739886fa970b310f8e3bf4bae7fe7b9701006b6f679ed76e24eba341"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._cull_SimpleShuffleLayer.cull.if_parts_out_set_self_.else_.return.self_culled_deps": {"doc_hash": "5641c33df98d42baed1dc4f3216c6c94a5d8d9b9f259104d581816d0482472b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_itertools_sizeof_array.return.o_itemsize_len_o_": {"doc_hash": "0f9edc52ac984124943becb6bb281d0d72541e7fa8c7ccc4355a879e783152f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_sizeof_python_collection_sizeof_python_collection.if_num_items_num_sample.else_.return.getsizeof_seq_sum_map_": {"doc_hash": "fd0aab12f1444b334fc792fc88d0dd76071b14c571a30c13cb49273269c6cb75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_SimpleSizeof_register_numpy.sizeof_numpy_ndarray.return.int_x_nbytes_": {"doc_hash": "ca6bc21240d64c914cfff9d2a29b8e683c9bcfe02995edbb1c0a01e2eabf5564"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_test_visualize.with_tmpdir_as_d_.x_visualize_filename_None": {"doc_hash": "9560380864899b0b52f0cabd879f855a161e59769e47ab77398f335d6a27fd4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_order_test_visualize_order.with_tmpfile_extension_d.assert_color_in_text": {"doc_hash": "4cb54f40b266d78d3b971f03e539a174aebbac874d7ab428415538df266f5fdf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_fusion_after_compute_test_blockwise_fusion_after_compute.assert_df_x_result_comput": {"doc_hash": "08250babd72408c6812e37e1a6b77bab6f8f626037306055ad3f102f9f81b8c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_different_optimization_test_blockwise_different_optimization.np_testing_assert_equal_y": {"doc_hash": "b7da902727ceb3f1a793d09a90b4faabce82ca2416016663e824d8fe68abca9d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_shuffle_priority_test_shuffle_priority.assert_late_split_early": {"doc_hash": "d4948c3255a832310401a52aa11d612f6c4fce0a27cefd294a8a475ba55fc043"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_da_input_test_map_partitions_da_input.await_c_compute_df_map_pa": {"doc_hash": "23cbcf40d255ab2274e63c281053261ea8f7be4f4a6b0edece276b40b34a9a0d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_df_input_test_map_partitions_df_input.with_distributed_LocalClu.with_distributed_Client_c.main_": {"doc_hash": "dc4bae32150f6ae7613b9e2b5aaf370c231b4f35f044fb22ec11450efbb6673e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_repr_html_hlg_layers_test_repr_html_hlg_layers.for_layer_in_hg_layers_va.assert_xml_etree_ElementT": {"doc_hash": "95be4ae05b57c245a809065aa374440f54174b220a9e7c7a4ccdf8a0de73891c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__dataframe_broadcast_join__array_map_overlap.return.array_map_overlap_lambda_": {"doc_hash": "b3f5066b8ff6a735f8274fbbba88daa77712c8ba07e78e9afdf7cf3d366739af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_fractional_slice_test_fractional_slice.assert_isinstance_fs_1_1": {"doc_hash": "c6529a52a877988ecb70ad580d61545667b21f164aec02b5bc6c14a96aa9327b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_itertools_double.return.x_2": {"doc_hash": "4d68a52939343b898de750c9bf5a5a7b4ccf2e800caf0a2812e002fc69d8d055"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_cull_with_deps.return.dsk_k_get_dependencies": {"doc_hash": "f49dec20b6d6443e1a4003542fdf5a9c00bf2187f10955a33c9d6c45f06de829"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_eq_test_SubgraphCallable_eq.assert_unnamed1_unname": {"doc_hash": "34ccc8d92c309af67aa8ae5cdf016ea6928d3dde96b7608803c0ded21f82b511"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_stringify_collection_keys_test_format_bytes.assert_format_bytes_int_n": {"doc_hash": "5f1c996273b8241e88db9886b64e4477f8ad1ddb85dfbd332fb28e964fc996de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_deprecated_": {"doc_hash": "26768e511e8a6e87e9c132049c668e421172a1e3dde2c97730aacc063e971425"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__deprecated__deprecated._Decorator_to_mark_a_fu": {"doc_hash": "51cf434eb880b1a39f3acb68bd253e1e54e17a86b54d3aa02d1e32bccd137108"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__deprecated.decorator__deprecated.return.decorator": {"doc_hash": "c2f9a403dbb898d0945156194b320a105a34432da1a25008fde37f47c8ebcbd5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_np_percentile.return._percentile_a_q_method_": {"doc_hash": "fda63caf283ceaa0b0fd344813bc5c2722315330a3311fcd7cfef09c690d554d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_aggregate_slice_with_int_dask_array_aggregate.return.chunk_outputs_": {"doc_hash": "2d9eaea891f0e914f638686c930ef9d119b008bb2388dba7a17d0335fdb3a32d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_getitem_": {"doc_hash": "033b9ca97787f790b767f276c36bd153fa18183cc0bb3ad5307388097a7f08b3"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from___future___import_an_PerformanceWarning._A_warning_given_when_b": {"doc_hash": "728d7530267e35d58c6ae12afcfe83393e359375d901bfe3e062d839d71b3318"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__concatenate2__concatenate2.if_isinstance_arrays_0_.else_.return.concatenate_arrays_axis_": {"doc_hash": "12db953241a0b684d7857dc3bf7b4c31b5fa5cb425e57adc11bd2c4b1fa1b9bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_not_callable_func__map_blocks.extra_names._": {"doc_hash": "bd969cd7d26f5fe490dbe5945d3f2dc474f2b91266ee61de3b3d2442dfe4544f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._If_func_has_block_id_as_map_blocks._objects_and_prepare_to_": {"doc_hash": "7be75304534d65c045235144af4d74a20ef4bf0610da6813e9e7af8886d70c21"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store_store.if_len_sources_1_and_l.regions_len_sources_": {"doc_hash": "86709ce0c733ab97b7bd4d5efde5fa350f5b6e3598e04da83d969388d0858b99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.if_len_sources_len_re_store.for_s_t_n_r_in_zip_sou.map_keys_map_layer_key": {"doc_hash": "fa184a4555c288230f0a728239cd79728dd00c7fe8c2d047fa91bf53e30680c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.if_return_stored__store.if_return_stored_.else_.return.Delayed_key_store_dsk_": {"doc_hash": "fda5b9fd2d21fefab79094f09f0d5a1f9ed5cc4d8fe103624582be215f58e4a6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.shape_Array.__len__.return.int_sum_self_chunks_0_": {"doc_hash": "10ea2c7ae1cb4008e8349242dcef053f4f2f69ba5ab12e2ae6e702d7451339e8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__repr___Array.__repr__.return._": {"doc_hash": "8651101406deac5318106fa4e5b43a7399853e03b518d86f3a733caf11023766"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._repr_html__Array._repr_html_.return.get_template_array_html_": {"doc_hash": "88a1d57486e63fce9ca27b4bbd81373b1ec426d33c584a3a1fa42b17adf0b032"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.blocks_Array.blocks.return.BlockView_self_": {"doc_hash": "0a7e1330768f620e356f51fcbbcf659b210cfb0db4faa76aa76cea057e67ec03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray_asarray._Convert_the_input_to_a": {"doc_hash": "1f5c10f3902db8a91b2bef5c8fb0e967b57e1b9eac09f888241edfb0e9595e41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray.if_like_is_None__asarray.return.from_array_a_getitem_get": {"doc_hash": "922326805d16849735f36a9d93c0a3333a9bde0f096cb9c071f65a8bd12cfb8e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise_elemwise.expr_inds.tuple_range_out_ndim_": {"doc_hash": "fd59dd5e798677d5ba76bc8ddc9261a847a1d503cc9e2e1e431cd3c546f98b34"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise.if_dtype_is_not_None__elemwise.return.handle_out_out_result_": {"doc_hash": "9d914e389d8c3d3df7f2b16dfdf862322fd99aa158054274f0ccc69bbdc3d8c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__elemwise_normalize_where__elemwise_normalize_out.return.out": {"doc_hash": "93fb9ba0640de7810a1ff3701339803f3e8d9758fa0201189ce2e2d5d75daa85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_new_da_object_new_da_object.if_is_dataframe_like_meta.else_.return.Array_dsk_name_name_chu": {"doc_hash": "48444102c3eebde8a921bc97ff75489bd539f81cfb66b044e748b46f55167547"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView_BlockView.__init__.self._array.array": {"doc_hash": "009320727bdb1f136ce6b540204a19787752e852a524d02037446dd0cdf74a63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView.__getitem___BlockView.__getitem__.return.Array_graph_name_chunks": {"doc_hash": "5c6861750ea77384665a3c6875af6d0c0371b64efa7e3107224fc4e2b1d6fa05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView.__eq___": {"doc_hash": "ca33bc6850f00ba7eb5e6f24e3391025fe7514f3b9eebb6acaaf79e194b4d133"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_re__SIGNATURE.f__INPUT_ARGUMENTS__": {"doc_hash": "b00c52e98b3307c36d2679586445d94ed00d152b7ed34dcdf9d4724151526bb4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc._Input_processing__apply_gufunc._Cast_all_input_arrays_": {"doc_hash": "8d2cc6fcc0e56bd973b82900aee2e3b05d682d642147a737a52113c7657b89db"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.args_apply_gufunc._Assert_correct_partit": {"doc_hash": "d905e7bab18187fa2a747af9130b78a61be8877892fe7c23e381c807b3825d0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser.walk__Recurser.walk.for_i_xi_in_enumerate_x_.yield_from_self_walk_xi_": {"doc_hash": "f14116e0f30f4110083441f30acdd7692ff1aac63808854fd52fe3a19821b4b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_warnings__percentiles_from_tdigest.return.np_array_t_quantile_qs_": {"doc_hash": "d730b902250343f9f987fee9c218d4a3ec8abda04b518aecef524183f5cc45e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles_merge_percentiles.sort_order.np_argsort_combined_vals_": {"doc_hash": "45a929505dd4a12b24581849ea7bd1c692da7f32a44e7ff25b830fa52daa7c05"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles.combined_vals_9_": {"doc_hash": "d8141c517b4aa815c21dc6814ee57983bc06836fda14ec3da1184ff2290a2466"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmin_nanmin.return.reduction_": {"doc_hash": "44b12f80928bdeb18b45bc167957d6d4d1e7266deb33dadd54e51713c13bbd9d"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanmin_skip__nanmax_skip.if_x_chunk_size_0_.else_.return.asarray_safe_": {"doc_hash": "5139c9330c675ab14589934124f6646a90c99118abbe8650d23470edc6468a32"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_numel_numel.return.np_full_like_x_prod_sha": {"doc_hash": "3fd54db98fb822a22ad62ade0a11931bf8878f55c03109050892cd1fcfe4be7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_agg_mean_agg.with_np_errstate_divide_.return.divide_total_n_dtype_dt": {"doc_hash": "49a74b040e7a425d78d45ac7eb2193f9afc42000598184ec4e0714611e9665c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_warnings_reshape_rechunk.return.tuple_result_inchunks_t": {"doc_hash": "b54d5ef78094e18e06ceb668977574a720a4465333cfd8e301cd984b8c0dcc39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape_reshape.name._reshape_tokenize_x_": {"doc_hash": "be6172707ba6496259058a0d0f112c16010431cdfe452fbe4e21435513d1e5f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape.if_x_npartitions_1__": {"doc_hash": "1fda55722b68468ecaf03d51dc3afed858f191da845bfd810423bbb864ca750b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_2d_vstack.return.concatenate_tup_axis_0_": {"doc_hash": "603cba25758d41827afb1d20043c6ea8fded7291bd56793527cc5f4c21afdbd1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_hstack_dstack.return.concatenate_tup_axis_2_": {"doc_hash": "47e28174686a937470a0b893c48535508554d5e70125fd7885798660856003a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_dot__chunk_sum.if_keepdims_.else_.return.out_squeeze_axis_0_": {"doc_hash": "2a33d0e121a817d37dc978f7d3c4c7913defa8af01fcb341e62991efe1be8d53"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__sum_wo_cat__matmul.return.chunk_xp_newaxis_": {"doc_hash": "adbc1862c87fbc4d98099ac0340f82fd4c27dfc48ab303fb3f442fcaad441ca2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_over_axes_ptp.return.a_max_axis_axis_a_min_": {"doc_hash": "7c3f9f352effce132dcbd545c954e2a836387df9bc225fd03eb4b0d1d9a951e9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_diff_diff.return.r": {"doc_hash": "fb913af914e701eebbcaae95a14526f1e134824dad3d70243390be9e99fad534"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram2d_histogram2d.return.counts_edges_0_edges_1": {"doc_hash": "66012524b913df858da045c5abd56ef06af08e85a193da39bf3cb05cc61ee2eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_no_structured_arr_unique_no_structured_arr.return.result": {"doc_hash": "b496d6327f062c701944069c97ab49efe134be5556fde4715617880d10b09adb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_unique.None_7.result_append_out_indice": {"doc_hash": 
"a5a7d3e940ff3d179ca4a12b862b9d6df00fe484f407cb46337054c8939f3f09"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique.None_8_unique.return.result": {"doc_hash": "af60002b8ebf3338cb2c2775e60008971bb600656dda5db99fd64d5483a6eb52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take_take.if_math_isnan_other_numel.else_.warnsize.maxsize_5": {"doc_hash": "f4f2afd6ef80051b26995e30ffd40a0c6d2a01c1cd4112204c7f1b2ed2c94374"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take.split_take.return.tuple_chunks2_dsk": {"doc_hash": "7efb880216db62d894aaccf5eea444d2f5a879bde60928fa27d54c7e821990e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_check_index_check_index.if_np_isnan_dimension_.elif_ind_dimension_or_.raise_IndexError_": {"doc_hash": "f42663e3795bd6562b564468b48f7383592bd13b75e568b402e68e36b8cd2535"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.for_i_a_b_j_in_enume_setitem_array.out_name._out_name_": {"doc_hash": "b39106331a69402f2f5e8e2db718f2c6473d9a89860cfaf6432ba44f5cca016d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_test_map_blocks.None_2": {"doc_hash": "ac6220ad8757d839aff2b80dfd20bc345b277abd69af8473bd4cf352755fc462"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks2_test_map_blocks_block_info_with_broadcast.expected2._": {"doc_hash": "efe052079aae77de36502ec150f83ef9c8431149c63b828dad982fe981e76762"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast.expected_test_map_blocks_unique_name_new_axis.assert_x_name_y_name": {"doc_hash": "2c8e538359d5c27edff517ff75c02d7d556ec104d5df9997ff8ef6c0e27c8a08"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_optimize_blockwise_test_store.None_2": {"doc_hash": "58193d0267ef67b69c71843721bfa6a9a9c7bf69868a3b8f60ec61101f8d6ffb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions_CounterLock.release.return.self_lock_release_args_": {"doc_hash": "701cf3f0adbed6d5af05abf15e02968d46cec798e1f850bd242a80198879e2ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_locks_test_astype.assert_d_astype_f8_is_": {"doc_hash": "aef4a100513f336444509f1431e49b4c184a4e41e4f57ca18ac71dbc0fbcbfcc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate3_2_test_align_chunks_to_previous_chunks.None_5": {"doc_hash": "c21edafe1f05da64614993287927ed84b75a131bfabbfa72cf88ecdc42402091"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_on_no_chunks_test_point_slicing.assert_same_keys_result_": {"doc_hash": "78511de1d3be3eb3a245785b24e937f7819e376d923143a5344d16b8a9c2dc8b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_test_to_delayed.assert_a_compute_s": {"doc_hash": "5dc22f00ba63be6341f5384fd1d1ca13307befcaf3cf9600b7c26dbedbd58d27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_delayed_optimize_graph_test_concatenate_axes.None_1._too_many_axes": {"doc_hash": "201e6fa384372fc46eaa06000ff833c22434944227a2e2c8171ea8c5ad3ca2c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_concatenate_test_index_array_with_array_1d.with_pytest_raises_ValueE.dx_dy_5_": {"doc_hash": "4ae6bb484ee33c61f37b2bd27718cbfaa681605fd320b772ce4471c763ccaf8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_2d_test_setitem_extended_API_2d.assert_eq_x_dx_compute_": {"doc_hash": "b49b51ae0c259b4cb43f3b6a0a21b530e7e8eee5b21a917a11bf69da7e9ae887"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs_test_setitem_errs.None_22.dx_0_0": {"doc_hash": "95561ebde8aafb4c43af29cade8e395be4470c2d34740e3ec144996c3459a30b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructors_chunks_dict_test_pandas_from_dask_array.if_PANDAS_GT_130_and_not_.else_.assert_eq_s_values_a_": {"doc_hash": "f08d4a255c2ea821ca5cf8c6d9ca4c5ecaa7b76b816593d894efe38a0a25c656"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_stack_functions_require_sequence_of_arrays_test_array_function_fft.assert_eq_res_y_res_x_": {"doc_hash": "b1dc77378bf34242fe5f9b2b1168dd302837e6fa6ac3d10d8dce9cab7b29ce0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_notimpl_function_dask_test_array_function_sparse.assert_eq_func_x_func_y": {"doc_hash": "fe28643c0dda0372c34b8c6c72d387477ec97be05b764802d131a64a8ba4535f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_non_existent_func_test_non_existent_func.with_pytest_warns_.assert_list_np_sort_x_": {"doc_hash": "a404b7c640e12a0ef713d31b79cd69343156783a162c494baee420fd5373937d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_binary_function_type_precedence_test_binary_function_type_precedence.assert_": {"doc_hash": "6cfc89fa4ccf02a22bbcbd895681685f061c7095c7ec072f44e592464b6dee25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_like_raises_": {"doc_hash": "49229b850e8f5d0143de118427759744f2b5ed90e0645f000b14134ab2094e9e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_np_kroncompare_examples_test_tile_np_kroncompare_examples.assert_eq_np_tile_x_reps": {"doc_hash": "13ad2fec7f60d2c9d054398e438ffd06f12f6f72fc6c0e1908b82aa6afa52624"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_np_functions._": {"doc_hash": 
"5e660fe15c2dc01a71242bd81491ff4d001fae4917e6aa090429a9672fb41ffe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_basic_test_basic.if_next_iter_ddc_dask_key.else_.assert_eq_ddc_ddn_check": {"doc_hash": "c229c3682ca6d8ed472b0bdb7c449beb4e41e1887f4cce0364a1e4b574a6e2cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_sizeof_test_asanyarray.if_isinstance_like_np_nd.else_.assert_type_a_is_type_li": {"doc_hash": "410c67ea0c4bdd8500e282159b633c560d02f68d8405b23175b74bb18387a913"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_vindex_test_vindex.assert_eq_res_np_res_cp_": {"doc_hash": "a7da3e6486e4d0f9b343b6e3063ebd43495dfa91f39dd09e804643c108e98fb6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_view_test_view.None_1.d_view_i4_order_asdf_": {"doc_hash": "ae8ce3c0f91c55f67eea0c7ce8975fecdbc3111640d482e532aa349c7ff83b7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_view_fortran_test_getter.assert_eq_result_np_aran": {"doc_hash": "b9f877a46a938329b47618f48872c2e4908ea73c1338c7e7cfe1201364053759"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_store_kwargs_test_store_kwargs.None_5": {"doc_hash": "75f6b6f2d85d5dff14b980c83b01232177908f4b7e46eda8a28f8faa64c23a8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_test_setitem_extended_API_2d.assert_eq_x_dx_compute_": {"doc_hash": "0b4de91ef7d0b376a52bf3aa0e3ab2ce6eef1a391546d9604f8567c5f6e8e443"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs_test_setitem_extended_API_2d_rhs_func_of_lhs.None_13": {"doc_hash": "22687d51cef2904fd1bbc08c0ba9576ea232c24b52128823c869a4186f562e73"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs.index_18_test_setitem_on_read_only_blocks.assert_eq_dx_0_0_88_0_": {"doc_hash": "4245ecbb3945c813b6cd78989703cea6fc1ba033bcb8b4be7a1cdf1ec7ef1319"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_errs_test_setitem_errs.dx_6.da_from_array_x_chunks_": {"doc_hash": "b539f06c7d9d89208db4c7d85ac9b7090fb4f91130e5e837b1d6352c268019c8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_array_like_": {"doc_hash": "87d8f7cae27b0dbdb8ebf13ea89c1584699db9d05bb6010b66824f3fecaee15c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_np_test_diag.assert_eq_da_diag_dx_cu": {"doc_hash": "8a193e7aaa7bc8d3f531f35b2016f5689667e79c40523e44536ad22de434b83f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_diagonal_test_diagonal.None_11": {"doc_hash": "43fbd5f0ca92c2950bbcad11ca88e252f16f5944ca53f236c84e013757dfc747"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_che": {"doc_hash": "4f3f09a0196ce16a0626d144c975b879fe9b1bbc7622d8a35041dd7e8158737e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_tri_like_": {"doc_hash": "7544d9ee6b910c70fc7a56a3e1a780f6fc3fb3f690f30e2a1f02eb5169337371"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_gufunc.py_np_": {"doc_hash": "4b2c810a2aa33ba9d576c1c7c04158ae6c49d641b61f1d83344e89ed520b290c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_np_test_tsqr._full_matrix_returned": {"doc_hash": "34927338f6b6ae454262f5cb763621995a68f6eec0b9ec2e5a4e83114671ae12"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr.if_error_type_is_None__test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"doc_hash": "ddae43f41778643039143d7b5fff76b26a546be0153b9a85a40ecb1718ed3110"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_": {"doc_hash": "9c8268e450f20103eadc65908a736c7845d7413b324e61e005b607ceca87be62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned": {"doc_hash": "d4d6800cc05cbbc1cc7b5c8baab2d0694081b62a2dae43b9ec7f9228fac014f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"doc_hash": "8ded8a3cfc21ac18dc376cedc8e77171295f60d398ff5b4a92037f2036f24d2e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_da_linalg_sfqr_dat": {"doc_hash": "ac0011c9f35861ec79609cbc7ec3a5b486aab58ffd358c3d7a03507a7760e148"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_lstsq_test_lstsq.None_6": {"doc_hash": "6c067a435d3188237ca5b36ebe8573efb2025e98c5e2e3b9004f12e2cb2c3358"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py__get_symmat_": {"doc_hash": "a587169d80b2fbaa843c134a7671a2844b80e7129c41e217c431cbbc31e4d05d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_np_test_overlap_internal.assert_same_keys_da_overl": {"doc_hash": "a701a3414db63e008c9a54b0631310721c125070a38950384b028602a6981e11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"doc_hash": "1161cde5b07bae94b7885e2cbd167abc9952da0ce527accecc71c2e65a3fcd9c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_reflect_test_reflect.None_1": {"doc_hash": 
"0956f8df691405d7b455cad2246fd8ae184ff0cb1c1adb6fc927d1389e62cede"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_nearest_test_nearest.None_1": {"doc_hash": "a4cd66aa3312b070be0b312b794ae87ee76b70e026d5ab2385a71958a49f9d14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on": {"doc_hash": "dff4397aa62ff0c169599814ee1c05f3f4696570e4020b41fe6a18b428e8b2be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_boundaries_": {"doc_hash": "36ef7b8447b4f1b5595921586265a8fb4c5abba24691b55051207c5670d1685f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_np_test_percentile.assert_not_same_keys_": {"doc_hash": "6b4bb330d47e88dfd20ba8249b8a7228cce43ba3f3699dd37dcd5c7d55f4b9b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentile_tokenize_test_percentile_tokenize.assert_same_keys_da_perce": {"doc_hash": "fab51a2c8822094f6a54115f22d26c2576cefb434f5a8a3d56fe33bb99397f9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_empty_arrays_test_percentiles_with_empty_arrays.assert_eq_result_np_arra": {"doc_hash": "48486622ec1472ee7ef04c7a3328836b5764c1a481c9bf30642bd721bbd81428"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_empty_q_test_percentiles_with_empty_q.assert_eq_result_np_arra": {"doc_hash": "dd60832567a03683559c15a2fd475de8ff6f3b0447ba3e08b865888efa49f66c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_scaler_percentile_test_percentiles_with_scaler_percentile.assert_eq_result_np_arra": {"doc_hash": "43c86f5878ffa6ca42c9af046d8d1281e148ab617e0fa31430fa8a7579b7a5d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_unknown_chunk_sizes_": {"doc_hash": "b91cb6ba01e8361505b75def64afd5c4dd600231bc49d51927a1edac2e7a24eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_random.py_pytest_test_random_all.rnd_test_rs_standard_t_2": {"doc_hash": "e013aeea3f0902b4cfa44a43a27601cbf0fd97221f2518c1725ca4ba0043caa4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_random.py_test_random_shapes_": {"doc_hash": "17248db838c488bb3e68e8705fcdccd56bdd818acda3648f59d34f97f179197c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_reductions.py_test_nanarg_reductions_": {"doc_hash": "18fe2214baf54a564f8105281c32b3f26c078ff6bb303e986deeba18ea360c63"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_np_test_bincount.None_2": {"doc_hash": "5cd1ecc361daa76f1ed6d782625f8cb15976f653638e3f9c023e66caf6576e1e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_compress_test_compress.assert_eq_np_compress_c_t": {"doc_hash": "a8b66bf1f4d905fa7940a403e1b7b7ba3210fe377f72d3af73e4643183f59b96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a": {"doc_hash": "6fc81da693f15ab2d28611b3f1e6cd0cba60521486b886423c566441edbd4317"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_prepend_test_diff_prepend.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_prepend_cup": {"doc_hash": "2823f277c7c13dbcb59309452d12ee432f01dcbe21262be4c13672ccf0cd38dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_append_test_diff_append.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_append_cupy": {"doc_hash": "ca67d983d239bf3aa6ffbbf23c5b2224c1de0e83cb18a035a74cd67ca6ec34ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_": {"doc_hash": "4bba89536e074ad948fdbb3ec4f2c5eb2278651b295be87492d73109b2359b8f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_25_20_9_.assert_eq_da_tril_dA_k_": {"doc_hash": "12aeeddd7f61fcf0db632cabf1b12c44e4399ae8274cdd6ea5a99dbf892b49bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_tril_triu_non_square_arrays_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"doc_hash": "ca7e28bde9c223dabcac80da0713031190b34123f87d9ab1e71c7a07df29710b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_unique_kwargs_test_unique_kwargs.if_any_kwargs_values_.with_pytest_raises_ValueE._test_unique_kwargs_": {"doc_hash": "3dcccaf4e476b21fe10a103f71fcd5b3741ba5762302a16384f87b3b118f45b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_unique_rand_": {"doc_hash": "793c9229e0895abba82bc29f1702e65790985f428c0db638ed09103c9f2e6cb4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_np_test_index_with_int_dask_array.assert_eq_x_T_idx_ex": {"doc_hash": "77e7e41fbdfc6caf032f67f112307b1f2c754754e766d48b8321c94b9694607b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.None_3": {"doc_hash": "9c5d36381f51c2b5afd04f645e78fbf38a3c9c56a6d9ef90d015ab717c1eb02a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1": {"doc_hash": "9b8323cc61c88524f4acb0e498db421095d773cd85717bd5bbe1f577a10ce551"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_negindex.None_1": 
{"doc_hash": "cec7bc180f0c5abbecb0a535b0caead68d072972776b12480a7e02967e7d8484"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_indexerror_test_index_with_int_dask_array_indexerror.None_3.a_idx_compute_": {"doc_hash": "5dd49814a9c74145ed1894c97a9fb9833c4a46330d1c1de342749e4b39872874"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_dtypes.None_1": {"doc_hash": "7715f383d4cdf584763d8fc1f1a77f4bc7cdcbcf9bae0dfd50eba2de325b90ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nocompute_": {"doc_hash": "1a8f1bc5aca6cca7749d872ff660b432e89048b0aea23779c357ba09969b45eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_pytest_test_sparse_hstack_vstack_csr.assert_eq_x_y_todense_": {"doc_hash": "59f6500bb9fe7660227b4b6cc38fff8e6d2ce25bf251ed714be3cda36b08a24a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_test_sparse_concatenate_test_sparse_concatenate.assert_z_toarray_z_": {"doc_hash": "0d6af1568cb8ebae6ba8c44821a580c4d85ef8f0ef8c7a91e402b465b071259e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_test_sparse_dot_": {"doc_hash": "0f8e57620a6e03e2b4674a392826eacb2ad6913621e230d34c634585ff55f5ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vectorize_whitespace_test_gufunc_vectorize_whitespace.gufoo_a_": {"doc_hash": "ee469ef356844a7082d7153ab628cd57bee79a2b27d6309dd93369de28a0c1f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_sys_test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"doc_hash": "8fbd4ad9e0f4b48bf2916544e0469aa4139994f86af16deef7932196336539a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_roots_annotations_test_fuse_roots_annotations.assert_eq_za_z_": {"doc_hash": "ea90571748b11c5f2e5e99cb1d7d3cc2d1a14838ee4576b0405a979c1c54f1bc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_blockwise_duplicate_dependency_": {"doc_hash": "25d1fe25aa5025c497c70a2a88b0792c21d4869ea44c5428dc317c6681f2976c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_different_depths_test_map_overlap_multiarray_uneven_numblocks_exception.with_pytest_raises_ValueE.da_map_overlap_": {"doc_hash": "19d113131d54fa94ed80b6c1747a8ef1bb80bb7a4335357ffbf3f68de3a8a2f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_trim_using_drop_axis_and_different_depths_test_map_overlap_trim_using_drop_axis_and_different_depths.assert_array_almost_equal": {"doc_hash": "b1fcc54871eaca08c8565c2e627c73fb0ffd821d2c5329e472b7de997ef1d301"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_pytest_test_percentile.if_internal_method_td.assert_eq_": {"doc_hash": "1f499bf38b2cc6757a76060cf9f1cc8d838e433a261c4bda6f08ae97104cfbec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_empty_arrays_test_percentiles_with_empty_q.assert_eq_": {"doc_hash": "cbf1271b19291107a020876a1db12cd60372463268aaac9af116cf84b5449126"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_scaler_percentile_test_percentiles_with_scaler_percentile.assert_eq_": {"doc_hash": "052b64d0671817e5bbbdfa941a0c055fe6acd0d3f9adf05c28ddf49a944746a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_0d_test_test_reductions_0D.None_15": {"doc_hash": "289c29ac744161db16760d7628e8be170238479b8ee6363d988176a8dad5ceef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_object_test_nan_object.with_warnings_catch_warni.assert_eq_np_array_getatt": {"doc_hash": "b633432bb5f5a0abc0065cb05e71f6b4c5a6c7778b9f8f19116a2ca1e38243c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_does_not_rechunk_if_whole_axis_in_one_chunk_test_object_reduction.assert_result_1": {"doc_hash": "ad7693b5fbd5ed2f1ce0f818ea9c2a53384f37ad86d688122ddfb43fd7b53c17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_empty_chunk_nanmin_nanmax_test_empty_chunk_nanmin_nanmax.assert_eq_getattr_da_fun": {"doc_hash": "4005a3effbdda3ebeef673f2e6e0e3075d22ba6bf187628dafcbe3f76b43ce01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_empty_chunk_nanmin_nanmax_raise_test_empty_chunk_nanmin_nanmax_raise.assert_str_err_np_value_": {"doc_hash": "7a35eb379dc4ed7ac70a36562a20787dc69da381cb98406eef29f52d2584b9c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_prepend_test_diff_prepend.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_prepend_np_": {"doc_hash": "cd2b507549cdca025442104715487359c34237c13191f5eb1ce9cf15c3023278"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_append_test_diff_append.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_append_np_z": {"doc_hash": "70bd823bb6be89c161045b8dd312bbc7a2ba6718226b6487ed9c5e2f84b85f35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_negative_order_test_ediff1d.assert_eq_da_ediff1d_a_t": {"doc_hash": "0677e0cc845e5d955bf453cc0d16ebdef0666dc38d2d697756c8b53e635fb633"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_bins_test_histogram_delayed_n_bins_raises_with_density.with_pytest_raises_.da_histogram_data_bins_d": {"doc_hash": "47fd625cafd780e260260d0457d829b1d31ad24904f205999f5538a8a72f11c6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram2d_test_histogram2d.assert_a1_compute_shape": {"doc_hash": "048403fad94aebf359fe28fb9e4bf60b4476ed81809f9a7fb28c824f034c2109"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram2d_array_bins_test_histogram2d_array_bins.assert_a1_compute_shape": {"doc_hash": "2debee356dce13fab82215a39265023b953424fc77cac946221a3640fb53a38d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_test_histogramdd.assert_a1_compute_shape": {"doc_hash": "5717ecf35056651638c769a2921b5e36b1471f1eca7af43c2d818a7b4240ab0b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_test_roll.if__maybe_len_shift__.else_.assert_eq_np_roll_x_shif": {"doc_hash": "72b004d222337c16760bfafea5386bd1ae59f80e348353d19df21ffa02ae97b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_itertools_getitem": {"doc_hash": "0e6992bd301060c7884be550a9a6bda0569794954b7705fe5e3f32738a8e8c7c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_random_if_sparse_.pytest_importorskip_numb": {"doc_hash": "4439c06df0f33e3fe5b8e623c2f6c16eee179b498bbbb4a8df061a76b76fd81e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_functions_functions._": {"doc_hash": "fbd5568297fe435dfe1ab0b3bce8c51e430bef54f86de617784395cd5cc57475"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_metadata_test_metadata.None_13": {"doc_hash": "ab5e4a59c53df2810a4a21eb3094984005655a1c83f2a93ca88fc31c2e548243"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_dtype_kwarg_test_dtype_kwarg.None_1": {"doc_hash": "5906a7757b929aa6cc69493a6bcf0f42548172cef5cac3a63a1d08a3e9fdc089"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_test_ufunc_where.assert_eq_expected_resul": {"doc_hash": "fa82c9b0e0903398310b9a84de36341b123ff4beb62a64edef005b27202ef0c4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_broadcasts_test_ufunc_where_broadcasts.assert_eq_expected_resul": {"doc_hash": "f2d73722ccf941052771ed2bf70eaf3b5501b4ff1e30512e618e8dfd79d97bdd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_no_out_test_ufunc_where_no_out.assert_not_np_equal_resul": {"doc_hash": "e7671958c3d99403524564ecc60fb4b28cd99ffce09037f2c3d007c40cc77885"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_doesnt_mutate_out_": {"doc_hash": "23af0a5f5d0b366fca57a8ecc9f564f44c801cdd6ba2e6097236cb5115e867fe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_contextlib_meta_from_array.return.meta": {"doc_hash": "3f9cd50e6f5bfd1c17e654c50e4433abe2b31c4d4e807f1f6dc8784cbd00d641"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_scipy_linalg_safe_solve_triangular_safe.return.scipy_linalg_safe_solve_": {"doc_hash": "1a0eb49e38ad342aa8b3c47989e7956409172df02220fcdec2a18fc04a2a9411"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py___getattr___": {"doc_hash": "4233a877d4121a3150388791c9ca63de40cdb5de4f38f6775c554a960a0e978a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap__broadcast_trick_inner.return.np_broadcast_to_func_meta": {"doc_hash": "1c1fa6750ccf73273fd006703277f62d44ea99bbc9f0e365f502e7b8ff11c4a7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_broadcast_trick_broadcast_trick.return.inner": {"doc_hash": "8600ee4f3cb8814cda97e4140ed02878f6440407f002ea44d7170e224b71ccff"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_ones__full.__doc__._full___doc___replace_": {"doc_hash": "a30ef02861b18b9d1f011317e182fcec44627fe6c42bb6e0bbbc287678c5af52"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_full_": {"doc_hash": "6543c1c4865c9a99c4f78b7a57792c23d1b6d782aa50ae799a78d0989b3c3a09"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_task_lazify_task.if_not_start_and_head_in_.else_.return._head_tuple_lazify_ta": {"doc_hash": "b362654d4a01a6c182b2f639a439ef2704ec1f8bf767253c89ba9d61244860fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item.to_delayed_Item.to_delayed.return.Delayed_self_key_dsk_la": {"doc_hash": "3c8ed474dba6ad5900cc98782686b2c493ece15032b1ba7aec1d18a1b6a3cd1b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_dataframe_Bag.to_dataframe.return.dd_DataFrame_dsk_dfs_nam": {"doc_hash": "192a46f1563766440bf94632a29f60af994db8e8cf87a2e1dba45a06ec500719"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_delayed_Bag.to_delayed.return._Delayed_k_dsk_layer_la": {"doc_hash": "8b9db0f340f8be50600baf8bab4dc9f71bb515ed69531bc8bd0870a27bb42ed2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map_bag_map.npartitions_7.npartitions_pop_": {"doc_hash": "1d174a7591b9901b2e6da3e3bf87f61a28a063a57f073e63a42e446aa0ea3bf9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_scheduler_func_test_args.assert_c_npartitions_d": {"doc_hash": "99789154f0e46e66a278bb326509a2d1aca416b77915c14860fecd98983de665"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_class_extend_test_bag_compute_forward_kwargs.x_compute_bogus_keyword_1": {"doc_hash": "ed30ebfd6a8be8127db1cd6569f989fe7c220130445228bdf796872b0ffc1043"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.None_13": {"doc_hash": "36481bf67437e9628577f81f680d4eca98b82574af7031cbb75e3938e7e27641"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_dask_layers_to_delayed_test_dask_layers_to_delayed.with_pytest_raises_ValueE.db_Item_arr_dask_arr_na": {"doc_hash": "9e20b968e3ef402af76dc7caa30d8d2bdd23eaa67d6e682ce6ddceb05964ae98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_optimize_graph_": {"doc_hash": "1caf27b6e7b0dcde68ca9e39e72f3a80017db1b7f16e7c95cfb755cc78da4e2a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize_visualize._": {"doc_hash": "e56f9c567d2e0333ffd4aef8339915c2712e6949b4ae86817a0949b741a730de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize.from_dask_dot_import_dot__visualize.return.dot_graph_dsk_filename_f": {"doc_hash": "89eefd4616b98b705060c8a98252693c3ad79832157c668e570966e1cb95909c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist_persist.keys_postpersists_": {"doc_hash": "89f30f01a5fc92fad6880f44f7cff923ea22c1b379a192b8545741057dc274f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist.for_a_in_collections__persist.return.repack_results2_": {"doc_hash": "e1fa8416269da31396ce14305b5c06778932b5d7cd26180514cde7d9a1970abe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py___normalize_set.return.normalize_token_sorted_s_": {"doc_hash": "4fbe432f5aa84567faa4f5d0ff7ff5df0498ed30ffd6fbbddc5df5abc183c450"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_function_cache_normalize_function.try_.except_TypeError_not_.return._normalize_function_func_": {"doc_hash": "662c28f9003e05310053054c14aabdb58a50f580076bb11a837a3586f47ce1cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__init___Blockwise.__repr__.return.f_Blockwise_self_indices": {"doc_hash": "ada005140566612bec944346cb0b55efe863c365bdfcd2d749247bf47174e440"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__unique_dep_rewrite_blockwise.changed.True": {"doc_hash": "a24c98893d2d5034e35e151d6c57f9599689c4db414583b636725396f0f56df8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.while_changed__rewrite_blockwise.sub_12._blockwise_token_k_bloc": {"doc_hash": "bddcd06ba67dded1b7a7981861161b495c7750c177739ca933d50a73a43dc021"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.dsk_13_rewrite_blockwise.return.Blockwise_": {"doc_hash": "7cac9cd5fa534da1878a40d9f303b71eee89c6adbaf59a7dfb02a4406a1eb0ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_simple_test_loc.with_open_os_path_join_di.with_f_as_f_.assert_f_loc_4": {"doc_hash": "f4de8a116ea585a81eb436cb6b960e6985792bb0ccf665e1094385d3b64bc180"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_fetch_range_with_headers_test_fetch_range_with_headers.with_open_os_path_join_di.assert_data_expected_r": {"doc_hash": "c924a81518b7e9bc94f0e0dbfd7e44096c774d0825eebbca9b7ea5564aee75c6"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_test_ops.with_open_os_path_join_di.with_f_as_f_.assert_f_read_expect": {"doc_hash": "6b6fee7f71bec363024ae2f79224671a214324a5875b790f1638f9a0398ea17f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_blocksize_test_ops_blocksize.with_open_os_path_join_di.None_1.if_parse_version_fsspec__.else_.assert_f_read_10_expe": {"doc_hash": "aa02986e3168c0254e63945ff44d14ef76bc5b78925818335b52e7c2a3dfe6c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_pickability_of_lazy_files_test_pickability_of_lazy_files.with_filetexts_files_mod.for_f_f2_in_zip_myfiles_.with_f_as_f_open_f2_as_f.assert_f_open_read_f": {"doc_hash": "c637114091c65c7c8f50a10850feeb9ef855b12456da461dc6708f8048ff84f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_py2_local_bytes_": {"doc_hash": "1c081845ef0a0332123e41316246db1e366325873f1e88920d4e63dac1c1a43c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_base_s3_base.with_ensure_safe_environm.try_.except_subprocess_Timeout.if_sys_platform_win32.subprocess_call_f_TASKKIL": {"doc_hash": "995ed744238cce9628ababe82e20474690da35dde243682eca525fed758fb033"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_append_test_parquet_append.dd_utils_assert_eq_": {"doc_hash": "80c1b6421953d8e94c24a7f7079e6180e2849b8af7b0012afc44eaa9ce8e5357"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ensure_file_ensure_file.try_.except_OSError_.pass": {"doc_hash": "3c656d5535291c5c068b8c200ef9aa25dee5071bee133ffebc4e8be9c212c015"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_expand_environment_variables_expand_environment_variables.if_isinstance_config_Map.else_.return.config": {"doc_hash": "3bda687a6953b2d65d002a4e1ab4b26a222e223e433920d5d2039f7d32f55c5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_deprecations_deprecations._": {"doc_hash": "a72cb4f02543dfc5e8102361265fa44e6e39f37b06135689f260c5b4aee4b1e2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_make_meta_object_make_meta_object.raise_TypeError_f_Don_t_k": {"doc_hash": "d5b8e5282fb080a059cba4b214e2f3b154a743d2fb0168ee6d8f42a139123937"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_meta_nonempty_object_meta_nonempty_dataframe.return.res": {"doc_hash": "27cfde73777a621f5e1cb10c3a0ef07b352ffcd38915cade97f4f84f983af686"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py__nonempty_index__nonempty_index.raise_TypeError_f_Don_t_k": {"doc_hash": "dd21b8670f3216ff281973a9ce08c92390bfc53b2ca4fa1bb649882313d5b1bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_categorical_dtype_pandas_percentile.return._percentile_a_q_interpo": {"doc_hash": "85c61508ebd0fdc3014b69166a53491a5cba73ca5086645fbdf8d168afdce5a5"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_None_3_": {"doc_hash": "d3d86f8cb4d950e9d4a20da9ce8f0bec429cacfc6ad77d1fd7608409963196af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_operator_pd_set_option_compute_us": {"doc_hash": "a9d8d2db3b37a396001da40f80a342f6155d1d19d8b51405b25b56c37b55d5be"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__numeric_only__numeric_only.return.wrapper": {"doc_hash": "7ab18c6aa15b1cce858557b0f94a5a5614dbf25a814e453b5540b5acd72589a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar.to_delayed_Scalar.to_delayed.return.Delayed_self_key_dsk_la": {"doc_hash": "f4ad9a0dc06ba59c83a9ffbeed94de14ccc3355be3ece2afbaaad553b86ec5cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame__Frame._constructor.return.new_dd_object": {"doc_hash": "44a9ed5f222f38c5401b6c27d5bc1c2ae1d057cf5a5c59e3fc8b327518df7a6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.divisions__Frame.divisions.return.self__divisions": {"doc_hash": "bab19206658694b948ac04a5cc2f689c259284180426618836764e1911902000"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.divisions_9__Frame.divisions_9.self._divisions.value": {"doc_hash": "0b9f5c503b94533e42a7df6af71ab226cd2f4d4a1085ec4b245668dd776c3d56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.npartitions__Frame.__setstate__.self_dask_self__name_se": {"doc_hash": "759535d0a1dff69988a24e88088e83499805d718a9366dcd007ebb3012c5dcfe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.copy__Frame.__array_wrap__.raise_NotImplementedError": {"doc_hash": "20ce4a942430fd5984fed56763df78897f525f3d51e0131acce375e7f3390d58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__array_ufunc____Frame.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"doc_hash": "3bf199d1c4837a1759294d07bfbaf0829cca7f967aa73dcd5037adb39c5cbf15"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.index__Frame.index_27.self._meta.result__meta": {"doc_hash": "09ac1b042383c2d3a060220c2db146f19487f5ca6a82c8120f618b0a6530cd80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.autocorr_Series.view.return.self_map_partitions_M_vie": {"doc_hash": "9a8818c31581e8feddd548ead0d276f4cd07c6dcce98dcfd0908c28e73d2e515"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.to_series_Index.to_frame.return.self_map_partitions_": {"doc_hash": "99470632ad33a47b377cf18d59121da455f65e94e384d3ff3fd21288d4a9ceb8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame_DataFrame.columns_3.self.dask.renamed_dask": {"doc_hash": "2dcf816637fd816e8a4736284af8412890d547351639215988056003612662fb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.assign_DataFrame.assign.return.data": 
{"doc_hash": "a36ec4bec5d625c53a1cd74a4564d4b51298e8c76242e4905426016c0118b917"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.eval_DataFrame.to_string.return.self__repr_data_to_stri": {"doc_hash": "8bf05cc5aba53bb37e7c20bb53a20d308468c27111f6d558639cb5aac9657527"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._get_numeric_data_DataFrame._validate_axis.return._None_0_index_0_co": {"doc_hash": "8733cfffa5b176d03f42811c613f2c380b1d731da12fc9ac3b5e9a39441c15a3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.applymap_DataFrame.nunique.if_axis_1_.else_.return.Series_graph_name_self_": {"doc_hash": "6e20680d7d3ed162066c581e8e6d2fd94557a1e52644687322cd769b5625a482"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.mode_DataFrame.corr.return.cov_corr_self_min_period": {"doc_hash": "3cc61f27ab3e1f20f8bab085bb9c43c17c3e02ec549103bd8515a10e11372b68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_records_DataFrame._repr_html_.return.get_template_dataframe_h": {"doc_hash": "64a87f65e9f3380f438ba7f73b4574ae497d4911cd04f1ac1518427decf4fe69"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_datetime_to_datetime.return.map_partitions_pd_to_date": {"doc_hash": "b52ab9c169fea5dd812bdbce40377c07c9630b189a012845e79cdac2764774d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_timedelta_has_parallel_type.return.get_parallel_type_x_is_n": {"doc_hash": "e8b6d3ea664886f8e381137ad2e0e9249b4052f9effc821b466110977440a5ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_collections__determine_levels.if_isinstance_by_tuple_.else_.return.0": {"doc_hash": "786d27b0132d2fad53c43c14590f03c4d9b5a1eb6a27f332efb73ecaa12b8dcd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_by__normalize_by.if_not_isinstance_df_Dat.else_.return.by": {"doc_hash": "b118738fe2d26dee7ef6e9a807b253101cb631f47341a3ea55e9ededa19e26ec"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_shift__groupby_slice_shift.return.g_shift_kwargs_": {"doc_hash": "efd84cf83e12574c1cf90e0fca7d46b7e2d061dcddbd12f06bc3d330b8c40680"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_aggregate__apply_chunk.if_is_series_like_df_or_.else_.return.func_g_columns_kwargs": {"doc_hash": "77b7ffa5f7f81672cada7cb82aee2016ec020a2732ca6e8e7e2815fd9b821cfd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__mul_cols__mul_cols.return._df": {"doc_hash": "d41f263ec5891509695f80d00d0e3994e559f684889d07ba41b9910fe218951e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_chunk__cov_chunk.return._x_mul_n_col_mapping_": {"doc_hash": "69eee1e961947f44ce964d4f979379dec48c1f9d194fba820dde062c8bb4c79e"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__nunique_df_combine__make_agg_id.return.f_func_s_column_s_to": {"doc_hash": "2ca7745034e8747af8c53836a92d5e454340ec90850be976468437d7f153e2bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_single__build_agg_args_single.if_func_in_simple_impl_ke.else_.raise_ValueError_f_unknow": {"doc_hash": "dcc814263939178b007c28b1209c706cc17b6358e7d9fcc9ddc66ef92a88bf99"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_apply_funcs__groupby_apply_funcs.if_is_dataframe_like_df_.else_.return.df_head_0_to_frame___c": {"doc_hash": "c170b00e751eda914c8a66eafda906d0f194d3e6abedd3659ef4664c10365efe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.shift__GroupBy.shift.return.result": {"doc_hash": "a01300e5aaac91b0b0e4e76520fb46cc4196b19933cdf67b9a527bf454072a22"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.rolling__GroupBy.rolling.return.RollingGroupby_": {"doc_hash": "dfa8a6555dc1ebf87e570c62cbd2645d96fc666fe00cba191c78c5fc5c479932"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.tail_SeriesGroupBy.tail.return.self__aca_agg_": {"doc_hash": "798a1b5b6f825eecd76ad8b6f6fae0536a4e92474f190913415889c1e43e2a68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.head_SeriesGroupBy.head.return.self__aca_agg_": {"doc_hash": "8e8f5a28d5873537c967892bf69db962de010c16ff9c06625ac832cf48672e24"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_os_clear_known_categories": {"doc_hash": "edd27510c23c012b8e63d7d9ff57af155167795e2a4213755a788987cd2dcb75"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.if_blocksize_and_sample_a_read_pandas.try_.except_pd_errors_ParserEr.raise": {"doc_hash": "ee2b3b94702196782d2ec6e6a729988fcab246f73c9b03b17a0076c61b5bfccd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.None_20_read_pandas.return.text_blocks_to_pandas_": {"doc_hash": "85e3bc845344080a82df3e8b9dab0e0a7f2bbcc22156da0c598c4036baefc089"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_make_reader__write_csv.return.os_path_normpath_fil_path": {"doc_hash": "96488749ecb8878071a2f66ae02ecf290a8908195829afa5574b6bed75033606"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_": {"doc_hash": "4129715ebc6c09aaab13059b620dd53c1c77db0e106bdc33776493b6e957b8a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array.for_i_chunk_ind_in_en__link.return.None": {"doc_hash": "86a6d10c279dfb6117018a603645a60b559d9b35ea9c4beef02f5b9806bf9730"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__df_to_bag__df_to_bag.if_isinstance_df_pd_Data.elif_isinstance_df_pd_Se.if_format_tuple_.elif_format_dict_.return.df_to_frame_to_dict_ori": {"doc_hash": 
"da4e97a9bfeffdc4063d00e2c0b213102b44e5d0a738968a7ca4bd031fd937bb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_io_write_json_partition.return.os_path_normpath_openfile": {"doc_hash": "bc259fdee0d15d38fbb2803f6308ba3f533e5113ea181a829496becf7cc914cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/__init__.py__": {"doc_hash": "2a6dfd3498503b82f2f3e186e7327ac758ba3746f9146341b0be458b2a00cf1f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_pa_ArrowORCEngine.read_metadata.return.parts_schema_meta": {"doc_hash": "af35d3965823130e9afbc35197202d699ea6554554f8e6daf336070890d02335"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_ArrowORCEngine._aggregate_files_ArrowORCEngine._aggregate_files.if_aggregate_files_is_Tru.else_.return.parts": {"doc_hash": "d28b426ad004d98cfda8368f6c706c6615d5790c5f1398f1ff6218ac54c40aef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_ArrowORCEngine.read_partition_ArrowORCEngine.write_partition.with_fs_open_fs_sep_join_.orc_write_table_table_f_": {"doc_hash": "a92dc1cc15802ac1f2999ec9ce38e81d82491e6dbb985eeb6b35618a956a303e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py__read_orc_stripes_": {"doc_hash": "fe9a0faf500e54025fdc36e7373204ef037c1facfbb76ac35298afcf83eadfa4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_copy_ORCFunctionWrapper.__call__.return._df": {"doc_hash": "51f3a73750f7a2b680b7a75d7bef4d0a2c1f9dc554f75ad1bcfbaa6e88247ae0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py__get_engine__get_engine.return.engine": {"doc_hash": "0f82b44ea85e6fa4b1b05238518c272d1f0cc58b884b502bdf7ee4cf7b86148d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_read_orc_read_orc.return.new_dd_object_graph_outp": {"doc_hash": "1b1e2a7a60658c709462fae4d7a09deccbe0b6e223334345d2789800d35cf5cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_to_orc_": {"doc_hash": "32ed3d3319e426c1eefe22ab8b332cd0373a1eb27b94a2fc0aae2da5bbacfda2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/utils.py__": {"doc_hash": "f48ab33bf285792528736932b2fb8fd209579a41b177b711de057aa20a4d5b4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__frag_subset__get_pandas_metadata.if_has_pandas_metadata_.else_.return._": {"doc_hash": "e3505fa29b25efd29ee207767d5eff02b7ca5d9c02f4473e382742e3e138402b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_None_4_ArrowDatasetEngine.multi_support.return.cls_ArrowDatasetEngine": {"doc_hash": "dc8f78c46507a263c778e803260cc94998c246f9314c52c46089de86b85d68d3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info_ArrowDatasetEngine._collect_dataset_info.has_metadata_file.False": {"doc_hash": 
"188d4f3140dda7057a5d632c99bc1aa4a5640cb98afe52014a687f9d1e734f47"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.return_ArrowDatasetEngine._collect_dataset_info.return._": {"doc_hash": "c2b6439b7eed74e9b350ced0ccb335006d839d8d1fe62a6757ba5b378321a367"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_file_parts_ArrowDatasetEngine._collect_file_parts.cmax_last._": {"doc_hash": "39f525f6a153c2161ffec5a8058a634607d70c8c7f5f7df94682d4b4f4c03242"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_file_parts.for_file_frag_in_file_fra_ArrowDatasetEngine._collect_file_parts.return._row_groups_to_parts_": {"doc_hash": "7de4763d76d7b40c22e596d54c059cb4e5b0ffb586c18c0040a035a619cf9a50"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._make_part_ArrowDatasetEngine._make_part.return._piece_full_path_rg_": {"doc_hash": "8b9a8371a6b38164e24268d34ba870c9a4d68b85ff1c936bc6e5029eaf6c515e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._read_table_ArrowDatasetEngine._read_table.return.arrow_table": {"doc_hash": "05217953e5c511aa6260aad28500774e024631daebccfea17c8074312868edd8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._arrow_table_to_pandas_ArrowDatasetEngine.aggregate_metadata.if_out_path_.else_.return.meta": {"doc_hash": "458fcdc92e35698a492ebc210391f8a38dc77fae9b132e3eee2e6d7801d637f6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine_ArrowLegacyEngine._collect_dataset_info.return._": {"doc_hash": "e5ff45b8535afe23142d8a7297fbfd4a5698323e93deaea38fea96c3f8d2d2cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._construct_collection_plan_ArrowLegacyEngine._construct_collection_plan.return.cls__construct_parts_": {"doc_hash": "c738a120b30b426ea9263a063546fd03f42bdb73e52fa071503e58e6ab33dda2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._gather_metadata_ArrowLegacyEngine._gather_metadata.metadata.None": {"doc_hash": "bd7b31ee3316e11023bb67901f92e1a2bd4555b1e70da74ea8d80811b70f83d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._construct_parts_ArrowLegacyEngine._construct_parts.return.cls__process_metadata_": {"doc_hash": "6cce008f1e8446685d7dcf8a53168558efc92755213758aa2abfa2eaa371300e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._update_metadata_options_ArrowLegacyEngine._update_metadata_options.return._": {"doc_hash": "0333541b61426c9b32bc762cbaa65765f4786b76672c7fae7df4bc670af789ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._organize_row_groups_ArrowLegacyEngine._organize_row_groups.cmax_last._": 
{"doc_hash": "982cedca9693c0bb4504e5bb3e6ef97a7774e9cde5365e3a7de86e38d02518e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._organize_row_groups.for_rg_in_sorted_row_grou_ArrowLegacyEngine._organize_row_groups.return._": {"doc_hash": "df7f6c81e89d22ea1b984a6e0b493651919bb851b5ab860885a74262e66e0768"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._read_table_ArrowLegacyEngine.multi_support.return.cls_ArrowLegacyEngine": {"doc_hash": "b538cd788be3d2f29eb6d3e92f95950ae610356af721a44c7058de108564f9cb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.__read_parquet._": {"doc_hash": "6860de7cfde1171b64d5e7aed80801a7d46474bc64979c78aa4de3637e838bb7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.if_read_from_paths_in_k_read_parquet._Parse_dataset_statistic": {"doc_hash": "ffd10b21b25fa5cc0fad84d1f6f94de85a6eb501d1cd5cb28ca1cab7d902f068"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.compute_kwargs_to_parquet._below_if_index_cols_i": {"doc_hash": "f8539fbc628f5f9017f60fbbb904998e860ed8a2bdf50af213dc8dfd49a416fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_copy__FP_FILE_LOCK.threading_RLock_": {"doc_hash": "6790ff378653990a8853b7f8aa1eaac05f1365b2acb9581d0942747380c1e6c5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__paths_to_cats__paths_to_cats.return.cats": {"doc_hash": "55183a04587d71dc0be2e089f07731601ec0c2c926962a9418fe8ab62c3a88cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_paths_to_cats_FastParquetEngine._organize_row_groups.cmax_last._": {"doc_hash": "0e9b316485fdefdf6f7f2dee7c9871fa1772b5bf01da89c1e567dcf3d19837ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._get_thrift_row_groups_FastParquetEngine._get_thrift_row_groups.return.real_row_groups": {"doc_hash": "e16946ca2ee8fb58a4437edd5852740a423584ab7d053c0d267f15de8071a458"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info.if_len_paths_1_and_fs_FastParquetEngine._collect_dataset_info._Ensure_that_there_is_no": {"doc_hash": "84d9d3420bdafa904c6f90508efcbf3bc9d2fd28626d15f05bc2c6f14d4f929d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info._and_explicit_columns_in_FastParquetEngine._collect_dataset_info.return._": {"doc_hash": "b0b374b13ef7a66994bae6ce0c17276b28f161d22050c6164163e120dc4d759a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._create_dd_meta_FastParquetEngine._create_dd_meta.return.meta": {"doc_hash": "2f544a64136371e40da7cb39c1e8737ccb1d9dff88f48e4389075562e48a1bc7"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._construct_collection_plan_FastParquetEngine._construct_collection_plan.if_.return._": {"doc_hash": "3db40c6c7c646ecda57ce0b84047a37946cfb5d72c0c892bfd4da999ff267be2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._construct_collection_plan.dataset_info_kwargs_FastParquetEngine._construct_collection_plan.return.parts_stats_common_kwar": {"doc_hash": "cf6c4aa94b6ea71525517f07f2f39b3aa95b256b31a4dae5c7408ec710ddacbd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_file_parts_FastParquetEngine._collect_file_parts.return.parts_stats": {"doc_hash": "f1c051d09ba280d8091741bcd3ebf6b52bfcded72c9faa869e6b182ef126c0de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.multi_support_FastParquetEngine.read_partition.sample.pieces_0_": {"doc_hash": "8a98091181f875d633be8505477d3034ca9916f7d87bf9631a8a7a03978f64c3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_partition.if_isinstance_sample_tup_FastParquetEngine.read_partition.if_isinstance_sample_tup.else_.raise_ValueError_f_Expect": {"doc_hash": "73bb093cee1a1eb794c0d7baebc5ad534c1f1d4d594366aedd0b978865fd61da"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__row_groups_to_parts__row_groups_to_parts.return.parts_stats": {"doc_hash": "6eda5adbc3109b3e4706a936fb583984f859eb7e3b30fa53771f2dfd06fb9cfe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__get_aggregation_depth__get_aggregation_depth.return.aggregation_depth": {"doc_hash": "27339b7646ba030c5e47b383c605002b105408c25154bf495057b4b27525f0a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_comment_test_comment.with_filetexts_files_mod.assert_eq_df_expected_df": {"doc_hash": "f3adbb6cbde12bcb1577e22daa79b6ebaf336bcf1dbfea350fbbe4cc14c45f1e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skipfooter_test_skipfooter.with_filetexts_files_mod.assert_eq_df_expected_df": {"doc_hash": "beed50ab4b750cfa7fe1700ff735a62e037508042f4be718a062f4b0a09c3410"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_int_test_header_int.with_filetexts_test_hea.assert_eq_df_expected_c": {"doc_hash": "a51534fab7e5e023ec53cb39d27846be102616e6125638c606b95b08debc2471"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_with_datetime_index_partitions_n_xfail_pandas_100.pytest_mark_xfail_reason_": {"doc_hash": "24ac949dae63d258d20c8238471581eb453cf970f93f8396a4ad80fd78fda27b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_sep_test_read_csv_singleton_dtype.with_filetext_data_mode_.assert_eq_pd_read_csv_fn_": {"doc_hash": 
"4b7fbc4affecdfe43a0b0e2d5bd5600575fe5f51b2a3f8835be52878632a0243"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_robust_column_mismatch_test_robust_column_mismatch.with_filetexts_files_mod.assert_eq_ddf_ddf_": {"doc_hash": "67b1d245e66384435cfaec69b4af24c53a30ea400d6b62dc940bca44723b7b0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_different_columns_are_allowed_test_different_columns_are_allowed.with_filetexts_files_mod.assert_ddf_compute_col": {"doc_hash": "2aa13b23a2bc5b2b5cbd21569e417e64ecc007e4fb3c79486d200b38cc698c96"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_None_6_test_to_csv.for_npartitions_in_1_2_.None_2.assert_eq_result_df_": {"doc_hash": "fce9b3f855439c76be519b6fc0d42010a527a915c001a7ad76711acba63c351c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_csv_parse_fail_test_csv_parse_fail.assert_eq_df_expected_": {"doc_hash": "61b9424fcd451455714a27b7bc3e2adc2a93d3fdcdfbafe2265989d0e00aa6ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_csv_name_should_be_different_even_if_head_is_same_": {"doc_hash": "16ac66a1a1e52005f8b3166a0a000aedde9eb77e6a5317c0f687833a2151000b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_no_overlaps_test_make_timeseries_keywords.assert_1_bb_100": {"doc_hash": "a368b8034f5deab1c39c7e1e8f25f11b67a653f5d2c711c35bfac4fbe8cfb336"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_bag_test_to_bag.assert_ddf_x_to_bag_forma": {"doc_hash": "829a685d186354336750af8964b2e947efd3813e8fd01683b5a16d1d262e2cfe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_preserves_hlgs_test_from_delayed_preserves_hlgs.for_d_in_chained_.for_layer_name_layer_in_.assert_hlg_dependencies_l": {"doc_hash": "012fba2748d815b26313af36e2ecff4f6ff568a9980085df8303729372e4160d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_to_json_results_": {"doc_hash": "c3f19c2fb9c118915168f353fd04c89e7c1f327836eee03af49d0bc2b116f095"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_glob_orc_files.try_.finally_.shutil_rmtree_d_ignore_e": {"doc_hash": "7909df64614b06e58ac81f289e5d20bfbb957e6ab19bde97370a631c51d6cbf8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_single_test_orc_multiple.None_1": {"doc_hash": "8fd718e53c1df393138ddf16ce9e361e522f82a4cea1750ed4826f825a5f8800"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_roundtrip_test_orc_roundtrip.assert_eq_data_df2_chec": {"doc_hash": "d5131d4990d4f660f67f28c3440412676428ca5e05c7c18167c645e5e1a948f2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_roundtrip_aggregate_files_test_orc_roundtrip_aggregate_files.assert_eq_data_df2_chec": {"doc_hash": 
"40aec03099a731ce5ec9c81b9dd7eef999db8acc169d5cccd89caf8fbfb46096"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_aggregate_files_offset_test_orc_names.assert_out__name_startswi": {"doc_hash": "936c5ab62573fa0b0144ba776464a1736f0d7075ba96d76be595bfe5b5536aba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_to_orc_delayed_": {"doc_hash": "94fe67f9087a825d12debdb7fb30ae86733b81d26b863754941502b0f3af0bb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_getengine_test_pyarrow_getengine.if_SKIP_PYARROW_LE_.with_pytest_warns_FutureW.get_engine_pyarrow_legac": {"doc_hash": "17c6603f1dcb77e7729e0f03e81c91f4f6036690d2f23e76f9fffe14cc88e671"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_test_gather_statistics_false.assert_eq_ddf_ddf2_chec": {"doc_hash": "c14a877e1ebc5dc6cb5bd73c35a82fe3eb0829bf56f5cd4460c4c475636e5bf3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_test_partition_on.for_val_in_df_a2_unique_.assert_set_df_d_df_a2_": {"doc_hash": "8795d8976dd13ad61e61e9376474e674d2fa61a6d50864d173f8bae8a221cd0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_test_filters.assert_f_y_c_all_": {"doc_hash": "414110561b6dc3db576ac120a036fe3ab71a503654075ee7ea131d89dc06ad18"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_v0_test_filters_v0.assert_eq_ddf2_ddf3_": {"doc_hash": "b3b1b985554100c8e38d19790297b3a53500ad863759cedb2c6a9b006ab531b9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_filter_divisions_test_pyarrow_filter_divisions.None_1": {"doc_hash": "b889c580f92e287b7d35ffa809426c00d2943797705fc88f2d37be7231149ce1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_test_timeseries_nulls_in_schema.assert_eq_ddf_read_ddf2_": {"doc_hash": "3e22fc8d752c00cda4086c1bc98185c66aa60071214dd47c9ec963a3b303c8ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_aggregate_files_test_split_row_groups_int_aggregate_files.assert_eq_df_ddf2_check": {"doc_hash": "957618a6733cdae75a13c9beb38a9187f05e688accd5fccf5f2cafe4795132e5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_empty_test_chunksize_empty.assert_eq_ddf1_ddf2_che": {"doc_hash": "9e0691817e82ba52e2ec6c20ff758ab9d26ab0859d606e13658a27c544026348"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_files_test_chunksize_files.if_partition_on_.else_.assert_eq_ddf1_ddf2_che": {"doc_hash": "ed967ec5050df376b56fd1edaba7d9fffe75aa9f8547f5918f72443150df78cf"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_aggregate_files_test_chunksize_aggregate_files.assert_eq_df1_c_d_": {"doc_hash": "5f43de8c6ceabe28ed081cfcdbbae5b3718dfd8f8a74612735f789fdb7369209"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_test_chunksize.if_not_chunksize_.else_.if_chunksize_1MiB_.assert_ddf2_npartitions_": {"doc_hash": "54cdddb696f1bdfe328a4b0017e4444732013a471552b91966186435d26b6150"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow_test_pandas_timestamp_overflow_pyarrow.if_pa_version_parse_ve.else_.from_dask_dataframe_io_pa": {"doc_hash": "f7e4d3aba9acbcad1fc0d01b640e7eb68725487c78c5a8df091402318a871771"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_no_pandas_metadata_test_partitioned_no_pandas_metadata.assert_eq_result_list_exp": {"doc_hash": "4e1eb06a960798a8da5919034919f58a557bf98c3740580dc4a484fbae64542f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_read_from_paths_test_pyarrow_dataset_read_from_paths.assert_eq_ddf_ddf_b_": {"doc_hash": "8720dd890bf6bb1ac4c884a1c3f7406ef5badc4c43ae3e89be3606689953032f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_metadata_test_custom_metadata.assert_User_defined_key_": {"doc_hash": "ca111e665b81eb22fd9ea999d9c7858e7f2ff1088afc0d451b94245ca621fcc2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ignore_metadata_file_test_ignore_metadata_file.if_engine_pyarrow_leg.else_.with_pytest_raises_ValueE.dd_read_parquet_": {"doc_hash": "0c5e02af950c0d8f46a64332aa6073a8db48266344e6fb64545559c77cb75082"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_metadata_task_size_test_metadata_task_size.if_engine_pyarrow_leg.else_.with_pytest_raises_ValueE.dd_read_parquet_": {"doc_hash": "d1cc139d15b861d3f2da769ec957dad554db5db6a21e6b7402fd491b1d0b1751"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_extra_file_test_extra_file.if_engine_pyarrow_leg.with_pytest_raises_ValueE.dd_read_parquet_": {"doc_hash": "62d0e50ab76ac9ef7a9a821424651555198aa0e499393c003dfea10f13e976ed"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_unsupported_extension_file_test_unsupported_extension_file.assert_eq_df0_dd_read_pa": {"doc_hash": "e8ef5f8b5a0d1edbb60de6bef1d6e2653c037de22f4227fed7c2027be58431f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_unsupported_extension_dir_test_unsupported_extension_dir.assert_eq_ddf0_dd_read_p": {"doc_hash": "91afa9f98e2a4bf1995e9994fc3c273ddd9f9f38c34cf0f7583bc9658a3cfb88"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_test_custom_filename.assert_eq_df_dd_read_par": {"doc_hash": 
"466dfafaeb1786bb51e5e55d939722b35d59f17012606b4df02729c740b8f392"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_works_with_pyarrow_when_append_is_true_test_custom_filename_works_with_pyarrow_when_append_is_true.assert_eq_actual_expecte": {"doc_hash": "14ba6041a24d2abbc3172307e521afd1fc2a67a5c074e3bd173d3d340c287779"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_throws_error_if_custom_filename_is_invalid_test_throws_error_if_custom_filename_is_invalid.None_1.df_to_parquet_fn_name_fu": {"doc_hash": "2f71c7dc7445bafe222306023f3779ac0a9d6169edd8b2f1f2301150961d0be0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_single_partition_join_single_partition_join.return.joined": {"doc_hash": "b59bac1a786ca85f1fd13991ce0dd9aca1c2f6a1741651df84575dd4162d582e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py__head_timedelta__tail_timedelta.return.selected": {"doc_hash": "536d345c53d67baeb13852892e4c9a6fd48ea9e2ada1b3f5f497e0f4ad68d533"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling_Rolling.pandas_rolling_method.return.getattr_rolling_name_a": {"doc_hash": "f5c2142a1ea22a150391bf3346b3498397ed7f1c385e8e991bd32a4dbdee34c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.aggregate_Rolling.__repr__.return._Rolling_format_": {"doc_hash": "a79f43a3f50a0616601f6e65ffb3e126915ab5c715b3d3f8cc00b08c7c2a338b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_RollingGroupby_": {"doc_hash": "c7f1267596954a078c6c9fc37ed4691d0ed93f94e021071d1b5d32635827297c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_test_reductions_frame_dtypes.assert_eq_df_numerics_var": {"doc_hash": "4289e18e00b9ad9e882a1de1d26089d8236c22dcaa78917c4bbab27c5fb2a795"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_numeric_only_test_reductions_frame_dtypes_numeric_only.None_1.assert_eq_": {"doc_hash": "5cada9af25698d9d11e2997011bc226c521e50d739770f8b534d3f73337a2cba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_warnings_test_scalar_raises.with_pytest_raises_TypeEr.bool_s_": {"doc_hash": "dfebcfeb9baa92d98e5a7edaf32423c7b38bb2c90a1c84bc8c43a1dc9e93aaeb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attributes_test_describe_numeric.None_6": {"doc_hash": "8caa17e53bf0a8caab6fd342185df0fe814983f29fad76a3126732bf2f22a521"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_test_describe_for_possibly_unsorted_q.for_q_in_None_0_25_0_.for_f_convert_in_list_t.assert_eq_r_75_75_0_": {"doc_hash": "70dd75709ff03e61b15ed805b3414511211bc2fe496fb7d1b62943e34fb08e0a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_test_clip.assert_eq_ds_clip_upper_u": {"doc_hash": "6abe4f1342bf58d9593a4228977e5c9a670eea11cd4a48c49e2c7dc6ea8058e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_squeeze_test_where_mask.ddf6.dd_from_pandas_pdf6_2_": {"doc_hash": "047f17c734a5108e8a14db372481e29d959221d15ad4af593b7e7b1fcf8aa36d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask.cases_test_metadata_inference_single_partition_aligned_args.assert_eq_res_ddf_": {"doc_hash": "0e7d9306e47baf099d4dcebf7a8eede0f8b1b36a90063b94de2ce536265ed532"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_dataframes_test_drop_duplicates.with_pytest_raises_NotImp.d_drop_duplicates_keep_Fa": {"doc_hash": "8d744eac2e6f6707796eb2cdd85171aa12bf09c4415485635b43dd8b4999e00b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_subset_test_nbytes.assert_eq_d_index_nbytes_": {"doc_hash": "e3b710698e841478bfe7cafc85d42f98586b08079a877a4c5fc35f8bfc160abb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_test_empty_quantile.assert_eq_result_exp_": {"doc_hash": "5bb03a771993803f6d14d973f04935dc88d12f6a027c3c27bd540b00ed5051b8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_quantile_test_map.None_6": {"doc_hash": "8acc7d3dea409b8f1010afad3a4082e53e378a66c9161840d3f4c7667ac16264"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_concat_test_align.None_15": {"doc_hash": "97bea4170d70922faa056031a1c4063954fadd68662c80ab39ca926c5e639a80"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_test_eval.with_pytest_raises_NotImp.d_eval_z_x_y_inpla": {"doc_hash": "3f4c4026561690326858bd91d61cbe04fe3997686de85f86c436d495e4309ed3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_select_dtypes_test_reduction_method.assert_eq_res_pd_DataFra": {"doc_hash": "3bbc0278d9e8a1f5da9de2e3dc835a921a31c1ef3afd771cd8277be5db29ff65"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_split_every_test_to_dask_array.assert_result_chunks_e": {"doc_hash": "9d051be67fda3ccca77e9f3ab1f22fa5ad95ab445ffa0f83697eaca76565499d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mod_eq_test_nunique.assert_eq_": {"doc_hash": "bd2e37ea21a0617511929977ff7277842a19cb42ec4dd67e73762c5f14d10b0d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_view_test_dot.with_pytest_raises_TypeEr.dask_s1_dot_da_array_1_": {"doc_hash": "3bdaecf09c7f1e7e9d810292494b2ca0b455fc5a86374158c976f87e8f7f17a4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dot_nan_": {"doc_hash": "47a56422c86580c48431b799925e37746cdf6ab9a693342394cb300a235a2d4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_collections_auto_shuffle_method.yield": {"doc_hash": "2191c83e5e3fdaf80c9af14ddcc192dbf197e80dab6fa4527f246285fb563ce2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_test_full_groupby.with_pytest_warns_UserWar.assert_eq_expected_ddf_g": {"doc_hash": "fc72a641da77207f219a7e3be2ef57cc8bc0265415c4fdd37411ecc7af0743fa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_apply_multiarg_test_full_groupby_apply_multiarg.None_3": {"doc_hash": "9b02c1d73bcff3646d7e750b1fc82e160ff9828d34470bd8154ba7d4de2c52ba"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in__test_split_apply_combine_on_series.None_8": {"doc_hash": "04df9b5d73ad07efcda4b71fdeebb5b648d22ef240b8f0de16cec85a1384871a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.None_9_test_split_apply_combine_on_series.None_28": {"doc_hash": "b8d098740d13e9589be68eb40395037b3c5af9ebfd4341c2a68743fbf2c6afe8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_apply_tasks_test_groupby_apply_tasks.for_ind_in_lambda_x_A_.None_1": {"doc_hash": "833023bc17c24b1209676ae4118d45cb65ec89f6a799768bfdb0d66f0682a04b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_normalize_by_test_groupby_normalize_by.None_5": {"doc_hash": "ce059b5fdcc574a2af0b486d533ba31c27c6188d9bf041a6112541f4cb444278"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_apply_test_groupby_column_and_index_apply.None_3": {"doc_hash": "075612d3234d1b92e117e9bdd58aa54eac74b8bed5ea65683c6b40a56142841c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_basic_input_test_groupby_shift_basic_input.None_2.assert_eq_": {"doc_hash": "bc0c247059e031bd1187fd2c426c0122f7f0f76121a5dcaa0b5afda7ff867166"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_series_test_groupby_shift_series.with_pytest_warns_UserWar.assert_eq_": {"doc_hash": "8cce9d12e5ef0338d082e504c3d02d1c7fca6fb6a1f9cfb824a8c2c2281f3552"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_lazy_input_test_groupby_shift_lazy_input.with_pytest_warns_UserWar.assert_eq_": {"doc_hash": "e6650042c785993c4b297224369fe7994d8f0059e166012080fae56db5a181dd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_with_freq_test_groupby_shift_with_freq.assert_eq_df_result_ddf_": {"doc_hash": 
"a1fd91773feef4ecadd5399e1b5eb9eeb7bce0b7bb405ac2663c30cded70c492"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_empty_partitions_with_value_counts_test_empty_partitions_with_value_counts.assert_eq_expected_actua": {"doc_hash": "c40c18b998aebe42207d44059c2b3bc79491ce96686ba8c41d7611f55ffd9b3a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_with_pd_grouper_test_groupby_with_pd_grouper.None_1.ddf_groupby_key1_pd_G": {"doc_hash": "4246ef8cffaa121a5e25d08db41bd01a14a6ed7b5114cf63a456005411323cb0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_empty_partitions_with_rows_operation_test_groupby_empty_partitions_with_rows_operation.assert_eq_expected_actua": {"doc_hash": "99c49a8765c556d383f04e545fcda50ddf914c190eb2a07a05285470dab8ebd0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_with_row_operations_test_groupby_with_row_operations.assert_eq_expected_actua": {"doc_hash": "f293f5de48690d43baad53abf44ea156ce1ef94c2f9ca84499c6c62ee2d605d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_test_loc_with_array.None_1": {"doc_hash": "252d1c794acb186044fe19884b67a6e1be735a7d80a503334042baf4b5e82ca7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_function_test_loc_with_array_different_partition.with_pytest_raises_ValueE.ddf_loc_ddf_A_0_repar": {"doc_hash": "afeacfc5de63a6a91b5cd9c76e14337563a3f66fc6066876d0abe7864e57e2bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_non_boolean_series_test_loc_with_non_boolean_series.with_pytest_raises_.ddf_loc_s_": {"doc_hash": "2d541d5684834458200efd86951f3a817a6df8b27b29cfe9d93ccdef028b55d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_known_divisions_test_loc2d_with_known_divisions.assert_eq_": {"doc_hash": "3b640d868085df47dc8bb9770f35dac4f13da039857e522b15b975513f6d543c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_series_test_to_series.assert_eq_expected_actua": {"doc_hash": "054e47d57ec7eff4b0cd836e65e8230252cfe77d90db70b2ffe80990c6696c4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_test_to_frame.assert_eq_df_index_to_fra": {"doc_hash": "7d5ba724e6c8d8fe52df7b5e43b787d78db7a362c839f7a485cb3d2953b050e1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_out_of_order_selection_test_iloc_out_of_order_selection.assert_c1_name_B_": {"doc_hash": "fb0e2add33d2a8c1be5112f7fe45aac114540d39ee605aa976f8f401e33f3a57"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_pandas_nullable_boolean_data_type_": {"doc_hash": "a326bb154ae28f5c880fa3ef3bdc3083857978f3a19e94f149374444f10f4ae0"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_ddf_left_on.return.request_param": {"doc_hash": "0fea5d859c6b8d2d0c84b736f1cda39df90552c7fa89a9002b10c0f986100ae7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py__Tests_test_merge_known_to_known.assert_len_result___dask_": {"doc_hash": "7f52ff843ef4b661c521306cfd3f89252582c0dc65381b83ed767b2f77093b14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_unknown_test_merge_unknown_to_known.assert_eq_result_division": {"doc_hash": "e769cff039eb28b3a61ece3906e07daf0ab49fa895bca72c631cd9c10fb927de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_unknown_test_merge_unknown_to_unknown.assert_eq_result_division": {"doc_hash": "5d1e107053f7f87245476fa12e2f68a3c5d98b49822bec4c1b069b470163c3a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_double_bcast_right_test_merge_known_to_double_bcast_right.if_shuffle_method_tas.assert_eq_result_division": {"doc_hash": "33f25b4fcdbabbfe4c89ac014674757b80d1e922f7ef71a5a7482db9a8392d18"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_warnings_from_dask_utils_test_impo": {"doc_hash": "b2b0ef2db1a1857b4da44b2b3c0062ea7a984d31e0ab7d73b9d43e1b4479576e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_gives_proper_divisions_test_join_gives_proper_divisions.assert_eq_expected_actua": {"doc_hash": "f453e5f0f23a6019126fde985ef775ee2cf6137c0c7f8d72a9fb40aa171554cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_test_cheap_single_partition_merge.list_eq_cc_pd_merge_pd_": {"doc_hash": "94ddc553f52319e81c7810bf79659c8028275e24e698e7d7bab70e7333409257"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_join_test_categorical_join.assert_assert_eq_expected": {"doc_hash": "cc7fc1deaab148e97805ef71a58ebfcb4a7e682b9bc6e3eb6007e4df9673177c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_retains_category_dtype_test_categorical_merge_retains_category_dtype.assert_actual_A_dtype_": {"doc_hash": "59fce2fa90db2b15cd2e65ec158087695912671344f3d51e544cb52acb746f66"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_repr_test_time_rolling_constructor.assert_result__win_type_": {"doc_hash": "adfb0f446849023eadda1202fc7ead04cdf82d47edb19fd04b00eea25e5e89ce"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_methods_test_time_rolling_methods.None_1": {"doc_hash": "21e193f8f287843db2ffb7bd2a0be3a4dbaee3cf475a3f918b172b1f76ac6fc5"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_cov_test_time_rolling_cov.None_1": {"doc_hash": "297a8a23f083aba4fa44b9911cceb40d865f9f66c9d3992e3ee845e75442b933"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_fixed_chunks_test_time_rolling_large_window_fixed_chunks.None_2": {"doc_hash": "55fbd0f4e1c275d36f5b58b133b0b3c3fc24a5b2863daad76873c53ed7611ac6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_variable_chunks_test_time_rolling.assert_eq_result_expecte": {"doc_hash": "feabd8c2c4cf3cdab6a441d0ac610214e69e0f826b7a2541404229bdfa6518ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_groupby_rolling_test_groupby_rolling.None_1": {"doc_hash": "f6c00a70377214959216af757b0a7273583b3f79f8e1e7e59981daf758266735"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_groupby_rolling_with_integer_window_raises_": {"doc_hash": "75f4881f263de23aaeaf16f30f30471d443ecda9e6b0466140e9d8b21321eede"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__conflicts_with_keyword__test_default_partitions.assert_shuffle_d_d_b_np": {"doc_hash": "c00cb7c5db68bcb4fa6db73606bb7ab89600fb7380fdf9dbb9a3c3c80357690d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_npartitions_test_shuffle_npartitions.assert_set_map_tuple_sc_": {"doc_hash": "574e34b06d2cd8b7b723961abfeaa3a92b73514352c5458bb232bfdc33e4094f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_npartitions_lt_input_partitions_test_shuffle_npartitions_lt_input_partitions.assert_set_map_tuple_sc_": {"doc_hash": "5eea6854c1616c44e1fc1687e67533d2795d06351fca9f4c8bdaddcf74401ef3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_index_with_non_series_df2.pd_DataFrame_": {"doc_hash": "5d0f834cdb0fa46d67ff615158c92a9e08773780f19a0764c9210b9b70977695"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_general_test_set_index_self_index.assert_eq_b_df_set_index": {"doc_hash": "c2eb7eb922e1dacf86777798c3e4b22ad9aac8c4e2dbd900ebdb2982727fdf70"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_2_test_set_index_3.assert_ddf2_npartitions_": {"doc_hash": "a45869205d574d10b3802529ff6bbdcd567421963d7dcaac8140b1e6a6d7f4f1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_dataframe_shuffle_on_arg_test_dataframe_shuffle_on_arg.if_ignore_index_and_shuff.else_.assert_df_out_1_index_dty": {"doc_hash": "1fd41e7dd77c72e0ab306174ae4608d5fdb987d14d4a274d765e416b736212bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_nan_partition_test_sort_values.dd_assert_eq_got_expect_": {"doc_hash": 
"e09f7fbb9f6110e2f28e8580e8e18080fbf85e319181c1bc13d6389c8d702367"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_with_nulls_test_sort_values_with_nulls.dd_assert_eq_got_expect_": {"doc_hash": "7b7fa6293221933f92c2788a81174dfb985ba015c9bd1fd23016663c41b925ae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_custom_function_": {"doc_hash": "d82e63ec1b948fa76865a5416e45bbe68046415a49ff19ba830ccc3d82ac6933"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_re_from_dask_local_import_ge": {"doc_hash": "94f301967f6b8fda33595235c3d23f6ce62ddc4f1b8b404ef0c7142ba1410b7a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_index_test_meta_nonempty_index.None_27": {"doc_hash": "cb89232429150be38fa87be53f627b089cbf84d3b55a396ec10a306ad3d8459d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_meta_test_check_meta.None_7": {"doc_hash": "e58cfa7b4638062eede8f15ac23d35bc4a2cba328bec4bffdc83b84e4897d083"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_nonempty_series_sparse_test_assert_eq_sorts.with_pytest_raises_Assert.assert_eq_df1_df2_r_": {"doc_hash": "9c6237af8f63d683d8e0c5a2675c6ee710684a7616221b6cb7a3820ce09f0c31"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_assert_eq_scheduler_": {"doc_hash": "d46db107df4077ac995d632f00856aca9d6e39c740b7fbacdd26e2dcff0ed837"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_matching_columns_index_summary.return.f_name_n_entries_sum": {"doc_hash": "e6266906804dbd23f0de6c965f02ac1aef0bd6e97968fa2cf08d46b99b0b1fb1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__maybe_sort__maybe_sort.return.a_sort_index_if_check_i": {"doc_hash": "82bd4f06a50b0067bc0381852d131218a9777b61c612dd61a17fc3b288e0ffe6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_eq_assert_eq.return.True": {"doc_hash": "39ad6f1aea5d4d41ab3d2d1e77543e8fc443a5bcfa2528757746452791f14f8b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed.if_isinstance_obj_Delaye_delayed.if_task_is_obj_.else_.return.Delayed_name_graph_nout": {"doc_hash": "091601b0a78acdde6f768a357b96a2d614912d9982950d24130e6c04b4323ea5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_right_optimize.return.dsk": {"doc_hash": "eeb72a28c03ca6b0d740507dccb009a8e93829d5f28972daccbcdb9d0b24c9b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed_Delayed.__dask_postpersist__.return.self__rebuild_": {"doc_hash": "5fee456e4bfce78b1dc94cd500bc2740dd7275ee7b07992101f8fffacbca8162"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed._rebuild_Delayed._rebuild.return.Delayed_key_dsk_self__l": {"doc_hash": 
"9393043b1b706f40a7465358f805d632b7a996e7bc9b4b504b9c0a2dc8e238d9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__repr___Delayed.__getattr__.return.DelayedAttr_self_attr_": {"doc_hash": "95698c0ae0e1381df08ca5a1fc20a873cf467cdd6fbf56617e23d6c93b88a4fd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_contextlib_format_time.if_h_.else_.return.f_s_4_1f_s_": {"doc_hash": "1ae386fb2bba962a9397ddcba32442feb8135aaab5cbe5cd42aebf9fc935cdca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_with_invalid_bokeh_kwarg_raises_error_test_plot_multiple.visualize_prof_rprof_": {"doc_hash": "fc81e41c3898abf099253ea27fb0ac1b75b33e0cc382a599fa1e39c3b9126bc0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_saves_file_test_saves_file_path_deprecated.with_tmpfile_html_as_f.with_open_fn_as_f_.assert_html_in_f_read_": {"doc_hash": "8072beb19b2227c33bc756735dde63fabcc292ba5750002040f45a58d841752b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_get_colors_": {"doc_hash": "ff94dfa98c7ef87caa50cd7542cf1ae62fb38f7700da7c049ae84956edc0eac0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_os_graphviz.import_required_": {"doc_hash": "94ab8b7cc989479b98942ac211541b709299ab5765586b301a7c4e0c249f0952"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_task_label_task_label.if_any_has_sub_tasks_i_f.else_.return.head": {"doc_hash": "dfceb55e920dd1079b3067c62e4d79ea208133da4a22a1a5d6998adf59b91f08"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.cull_Layer.cull.return.MaterializedLayer_out_an": {"doc_hash": "bb56a45bbcee19409bdad04b9b3b04251373de4f1a432c3cd7bba4846a3a0349"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_unpack___Layer.__dask_distributed_unpack__.return._dsk_state_dsk_de": {"doc_hash": "c8f78e6cbabf7108bf068ca77644c4c7b0179e6373aede4b5521cf8fbe1ed701"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__reduce___Layer._repr_html_.return.get_template_highlevelgr": {"doc_hash": "dcbabb468971615bb551a009d30a8a3ce1efa0c17004a94aae592608a86759a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.layer_info_dict_Layer.layer_info_dict.return.info": {"doc_hash": "5ed49695f9d8dda6b5f3f7253daebe2fcef7e39a8e79dcacae1a166ad3d1a656"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.dependents_HighLevelGraph.merge.return.cls_layers_dependencies_": {"doc_hash": "2346ee100dfb1be08dbf5e2fcad159b1b383b5e2e914ef0a2deb3fe53a4fded8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.visualize_HighLevelGraph.visualize.return.g": {"doc_hash": "6d407999c783092294a1b32845f741214c47071106cd59cbbfe1e94f1a6f94ea"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__dask_distributed_unpack___HighLevelGraph.__dask_distributed_unpack__.return._dsk_dsk_deps_deps": {"doc_hash": "4cc713b8545e7bd0961660596818968c43d4306754772aa09510af223e2572c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__repr___HighLevelGraph._repr_html_.return.get_template_highlevelgr": {"doc_hash": "5d12d8da96c50762f4c628f0b9b93299521bc7eedb6463b3878ed6de9d14cc79"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz_to_graphviz.if_color_layer_type_.layer_colors._": {"doc_hash": "dfeb4cf079aa3f0f16588ba9020c56a1633176e8e0048e7fbfd2d0328399da10"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz.None_2_": {"doc_hash": "f887535786d1a031e0c57eb7188bf4230c4556cb544a9a75cb0ff4e75b985b4c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_from_concurrent_futures_i_if_os_name_nt_.else_.queue_get.return.q_get_": {"doc_hash": "f22b32305ec03f7c10faad621e8be75ca023145e8ee589b94ea15a358e2946d8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/ml.py___getattr___": {"doc_hash": "24279148ef3b36675a2e2beb98435db6b9b404ebc0decd810ca1360347cba800"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.finish_now_key_order._Although_running_a_task": {"doc_hash": "69e68346453d1eed4aa9c3fe6bf0a5aaf2ff3f81444cbf54d904bb683b3b756f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order._can_be_beneficial_by_pr_order.is_init_sorted.False": {"doc_hash": "e3ac712f9ff89f62c8713e3db0648e3d671547d8855ee6dce468bcd1ffe068ab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True__order.while_True_.while_True_.if_already_seen_.deps.deps_already_seen": {"doc_hash": "051c61fe8e12181e8eab00e604b99997d41dabbb97c98f9473db16cff5942023"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.while_True_.if_len_deps_1__order.while_True_.while_True_.if_len_deps_1_.elif_len_deps_2_.if_inner_stack_.else_.if_add_to_inner_stack_.else_.if_key2_item_key_.else_.later_nodes_key2_append_": {"doc_hash": "d5b04c238bc9668d267746fdde07bd389d4cbb7f673875b4ea70ae0165cf7d2c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.while_True_.if_len_deps_1_.else__order.while_True_.while_True_.if_len_deps_1_.else_.if_inner_stack_.else_.for_key_vals_in_dep_pool.if_key_item_key_.else_.later_nodes_key_append_v": {"doc_hash": "597ce7bd8aa8c498534c88d5dc28df75f0dffee6d26f8f1d8f33223c5aebec39"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.if_len_dependencies_l_order.return.result": {"doc_hash": "462747ad4d26ed4d78f2b6558dd0f5bec07b224f6a84f17f547ed0ef7c8b119f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_StrComparable_OrderInfo.namedtuple_": {"doc_hash": "4558dfea336a57b6cd67e01ed374e1d38a71ea9b15a1066459bfa60e64606079"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_diagnostics_": {"doc_hash": 
"2a4ad29e1bf9506339cad20748bf5276115390c25b6df856247acbb92b261c83"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_array_on_object_dtype_test_tokenize_numpy_array_on_object_dtype.with_dask_config_set_to.with_pytest_raises_Runtim.tokenize_x_": {"doc_hash": "ff206c301e3de8e4593f5e6b789b832a0268ea1e9ad6b4e7ac9b808c702a280d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_index_test_tokenize_same_repr.assert_tokenize_Foo_1_": {"doc_hash": "1355efd96f68a27ca6bf59b3ace943aca95a33ed5d6f51340134a38a99f94a14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_method_test_tokenize_method.assert_before_after": {"doc_hash": "296d4cbc844b8949560c8f09a314cd6ed390e1c40149fccb9d1bf51d1edb5c4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_with_recursion_error_test_tokenize_datetime_date.None_3": {"doc_hash": "faca3d284df4f7a040b5b723e5929ae43eab75174d0ba1ec0dc34c7a0cf4c8b4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_highlevelgraph_test_visualize_highlevelgraph.with_tmpdir_as_d_.assert_isinstance_viz_gr": {"doc_hash": "68c91f90bbbec1d097c58ccc976cdbf2027b94190d63aedf1e0b4e1178ffeef4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_raise_get_keyword_test_get_scheduler.None_6": {"doc_hash": "953858e338467c4aa517876312cbcffbf72aa6f9b011d81e1a3f948a1537dd28"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_os_test_canonical_name.None_5": {"doc_hash": "1c7192fd6245167ca672ec223bac6422020b9f3b419bc6ee2c77c97a59a2f44e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_dask_layers_test_dask_layers.explicit_dask_validate_": {"doc_hash": "16dc7e60bd8cca2fe6b66f5e05c0cfe2e638eeca490c1e5790bfca305c6fdfaa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_annotations_survive_optimization_": {"doc_hash": "a0b0241fcc2ea18114401e7f24296de770f5060fdbd1b3401d89301beb4baf03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_annotation_pack_unpack_test_annotation_pack_unpack.assert_annotations_w": {"doc_hash": "1a3b19ef09f2f75aca79dfadf64d758e659901d860d2afc570cabfe19b897e9a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_docs.py_from_pathlib_import_Path_": {"doc_hash": "cc76f0b0ed826e352ecf4da2ecdfa4e8affa61b1caa2c075abf91fe5240287eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_test_to_graphviz_custom.assert_set_shapes_b": {"doc_hash": "1638344b24ea96de58f12b15e3a852262c3d0bfe88f6b75af9152efcc5197eb7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_attributes_test_to_graphviz_collapse_outputs_and_verbose.assert_set_shapes_b": {"doc_hash": "0b96c69cbe5c2872967fb5989d8a54ea0fd7f0716d9c7feb52001b2b2e17767a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_multiple_annotations_test_multiple_annotations.assert_clayer_annotations": {"doc_hash": "1e1da447dbb4d43c2db215d0cf93ae86642654b77b6f1bee9e3dad184eea4432"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_annotation_pack_unpack_test_materializedlayer_cull_preserves_annotations.assert_culled_layer_annot": {"doc_hash": "43e604c678cf3f5346d97e761c43fe8ba3c7d7995d9c9f60123bb2c2b8fe7dae"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_node_tooltips_exist_": {"doc_hash": "6f97bdde87bfc4b2696b4132a9f1ca6a08012e668d3df5c0704a2af5f66134c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_pytest_test_start_state.assert_result_expected": {"doc_hash": "8433894f0e334ec581f392c83720fdb8e3dfa89ea0a49a7827297c2799cda945"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_exceptions_propagate_test_ordering.assert_L_sorted_L_": {"doc_hash": "a1c542e3665a9134b3659bc095a836dd4f44f9c58bdeabb3613de39eabc9d685"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_complex_ordering_": {"doc_hash": "3ffcd0c5cac9e551f13abd6be60de26b9a148f42810ffe6e7d4fd2144c150334"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_ml.py__": {"doc_hash": "23d408e4634608b0e002916e8180710e083b939664c80ac7d1433bc453937e85"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_base_of_reduce_preferred_test_base_of_reduce_preferred.assert_o_b_1_3": {"doc_hash": "e5eff1792add67246d91ef8aed27cfbcd2c1b686db8d2dd3212072b7ed59d991"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_array_store_final_order_test_array_store_final_order.assert_connected_max_di": {"doc_hash": "8430e75228b32122993cf35be74878832e245eb1061f3cee7ddd74047f77fdc1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent_test_eager_to_compute_dependent_to_free_parent.r_https_github_com_da": {"doc_hash": "d73b1b2080af769012924b88e9248582101595f1c834fd04412301d88b650a98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent.dsk_test_eager_to_compute_dependent_to_free_parent.dsk._": {"doc_hash": "888f59a2826a6848f6fb2b9e2c4901e200aba019472f593a3a01a44953c1d569"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent.dependencies_dependents__test_eager_to_compute_dependent_to_free_parent.assert_sum_costs_1_": {"doc_hash": "186e262b5fbc1ab14a7791162b226c07245d035dfa7b1ead11ff7433a309fd98"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_diagnostics_test_diagnostics.assert_key_val_num_depe": {"doc_hash": "1f631e8df38c68ae824138b08a9d558324aa459d86eaa62495541975744f1c14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_diagnostics.assert_key_val_num_data_": {"doc_hash": 
"d4d6ce77440340daffd85bb0d1265c6943ce1b540c7c29dabca4bb4f1eb94d6a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_walks_mro_test_dispatch_lazy_walks_mro.assert_foo_Eager_1_": {"doc_hash": "595147493af4382cce40ebf8267d29dba7989a614b897a61aac24f2b5abee942"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils_test.py_test_hlg_layer_topological_": {"doc_hash": "bdb10d73c992b6212f67de22c20b507a8c49a9972f90937669cbe6c56bd90b56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_from___future___import_an_apply.if_kwargs_.else_.return.func_args_": {"doc_hash": "08cf1be9bec72c822efa2cfd46a7af9cc2fb0299fe8fdc5e7d885ad48d18afd3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_homogeneous_deepmap_import_required.try_.except_ImportError_as_e_.raise_RuntimeError_error_": {"doc_hash": "ba72bc12940bdbb670f5d9ec1c21202210049756d479f417a4118bb8c9ee849f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_tmpfile_tmpfile.try_.finally_.if_os_path_exists_filenam.with_suppress_OSError_.if_os_path_isdir_filename.else_.os_remove_filename_": {"doc_hash": "3162ca6c62871b879b49f21b779b9c770cf4c07d2a6be606253b423902b780b3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_tmpdir_tmpdir.try_.finally_.if_os_path_exists_dirname.if_os_path_isdir_dirname_.else_.with_suppress_OSError_.os_remove_dirname_": {"doc_hash": "0ff34e049a3f0ad3f16d3aca3bab83669fbf120247bc14e6def68ae0c0e6c7f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetext_IndexCallable.__getitem__.return.self_fn_key_": {"doc_hash": "a86056ea8d5b8b754a10858c6ab3e18dbaa35731de3f52e97edf906c8e2dcf5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from_derived_from.ua_args.ua_args_or_": {"doc_hash": "e50653840398740f1994b2e14f7e1d5a8ddfb1ca7fb1090842a56644974592fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_typename_typename.try_.except_AttributeError_.return.str_typ_": {"doc_hash": "c3a3e4a9efa84ef493bd859b6d4a6101dcfd44c64fcfc3816865ce4b85739e0a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_bytes_memory_repr.for_x_in_bytes_KB_.num_1024_0": {"doc_hash": "dc34d9335eec064f615b1de045076c2a594436143c46b29345bd4793a6fa01dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_dataframe_like_is_dataframe_like.return._": {"doc_hash": "06af395a7fcd2a82e6829f680f7a03a99f0532af848c6cd054e7fd3207fa3109"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_series_like_is_cupy_type.return._cupy_in_str_type_x_": {"doc_hash": "3cfa30bf3a87df8a661666e757024728d7174b46f109d2bbae93adb2dec2b999"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_from___future___import_an_GetFunctionTestMixin.test_get_with_list.assert_self_get_d_z_": {"doc_hash": "777477325069b349240b5071c9be3ac9aae2e9da2824e69f90bf9077d2c880af"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_nested_list_GetFunctionTestMixin.test_with_HighLevelGraph.assert_self_get_graph_z": {"doc_hash": "03b3672be383b38ce5a91a98ed6ca2b2b36a7ebf9269c58a1153ee27f064f7b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_import_or_none_": {"doc_hash": "46135b83f64a887002e5a354da3c46b2b792d7803a3efdbc2d4a337c49520261"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/__init__.py_try__": {"doc_hash": "298b3858f8039bd67c77506579615e4405854d2771f295dbe93f3ccb97320725"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/tests/test_widgets.py_os.path_": {"doc_hash": "783e5ec367422fe4f9052f9b4dd5bd052b7779e1c051925f9e97f4091bc85a69"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/widgets.py_datetime_": {"doc_hash": "67406f68f6e0def69a0438ea3d9136afe4cdbb87b1543c6b3e4fa7e9c1e360f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_texinfo_documents__https_tech_signavio_c": {"doc_hash": "838ce7df8a882650745cf24056e504777a5d80e41cdbf7c253cddec5e4d91882"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_argtopk.return.np_take_along_axis_a_idx": {"doc_hash": "bfb7b986a7adecbc3d140c503f53f64e7318f476b598065b536871cd829b2399"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_graph_from_arraylike_graph_from_arraylike.if_inline_array_.else_.return.HighLevelGraph_layers_de": {"doc_hash": "4d82e3d9633bd2c60f0901174da316100e469d14b788b8bb14199155ffd085a5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_concatenate.if_n_0_.elif_n_1_.return.seq2_0_": {"doc_hash": "a9e584eff8afb36b5808b4f7c6880f71d41d3ef5994254546e5b98d48727153c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate.if_not_allow_unknown_chun_concatenate.return.Array_graph_name_chunks": {"doc_hash": "fadf0ce61e16d66744214cd766c3aaad22e2f7d4a416d907ed28b67b97e567ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asanyarray_asanyarray.return.from_array_": {"doc_hash": "be38d33922e4893ecf684520ec40c8488c6b46d6f7855a6a2ef37ddccff35455"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diag_diag.if_k_0_.None_1.return.pad_diag_v_k_0_0": {"doc_hash": "7ab9470dfb5a3495f508235cd3af14f152d1cd6358a0daffd5adead32f90eaa8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal_diagonal.kdiag_chunks._": {"doc_hash": "5233f73a840842eeca4d7b1a8531af451802590349004ad1117bc4346f3687cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal.while_kdiag_row_start_a_diagonal.return.Array_graph_name_out_ch": {"doc_hash": "68e3c20dd2e04850011c6b8f9fa835bf01d7954625a59b7da7492d9944b69f00"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Look_for_invocation_usi_map_overlap.assert_int_chunksize_x_": {"doc_hash": "286d828818794ae0678562f773f99527bca9ed496b551087242055c06bb9a33c"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap.if_trim__map_overlap.if_trim_.else_.return.x": {"doc_hash": "83ec6879f8f41395401e5090f6f9dbd18bbba17052713b769cdc910a713959a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_contextlib_RandomState.seed.self__numpy_state_seed_se": {"doc_hash": "809e8d74d5da4df3150c632218331c86452066f77176832d33f463926d96ab6e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d__intersect_1d._will_hold_the_list_of_s": {"doc_hash": "e96475cf7c799ee2530e362f9c01502833736f4645a9e2603631105dcdd5025c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d.for_idx_in_range_1_len_b__intersect_1d.return.ret": {"doc_hash": "4b521c6f6fdb50910ab584289044f2f3c4705c8a256f9b441414e0170737adc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_from___future___import_an_result_type.return.np_result_type_args_": {"doc_hash": "41ee07f7079de6987c747bbcf024ca2c97d6a9389d3b2d00be22dee1b4021287"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_shape_expand_dims.return.a_reshape_shape_": {"doc_hash": "9abf889ba48fdca88267ea766b4072675c697e146a20e29c0b63adb2facee5ac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_squeeze_squeeze.return.a": {"doc_hash": "21c806d4d87c3430361fe5e3712ae8f6cdd09feaef4c30194ef06004c9785354"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__partition__partition.return.multiples_remainder": {"doc_hash": "ca1c0715e26f9d63ce25b1c6537a73a40a9c5b94e958724793efa90faedfe267"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices_parse_assignment_indices._": {"doc_hash": "2e2b0f895dc61145205e140bcb199e9ba7a8c294437253d249f89f35c6d8b4bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices._numpy_allows_these_but_parse_assignment_indices.n_lists.0": {"doc_hash": "b20aa2aabf52f533ddcd3327be98296f3f3d8e66f6ffd0a99bb766f25ac47602"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices.for_i_index_size_in_e_parse_assignment_indices.return.parsed_indices_implied_s": {"doc_hash": "ad52d7d150d42ee00dfd1a737be28e0cad677fde0e9bdf5a51c77ecd640ff1ef"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_None_24_ttest_ind.return.delayed_Ttest_indResult_": {"doc_hash": "ca325a9171ee4c18744a0fc64ecf85c6a1d9d7ea33d99d1785a3e45cae670725"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_contextlib_test_graph_from_arraylike.assert_any_arr_is_v_for_v": {"doc_hash": "ca4055a081594b7e36dab6d931e7ec8449809c800ed6e839d54591f8c34244f7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_zarr_unique_name_test_tiledb_roundtrip.None_2.assert_a_chunks_tdb_ch": {"doc_hash": "5fe67a54ee1159e00ba5ec8e83876a44a7949eb3db32930dc01663473af4305a"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_multiattr_test_blockview.with_pytest_raises_IndexE.blockview_100_100_": {"doc_hash": "ecbedbc7073494e7d6a94c509a71335040630a20927af963d689816f19ab954c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blocks_indexer_test_nbytes_auto.None_3.normalize_chunks_10B_": {"doc_hash": "9afba3f0feddfb55632ac0b96bd626d48feefb7bb0b704ec77fbaf3dc30e5e41"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_auto_chunks_h5py_": {"doc_hash": "8e8d7c473dd18d70f3424397418974bfc6c5ba1fd56a11ca203a12c8ad1c0d1a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_meta_from_array_literal_test_meta_from_array_type_inputs.assert_da_from_array_np_o": {"doc_hash": "4f8142fde11ae8b1542e2cc7ec11f5300650876a3cd2d4db4010a9b89fd55198"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_assert_eq_checks_dtype_": {"doc_hash": "01a5546a40be12331f11127ea22c186e3e1adef7e2b574e0a2bd708358b2ae5b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_linspace_test_linspace.None_11": {"doc_hash": "aaa7f6d4adf5b80447b055ba6a0937081f8c1e1b7fbe3a4624e415a3f3dbb3d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_bad_input_test_diag_bad_input.with_pytest_raises_TypeEr.da_diag_v_k_": {"doc_hash": "548c1ec05797a8cf36965debeca8b0f93c748746359cd421a80084023dabd300"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_2d_array_creation_test_diag_2d_array_creation.None_1": {"doc_hash": "bb75ebcdd1948240395c17ebfcd61c3fcc7070b5fc142e2412cf241d066d47b6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_extraction_test_diag_extraction.None_5": {"doc_hash": "38aabe7b7e2e451ddd2b699afe6530209d7920279fd9c8caed69d492d5b59d5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_udf_test_pad_udf.assert_eq_np_r_da_r_": {"doc_hash": "e460ecdf87e88b584ee2ec8c0ebabeb1306d78819a1a0387f83b1a57b352c354"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_auto_chunks_": {"doc_hash": "5d8bafaa63e62181523c26dc8c69dff6f3ecd42bc2c20eb688a32c431bd14cc8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_1d_test_setitem_extended_API_0d.None_1": {"doc_hash": "b0e336f32abff5741cfe386e4e6ec39af10b5d91c6914da28f5e420f6fbfb7b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_1d_test_setitem_extended_API_1d.assert_eq_x_dx_compute_": {"doc_hash": "4a77a7db560764cbdeddc7496af476f9567977b9d453bb7caba5a0ff2df7dc66"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nep35_test_index_with_int_dask_array_nep35.None_3": {"doc_hash": 
"576b0223a087f922613f1e2b1fb686438fb4bc746519ba21d6a943099161a155"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_shapes__check_lu_result.assert_eq_u_da_triu_u_": {"doc_hash": "4fadd73101f4e620ddbfb07064aaa6d80f8540c3f42e1cb46b199dad16eae768"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_escapes_to_map_blocks_when_depth_is_zero_test_map_overlap_no_depth.assert_eq_y_x_": {"doc_hash": "d73a8509285978567ac3700567769cb02adfe44a6d9409dc52f30ef74bd491bd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_different_depths_and_boundary_combinations_test_different_depths_and_boundary_combinations.None_3": {"doc_hash": "d8f7717af91e0d85a95e0e4ceb2a7341742977c8528526e3bce64c9717d96c25"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_boundary_test_trim_boundary.None_2": {"doc_hash": "67005f477dfffc45a7a47f87a40604e87456b324907c130780627e09ed8c4116"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_internals_1_test_rechunk_internals_1.assert_i1d_1_answer6": {"doc_hash": "bda6d7252d772ccb0701044b782816c4a252ca305867ac226fef618d64d93427"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_split_into_n_chunks_test_balance_split_into_n_chunks.for_N_in_array_lens_.for_nchunks_in_range_1_2.assert_len_y_chunks_0_": {"doc_hash": "fc290e6d08ce26c07351d235c20144d488f251d798aba2e36af4ccb090618386"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_zero_test_rechunk_with_zero.assert_eq_result_expecte": {"doc_hash": "9879c10310464bfea8533d866112b451c3101329fe83e293cb944588a2a6c401"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_nonzero_test_intersect_chunks_with_nonzero.assert_result_expected": {"doc_hash": "d54441392355fe60e4f9300b90b460232dc229603e103d3f79825b45d88621d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_zero_test_intersect_chunks_with_zero.None_2": {"doc_hash": "d70a40ca33fa219175c757302701f164983c5a6167c07da559c525206f6e1862"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_zero.old_12_test_intersect_chunks_with_zero.None_3": {"doc_hash": "7977e23e30e59355ce0c58158571aa9244f2788d8884f69f3dadfeb2b9753b68"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_with_zero_": {"doc_hash": "dbfa798601a431132d069f02b35c9cbcd25c86f20c0512442106226aeba78354"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_mean_func_does_not_warn_test_nan_func_does_not_warn._did_not_warn": {"doc_hash": "f1751181c9f3d652b92e726121d2fb13ddca5ba64ef8f5e3cf13b4ca45cb20d1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_chunk_structure_independence_": {"doc_hash": 
"d8966db64e3d5b1efc6fb7e9bbb589346b7f81f207849506fb13f48c7a4ea45f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_always_results_in_a_new_array_test_union1d.assert_eq_result_expecte": {"doc_hash": "6ef18c63017b564abb13bc81d2f557e1e629e7144ad10c52edf58feb478d03cc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_expand_dims_test_expand_dims.if_axis_is_None_.else_.assert_same_keys_d_e_da_": {"doc_hash": "42be24834a247d6f68a608fbf2060dc23a600e81b9023ffe945ef7bb88958414"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh4043_test_gh4043.assert_eq_al_al_": {"doc_hash": "90bbf43e9556c92f22be2713e6206d64126700e540d4fa152f480a8d40d2e235"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_3d_with_bool_numpy_array_": {"doc_hash": "e0bf0b7d67655338525da57cd837329096a86480a6f79457e6371e042a7dd298"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq_assert_eq.if_str_adt_str_bdt_.raise_AssertionError_f_a_": {"doc_hash": "ad38b970b514f548fac2cfbb932cb67588a20ee9c2c0c8266ecc78833ee5ebdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq.try__assert_eq.return.True": {"doc_hash": "d7c1c431bfa5868866f2e691e715c1b69c95fe6bc543ef4bfd0b9b1ada20e0ea"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_from___future___import_an___all__._": {"doc_hash": "570f2c5ef90cb5657e146c8b46341e547ab0c46b11d3af4f0bb4ceebe6a4a574"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_seq_func_normalize_range.return.list_map_normalize_token_": {"doc_hash": "bb17abaf92e4303675cb437f7ac52dfe37c7d00a22e7002ae9e55576a1f43dac"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_normalize_object_normalize_object.raise_RuntimeError_": {"doc_hash": "b7f3ad6411aae56e1743ed62bfa3346b7a0fa8740fc301bc06c19771df47606f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_function_normalize_dataclass.return._": {"doc_hash": "c238208bccfa59eb65c9d4a804ffeb1f49c938e7c4c72f4872794650e4a5ce4d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_from___future___import_an__deprecated": {"doc_hash": "9d71945e049d37b2ab56a2208e09a3feca481a831b20bfaa8e533b110cf49c16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockIndex_BlockIndex.__dask_distributed_unpack__.return.cls_state_": {"doc_hash": "41df931bf4d8054820a48a1667a7d2a72ccb0ba2022865904ad60af0560b864a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/__init__.py__": {"doc_hash": "16db12ad67ff6aa8050541ebd8445f17b39990ba91d09e492c210c5e81053be4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_from___future___import_an_if_parse_version_fsspec__.errs.errs_aiohttp_client_ex": {"doc_hash": "090fa99a86502425c43e63807ddb93f0ac0ac3d5874c63563d3144979c497fa2"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_test_parquet._Should_succeed_otherwis": {"doc_hash": "349341f7c4322b49e9d16cbda6071175e84060b8c0f648e23ec8da44b12b8f38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet.df3_test_parquet.None_3": {"doc_hash": "311d98a8cd392aa1c1c95d21ba4a1e51200793e870376dffe209b482e24bd745"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_from___future___import_an_Callback.unregister.Callback_active_remove_se": {"doc_hash": "106367ed2a67f88d901c9963cb07a9a5507b5aec41766959518bb79665368d6c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/compatibility.py__": {"doc_hash": "01fe25d7192dff691721bd9512b9759cd582bd55644da7e8367339a9ff307ead"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_from___future___import_an__get_paths.return.paths": {"doc_hash": "0e2fabc39851d49a6cd54c2c3e82cff916585056bd7e5a08b9bc266078812bb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_paths_canonical_name.return.k": {"doc_hash": "5969bbaade1515cfdee606c569d3d3a85392855badc9a8104cd1a853a4eddda4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_rename_rename.set_new_config_config_": {"doc_hash": "09ce329fc381bb90ab7d3a54041bfa67546d25d1f0a1099c89f73f395cdb5cc2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_defaults_update_defaults.update_config_new_prior": {"doc_hash": "0a387989f4708085586b550275874879c27d87b0c2809b532e571920c6402a56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_from___future___import_an_make_meta_pandas_datetime_tz.return._nonempty_scalar_x_": {"doc_hash": "0961f905db2d118c7ebca0fcfd4fe13c628243beb98695b8a80ae74e06892055"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reset_index__Frame.clear_divisions.return.type_self_self_dask_sel": {"doc_hash": "e6d914b0699aa26cfd2c1e5ea75bbf5fb5c614415219d84274cd56f4c816aef9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.compute_current_divisions__Frame.compute_current_divisions.return.compute_divisions_self_c": {"doc_hash": "9b38dcbb85e8479d0e303fc0ceb9531a1664ed8d1d68d80f2ec8f2ced31f026b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.get_partition__Frame.get_partition.if_0_n_self_npartiti.else_.raise_ValueError_msg_": {"doc_hash": "99f5dc3e2fdf87d9caa0f76b8289a6246620ca242cdda89585e58022211d76f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.std__Frame.std.return.handle_out_out_result_": {"doc_hash": "66388e4036e354f13701bdb20b46e918634e6cfd71ea203d006744f1fe9e7a18"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._convert_time_cols_to_numeric__Frame._convert_time_cols_to_numeric.return.numeric_dd_needs_time_co": {"doc_hash": "69661e3015f35e9872012a9ecdc5638f0eb63c6f87bfd1f74f3d80a5995a53aa"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.append__Frame.append.return.concat_": {"doc_hash": "dbaf2f6d411c11c5406f3d920524600073d99c3ac79658257c361c7692d5bf45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.dot__Frame.dot.return.self_map_partitions__dot_": {"doc_hash": "c00c152568f061db045faf39fcef30c4ccfa9427dc75a2f0826e5fa390098900"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._repartition_quantiles_Series._get_numeric_data.return.self": {"doc_hash": "a95eb2c51e27693ba8635e8d3b10a8a7753c78674aceb77cbed1c57149381855"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.iteritems_Series.iteritems.return.__self_": {"doc_hash": "3b7af4ce5b5a1b536d387001269276ccfffd38240d447d17ca50a8f87822958e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__iter___Series.nunique.if_dropna_.else_.return.uniqs_size": {"doc_hash": "7e68fbead8ec44731c919d18215e059aa563a2d073a7bf10f664f4179aa7915d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.map_Index.map.return.applied": {"doc_hash": "f13c349cc9b30d48e44345c3d28333d2e4e230364378d2570001f76700e50d19"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.is_monotonic_Index.is_monotonic_decreasing.return.super_is_monotonic_decr": {"doc_hash": "aabbce513f92cd609966cbc6eaf4dee21c736f144da91f4cd974024c25653c62"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.if_split_every_is_None__apply_concat_apply.return.new_dd_object_graph_fina": {"doc_hash": "2805c21ca6701bc757c8995a16ab663f5552623f2d3ce4733dc9dfbf7821c276"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions_map_partitions.if_meta_is_no_default_.else_.meta_is_emulated.False": {"doc_hash": "2f6ca5afcdd5aa8707e848d2542bf26fd9f38dc4669cc0f3805dd3f617234e2a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions.if_all_isinstance_arg_Sc_map_partitions.return.new_dd_object_graph_name": {"doc_hash": "0b4ed214b15b96f2b8e869f04b5c49ccdd9623ffa675dca4d0ca7f3d994ea609"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__take_last__take_last.if_skipna_is_False_.else_.if_is_dataframe_like_a_.else_.return._last_valid_a_": {"doc_hash": "04e446a01ba32c32841d173143a333cb38e3d008c6261a1b9b02c540e281fa11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_check_divisions_check_divisions.if_len_divisions_1_.raise_ValueError_msg_": {"doc_hash": "adcbba255c82f58b32d525083851f96449c6c315b715e49512bb5a3cd7ca8739"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_mapseries_series_map.return.new_dd_object_graph_fina": {"doc_hash": "bc1fe180eed26ccb9a4f47cf4a5324b40b54c0d43a15030f909c797c50325f17"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__convert_to_numeric_": {"doc_hash": "b09414b2b6249f058464cf317814a0daf944b86b28a3c0c2d755a323afc67b26"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy__GroupBy.__iter__.raise_NotImplementedError": {"doc_hash": "aa6ab99148e3c55837aeb0495e3956afbdc9b3d0e48ebda612ebf8652ac95821"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.compute__GroupBy._shuffle.return.df4_by2": {"doc_hash": "d0b270765c0228a7c18d95bb8082ab44afa62edfddb2a7d6eccfa10cbdafaa5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_read_json._Create_a_dataframe_fro": {"doc_hash": "63d09d6304b35840dc3b101567f4b556cb439a89b6d57682703dbf40ee385878"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json.if_lines_is_None__read_json.return.from_delayed_parts_meta_": {"doc_hash": "527dd5e895e0ebb60afa9b4294af147c9a5f3017a920bee5b430d22c549cc049"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_chunk_": {"doc_hash": "4c46ea25196d44c80acc73b6a4deb27ea264f9f5fb6a3efbd9ce681a4bba8b58"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__read_table_from_path__read_table_from_path.if_partition_keys_.else_.with__open_input_files_.if_row_groups_None_.else_.return.pq_ParquetFile_fil_read_": {"doc_hash": "13ee9b97f3ece35de338558d1cd3f03af6d0c6cfa53730ebcf4337dc53012d90"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.if_len_paths_1_and_fs_ArrowDatasetEngine._collect_dataset_info._be_avoided_at_all_costs": {"doc_hash": "51e1727ffdcada140aad9a2b3390a3d248b0f8c2bdc46973e75330bf9dda3373"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.if_gather_statistics_is_N_ArrowDatasetEngine._collect_dataset_info.None_46": {"doc_hash": "f5274dad658dd73f4f66c19258af08a5a0f4b615fd01a7ef3c54cb630aa0028c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._construct_collection_plan_ArrowDatasetEngine._construct_collection_plan._Check_if_this_is_a_very": {"doc_hash": "c8e74745613327f18380b8230dd1aac7e2a34f66530bd22cb49ad0e2d5eda502"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._construct_collection_plan._the_path_names_ArrowDatasetEngine._construct_collection_plan.return.parts_stats_common_kwar": {"doc_hash": "2869aaf312353b27d153d980548fb97f924f2686e8cd63017954c20ac00f7540"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_from___future___import_an_NONE_LABEL.___null_dask_index___": {"doc_hash": "d42ad3175752c32d78b8dede953edfa232940e933fe8722428eb2e3ee14868a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py___ParquetFunctionWrapper.__call__.return.read_parquet_part_": {"doc_hash": "00df5f903189013743bfcccdf76480267ba287cca1e21fa941ad77adaab5cc7e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ToParquetFunctionWrapper_ToParquetFunctionWrapper.__dask_tokenize__.return._": 
{"doc_hash": "269ced75df667fa7f935a64eb7453eb80019c03c0f99ba4d5fd449f57d6718af"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ToParquetFunctionWrapper.__call___read_parquet": {"doc_hash": "5a9cc3cfc3093e88c3384e17caca37136aaf68eb6a875763b0ddfef0d48f73d7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.parts_divisions_index__read_parquet.return.new_dd_object_graph_outp": {"doc_hash": "1c27335b00ff7e9d33dd1b618e1b2ce670fe1f3d150e2c654501302dddbb6dc4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_check_multi_support_read_parquet_part.return.df": {"doc_hash": "47a1fb4280fdbdaeb4e09b3415c6dc1189ad6b4de29c2e83cd6391c54e03e738"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet._index_preservation_itse_to_parquet.meta_name._metadata_data_write_": {"doc_hash": "7070856033c1bfbe7e8905c91b597891b4e882c63acb2d417b92bcd7e3ee1d56"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.if_write_metadata_file__to_parquet.if_compute_.else_.return.Scalar_graph_meta_name_": {"doc_hash": "9f195d38a26a95022821e23db4fa6af73d682b365a10e93988d9561003b37430"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_create_metadata_file.out_dir_create_metadata_file.return.out": {"doc_hash": "6c2d1fc3dee614f8759c0ca75b068cfa1a7bf0c94d8bdd721a36f05c874ba850"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py__ENGINES_get_engine.if_engine_auto_.else_.raise_ValueError_": {"doc_hash": "85de921dfb8bbea3116f2d47ba8fdc121b3c8a274e681e2f14f17515a1bd6450"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info_FastParquetEngine._collect_dataset_info.require_extension.dataset_kwargs_pop_": {"doc_hash": "415d58465ddecb77c99600ecc6df52e9293c0982671cec4c76f50c4ad97c0729"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.pf_to_pandas_FastParquetEngine.pf_to_pandas.return.df": {"doc_hash": "70619833740edecad443db879eff5f9c21ebc0f56a5532c510fddd20fea13453"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__set_metadata_task_size__set_metadata_task_size.return.metadata_task_size": {"doc_hash": "b2b30336bd1d73ff1587ed3cca054d42236a3b9d3e82f85c456264adba51e28e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__process_open_file_options__process_open_file_options.return.precache_options_open_fi": {"doc_hash": "f1a51750e22695c05a4f43387a29d145ac725edd4a7bb6b4ed10b82fd06a6b06"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__split_user_options_": {"doc_hash": "f066eeebf76dd9b5626249b64995791dfc3fc1f1a64439d8bea24434072e47eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_warnings_read_sql_query.if_not_isinstance_index_c.raise_ValueError_": {"doc_hash": 
"df9d24eab457cf7df7475937a1862a9960140e5da9fec42de5e5b0cab340c312"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_query.if_not_head_rows_0__read_sql_query.return.from_delayed_parts_meta_": {"doc_hash": "35d50383b7997b247fa48a70046e6c37bd67a98b16a31935ec9605efd60e79d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table_read_sql_table.from_sqlalchemy_import_sq": {"doc_hash": "f1dfec83df9d1f46ea6d9d6c57e7ccef39cb12bb57dd53227035d9236817c6bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.if_table_in_kwargs__read_sql_table.return.read_sql_query_": {"doc_hash": "13ff1db2672e0958b884a1aee2f3d60560ad88fc27d4563a8a8a814a88b3bebe"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_read_sql.if_isinstance_sql_str_.else_.return.read_sql_query_sql_con_": {"doc_hash": "15f3d9035205515979a2a21f69f68247f4efd41a46752b8cef70eac76856d42c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__to_sql_chunk__gt14.if_.else_.return.True": {"doc_hash": "9f04990042fab5cd147f2cdd9b234adecd98f6319138bd684e6cf89a1b2d5fb2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table__old_read_sql_table._": {"doc_hash": "7e27c6892a1057d3d0289f7ad46ca30c790e866922cff6153f7988b6094552c1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table.sa__old_read_sql_table.if_head_rows_0_.else_.if_divisions_is_None_and_.raise_ValueError_": {"doc_hash": "79051aac968aa7f03697e70b3de97a953bc1dbb04ab81ff01148c85be94f3b5c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table.if_divisions_is_None___old_read_sql_table.return.from_delayed_parts_meta_": {"doc_hash": "874284323c9b2503962eb8f04d922f87b73e146321ebba2824c43a54ff8489e4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_nodes_test_to_hdf_modes_multiple_nodes.None_4.assert_eq_dd_concat_df_": {"doc_hash": "fb0f3ff407ba1b69300979686c97f7a5322243278bb580eec2a4aa116bc42dd1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_files_test_to_hdf_modes_multiple_files.None_3.assert_eq_dd_concat_df_": {"doc_hash": "1d94a2b459d1b59c6e74506c69701a9165489d2626bfedf8ff041fa0b23b3c87"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_contextlib_test_meta_from_array._Should_be_5_partitions_": {"doc_hash": "8ea2a3dd67e739a1133fa25a8069c55a165db901d6b1031f64dca204912b3347"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_with_record_dtype_check_bcolz_deprecation_warning.with_pytest_warns_FutureW.yield": {"doc_hash": "dd810892855ff46c8b8be98aeac9e4ea5456f13569c913c938a5e675f0d3b34b"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_multiple_threads_test_from_bcolz_multiple_threads.with_check_bcolz_deprecat.with_ThreadPoolExecutor_5.list_pool_map_check_rang": {"doc_hash": "26eb1f4816ad2d04b9fafda46a45a682b129ee1be9e8fe762433c0974a4cc7b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_test_from_bcolz.with_check_bcolz_deprecat.None_8": {"doc_hash": "1ea37aa346a4bb6a01a75f0fdd019175a8ba8c576e357a62421b674c8f9065a1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_json_test_read_json_with_path_column.with_tmpfile_json_as_f.assert_eq_actual_actual_": {"doc_hash": "4c4cd9f6a6f768a8ed496116e1454733ac7e54d40b9cd9ba095658b3084f2c83"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_path_column_with_duplicate_name_is_error_test_write_orient_not_records_and_lines.with_tmpfile_json_as_f.with_pytest_raises_ValueE.dd_to_json_ddf_f_orient": {"doc_hash": "36d3b700323a25061c6450589a3030e09eb1589a522ef8a0b44b694e011e268b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_multiple_files_with_path_column_test_read_json_multiple_files_with_path_column.assert_eq_res_sol_check": {"doc_hash": "864e65f25fbedcaaad28828e2a6480b5745a7799efb03fc040d371990449d1a2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_basic_test_read_json_basic.with_tmpfile_json_as_f.assert_eq_actual_df_": {"doc_hash": "6931aa6fcb62229250513ce18cc347d1ef826bd09ec0c886feeb96f33bd2a721"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_json_compressed_test_read_json_inferred_compression.with_tmpdir_as_path_.assert_eq_df_actual_che": {"doc_hash": "368a3c8659a03179f559885db6ca1a8338c72b2dd92e5cbcddc5e6d1c9bb7b5d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_layer_creation_info_test_layer_creation_info.assert_eq_ddf1_ddf3_": {"doc_hash": "032e28df8f1accdcc2b3d2cff7d565673ea7bb30ab79c6a96122cd0cfc6f6021"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_extra_connection_engine_keywords_test_extra_connection_engine_keywords.None_1": {"doc_hash": "52d61cb5f0b4fbd67738f229721d369da81b9d0db3373244d71dba7e9b4b4c3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_test_query.assert_eq_out_df_loc_5_": {"doc_hash": "af102588b6988fd9454442284d5a08d4b1d5c99dc0b05188e05f663174c62b20"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_index_from_query_test_query_index_from_query.assert_eq_out_lenname_df": {"doc_hash": "f5744de347575f302396eaa711bf5ad6ab5a4e9aac7c4b5320fdb6a9e5922fab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_with_meta_test_query_with_meta.assert_eq_out_df_name_": {"doc_hash": "591f63ffca3558009bc3fb98819a3c0aab4ff89c49a9eb0a5a306da772ba9d17"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_character_index_without_divisions_tmp_db_uri.with_tmpfile_as_f_.yield_sqlite_s_f": {"doc_hash": "15fa8e11e77c29a8761a740b41f662b86c98abef897c103e5efebe15d7d562c2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_test_to_sql.None_5.assert_actual_npartiti": {"doc_hash": "585eb593e113d6935a71451de552ce31b6504ef4d8a6ac64f3f4797ee7a2867c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_kwargs_": {"doc_hash": "83f40f6f9b9d60e6db7f667610fa2eaa9abe5e419ad19b71be76e6cf3ff225b7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__meta_from_dtypes__set_context.return.stack_enter_context_obj_": {"doc_hash": "0861cb069014e18a525c8636e52cd472864f214354e4cbef88ce4025777614ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__open_input_files_": {"doc_hash": "50ca53aa971ffbc4adfdebdfd4dce4a9c2657acaa90ea5ac65cda553ed0ad311"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py__cum_aggregate_apply_pivot_agg_last.return.df_groupby_level_0_last_": {"doc_hash": "4c13b3143237d33d7fa83075064d863703ffb02d5799f579af2566c7b6ae48c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_pivot_sum_": {"doc_hash": "ab24776f4703cc2e76089aa1b03c25887c3fab697cba6d6e9c4a8d51688384e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_3_pivot_table.if_aggfunc_in_count_.pv_count.apply_concat_apply_": {"doc_hash": "d2c37eab9154630c9f3c704ba31e70c36c0997182f9c09443e2e77ba03d7bacb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_pivot_table.if_aggfunc_sum__pivot_table.if_aggfunc_sum_.else_.raise_ValueError": {"doc_hash": "86739595b1cd56ac1d59dcd988d0f38f8fe4c65f8922accfc9bd3c239eb9801b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py__compute_partition_stats__compute_partition_stats.if_not_allow_overlap_.else_.return._non_empty_mins_non_empt": {"doc_hash": "9b9a90c67491e830a6d49dab6968e10dd3f402946ae7c731ac7cec4434233cd6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_compute_divisions_compute_and_set_divisions.return.fix_overlap_df_mins_max": {"doc_hash": "50de065a41ce01a4412026134bf8fa134adff270ca163f638bb5c7d58df81184"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_agg_with_min_count_assert_near_timedeltas.assert_eq_pd_to_numeric_t": {"doc_hash": "31363208e0c4fd600b591286aea052c916cee6c75e50b6b1a6873e3043714a45"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_creates_copy_cols_test_datetime_std_creates_copy_cols.None_3": {"doc_hash": "28931916d3199083ecbe741032e8b3b58baf6556a5618865feb835b0da851906"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_with_larger_dataset_test_datetime_std_with_larger_dataset.None_2": {"doc_hash": "58e490a8f8a0fa68e6eb3c0fd71bbddf5f2674843e274c96b516ff9aa57b0ae2"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_across_axis1_null_results_": {"doc_hash": "bd62f044c406d33f2e42a8ce6f094b787e14da6df6b215a14ec77b4f104ec91b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_test_column_assignment.assert_z_not_in_orig_co": {"doc_hash": "4089f1f1e2fb8b632594ef50d817dc911b265ef72d64f3dc6aa82e56d34afe16"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_array_assignment_test_inplace_operators.assert_eq_ddf_df_assign_": {"doc_hash": "11732da3b3c24e6b9108b49dc40b7c88667ec52ca71a71bd92e05b04a9c0979c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_test_shift.with_pytest_raises_TypeEr.ddf_shift_1_5_": {"doc_hash": "5d511298980530568fa796843ba48c6aaf875bc8492fce40c09aa562cace51dc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_DatetimeIndex_test_memory_usage.assert_": {"doc_hash": "67e92986b66f854ec93edb6e1405bf3581dc57fc10b8be8c4841ba46225aae7d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_per_partition_test_with_boundary.tm_assert_frame_equal_res": {"doc_hash": "083f0b3227e41d58db939f63f8050fc40db305a1481fe36eb70c5de05ba338b1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_error_test_groupby_error.None_2": {"doc_hash": "4e04246e2dcc9ceac1224d503927423a76080a43e2a334ab38803476e63273f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_cumfunc_with_named_index_test_series_groupby_cumfunc_with_named_index.assert_eq_result_expecte": {"doc_hash": "a47a77fbf7f2d153c9c03408669c0b558c53e4183ab449c0ab8bddb26316d2f5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_dask_test_aggregate_dask.for_spec_in_specs_.for_other_spec_in_specs_.if_isinstance_spec_list_.None_1": {"doc_hash": "8d160c76913c29b22ed66674fe0ac20d1047b2fe3eef5ba6dc09911379d9a6fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_within_partition_sorting_test_groupby_shift_within_partition_sorting.for___in_range_10_.assert_eq_": {"doc_hash": "218ec1007d44f9b3886d675c0c7b01760cbde2e60b982e20f57c61f5f92619f9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multi_index_with_row_operations_test_groupby_multi_index_with_row_operations.assert_eq_expected_actua": {"doc_hash": "d67c700d7a71d87e1effb780aab1dc053c3100592c8912e500052ff3a6f7d280"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_iter_fails_": {"doc_hash": "a6a88be641058853a723c60a792257d00d50ce60374268dbb8b5d108bd2b94de"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_kwargs_test_get_dummies_kwargs.None_3": {"doc_hash": "2893042964793d645f7fac6c2b7d242d25a599b441dddffef6dcfa8c7914e87e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_check_pandas_issue_45618_warning_check_pandas_issue_45618_warning.return.decorator": {"doc_hash": "b3a75c03215cb61287d917dedce972859a9e69b4f80b87acbf374c71435152aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_firstlast_test_pivot_table_firstlast.None_1": {"doc_hash": "c9392759be13f4c5c0cf18dd7655b5b7e15601fc032a4a5f077ccc7757ebb450"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_by_column_with_narrow_divisions_test_rearrange_by_column_with_narrow_divisions.list_eq_df_a_": {"doc_hash": "4fbdd5a10e372f7e0b636168b09cf1ea80d0e21252cf67ae1c910b8612d70ca9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_maybe_buffered_partd_test_maybe_buffered_partd.assert_f4_tempdir_tmp_": {"doc_hash": "b57c0ba0f00b9c533defe1622581605746a52726454c4614c900a6c4b7df0951"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_empty_divisions_test_set_index_divisions_2.assert_list_result_comput": {"doc_hash": "ef5ba124a8e08b9b4c30484c958c510f1b5d18914a7382fe80a72405c237a0d0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_empty_and_overlap_test_set_index_with_empty_and_overlap.assert_eq_result_expecte": {"doc_hash": "73f59e2389174caf256aba7dbca45a4ee3dfc2f05648f8b8f1372f9beaa3804e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_nan_partition_test_compute_current_divisions_nan_partition.None_1": {"doc_hash": "3550f659fb62864a30baca9e4df99101ae185e51315d38eec54ebdeecc45a4f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_overlap_test_compute_current_divisions_overlap.with_pytest_warns_UserWar.assert_len_p_for_p_in_b": {"doc_hash": "e322d29d39717df00cd09566d7f4be67a91023b5410f38987c4e2829fd5047f4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_overlap_2_test_compute_current_divisions_overlap_2.with_pytest_warns_UserWar.ddf2_compute_current_divi": {"doc_hash": "c4d5a1172d9de94797ee2f777797fd8b3c6e64af6ccb8a17928d3d71f78b2ac7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_single_partition_test_sort_values_single_partition.dd_assert_eq_got_expect_": {"doc_hash": "efb04965856f262e581ce9ec9932e826f8f6b2a973228fd6c233b6ae5f6d4ddf"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_values_raises_test_noop.assert__noop_test_None": {"doc_hash": "13bfe92749149c1b271bf2f08aa4763791d2f2c910eef810e86dafea6ba90e01"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta_test_make_meta.meta_15.make_meta_1_0_parent_met": {"doc_hash": "8ef86adadbf9db991d419c2e6c982ad9ae436b23da7823127f7047cfa279903e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta.None_36_test_make_meta.assert_pytest_raises_Type": {"doc_hash": "1195fa6c757c2616bccd58f2dfc259d06d2a862d82348a539f0025668a2cedf0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_np_getnanos.try_.except_ValueError_.return.None": {"doc_hash": "40608c45f304d441a50ab679348943984e3f68ea4b28ac656efb7c53a6470efb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_series__resample_series.return.out_reindex_new_index_fi": {"doc_hash": "055b95feb39e6ea3b4300b9c64736634d6f4536d55a3efc2b72dedfecdbc3b9b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_from___future___import_an_is_float_na_dtype.return.isinstance_dtype_types_": {"doc_hash": "2a9099e833d90e597f6b009922e9f459543fecf4f27474a4eeaedcd55b4c10df"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_contextlib_dsk2._a_1_b_2_c_sl": {"doc_hash": "67fb54dfe8e866c657c86dbddcc36495427c2ae108d0e8224512b2da36eb22bf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__Tools_to_modify_alread_T.TypeVar_T_": {"doc_hash": "94839885ca0f9f04e2af0dceab04d78979eb3dd09bffc05a1b312378182b3b4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_bind_bind._parents_None_is_a_speci": {"doc_hash": "4f2dffbd8749d2058e28c710f1c06c1382e77bdd3af3803f94097391222cb2e0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_bind.blocker_bind.return.repack_": {"doc_hash": "30747aaadad7fbeb24089f9963e8cc07c887daef72967d048312eebf3babdc95"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_from___future___import_an_compute_layer_dependencies.return.ret": {"doc_hash": "4251967f4ba0ec639727cf9b2ec57ea0633eca57cc7046efa23ffaad8858b573"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack___Layer.__dask_distributed_pack__.dependencies_update_": {"doc_hash": "0d35c11e8325d617eb416e97d86a69d58476604c16ddb1db8ba68d0bb3cbf4cd"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack__._Add_in_deps_for_any_t_Layer.__dask_distributed_pack__.return._dsk_dsk_dependencie": {"doc_hash": "1a44989d268ac051b4533de0d9a566a78e14d8e21b7b75e8c39329bddca36566"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__len___HighLevelGraph.keys.return.self_to_dict_keys_": {"doc_hash": 
"81f97eaad28ba650fe4bfec9b65a2a3719c623777605ca82bbc15883360bcb37"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_from___future___import_an_CallableLazyImport.__call__.return.import_term_self_function": {"doc_hash": "e6943eb7f8011304b706579965f6ac53b3f74c748bcba466d2c0873b29b26555"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_None_5_ArrayBlockwiseDep.__dask_distributed_unpack__.return.cls_state_": {"doc_hash": "8043b8c126e789e26898dc6b97a3517216664d0bbaa54afe0f27a9344ecda183"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayChunkShapeDep_ArraySliceDep.__getitem__.return.tuple_slice_s_None_for": {"doc_hash": "05f88bca21b47aa65db09382a663ac707c56925f7fd908b0f3d8d61983d88779"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_None_10_SimpleShuffleLayer.__len__.return.len_self__dict_": {"doc_hash": "1763b8005b03d79db14af92758630d80ba2bf565510ab428901561f875012922"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameIOLayer.project_columns_DataFrameIOLayer.__repr__.return._DataFrameIOLayer_name_": {"doc_hash": "2709b58076a69c049933fa382a299ac5be9b7b3bf3b90f14903914b2c3d92091"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction_DataFrameTreeReduction._make_key.return.name_parts_split_if_": {"doc_hash": "cac4ca1aa91af34ded437fe1ca4bf48ffcf99c27492d58f20feb627e42149570"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction._define_task_DataFrameTreeReduction._construct_graph.return.dsk": {"doc_hash": "5a9842905a4912c74742afbe8fa1694e8356e288f3fa5b54020d355201a21142"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__repr___DataFrameTreeReduction.cull.if_output_partitions_s.else_.return.self_deps": {"doc_hash": "2d7d55393f22cbca45c6f44c960054b089d8869eb936dcfa348a90082c9bcca4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__dask_distributed_pack___DataFrameTreeReduction.__dask_distributed_pack__.return._": {"doc_hash": "d699968279e942ad666d75fd1b9a1f4e6b344051cd683ffbf347a8b94fb783d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__dask_distributed_unpack___": {"doc_hash": "7db216e54a22a40a69b6bc270e2143e12a8ba351f43f31201430fbd8cad52bdb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_from___future___import_an__process_get_id.return.multiprocessing_current_p": {"doc_hash": "da4f78de357f9097f0a44c50b3ec8a9390c33b6dc905df4f246b78cedf278ddb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_dataclasses_f3.pass": {"doc_hash": "e2aba5dbc5b98dfab1ed97c579486deffd963f1025aed5933fb062c1c243dfa8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_partial_func_args_kwargs_consistent_test_tokenize_partial_func_args_kwargs_consistent.assert_res_sol": {"doc_hash": "41fd17f59e013fe58a5444af23261f5d7c31230760570c5dd5e5d33cdd1b55be"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_base_test_tokenize_callable._Consistent_token": {"doc_hash": "44ba8ea5dfb7fc935d6ed338f71ef6082dd568333a26180f1347dc7f71abd987"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_sequences_BDataClass.dataclasses_make_dataclas": {"doc_hash": "29d905d9c5cd95b99a19b1617d873715aae911bfbe9d2ba8e0fd0c598e40ef03"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dataclass_test_tokenize_dataclass.None_6": {"doc_hash": "b41d7c325a7d2e80433a0d48dafe9c953f575aba8fee0cd6f1aff97c22556dca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_is_dask_collection_test_unpack_collections.build.return.t": {"doc_hash": "07629388ff5f7f0df501131133fab066876224fee73028d9c566fe1e55613f55"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_collection_names_test_get_collection_names.None_1": {"doc_hash": "61c7c191d9a3dab9ea4246c94eeee4f5271d991438481458ffecf27e45e535aa"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_item_change_name_test_normalize_function_dataclass_field_no_repr.None_2": {"doc_hash": "a8fde5af3cb4942a92dbff0e7e67d7c8cad9974ea1a56c17bb0feffec352fe94"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_scheduler_with_distributed_active_test_callable_scheduler.assert_called_0_": {"doc_hash": "1eec5d3800eea00382c745059503e2d6d56575701328b6e6baf3eb73645854b5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_num_workers_config_test_optimizations_ctd.assert_dsk1_dsk2": {"doc_hash": "72d51114dfed6f115e6caf2b87c2a5255a18c4aab8799ee8c22ed8f0d2204735"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_clone_key_test_clone_key.with_pytest_raises_TypeEr.clone_key_1_2_": {"doc_hash": "0d63aad2d0f1ac71da5d364858c6e60a07e295efe1733e1ab9daaad202fcb235"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_nested_test_set_hard_to_copyables.with_dask_config_set_x_th.with_dask_config_set_y_1_.pass": {"doc_hash": "c1d22a05324c7a8464826bacbc7e554b25cd28f0386a9ccb16662ff99af4786e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_config_serialization_test_config_inheritance.assert_dask_config_get_a": {"doc_hash": "52ffba9083cab0e8a3c5d239b891b04241a132bf7f4e52d29daeafe3d14d5982"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test__get_paths_": {"doc_hash": "7f990b0eb0e7a073e55b59662a98b537b6207175b5009c2796dd3f1fef9ec49d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_serializable_groupby_agg_test_serializable_groupby_agg.dd_utils_assert_eq_": {"doc_hash": "c229647cae333e6b178eb156f27ee6e89125083ac6e377da735de3ba60f4b18a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_concatenate_test_blockwise_concatenate.da_assert_eq_z_x_schedu": {"doc_hash": 
"a77d5d4cb29ef0d863f027b10f7cdfd9db384fb8cf5c5424dba7c308989d8a3d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_pack_MaterializedLayer_handles_futures_in_graph_properly_test_pack_MaterializedLayer_handles_futures_in_graph_properly.assert_unpacked_deps_": {"doc_hash": "c0ae2674a8a793fde5942b9d0792dd6e04fca866d34d1012ff9f8dd2e371e923"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_sql_engine_kwargs_test_to_sql_engine_kwargs.with_tmpfile_as_f_.dd_utils_assert_eq_": {"doc_hash": "15e8771f0f86cc45ec17f1cdc47f4ede921f1c6a9611353d8c10b4593f163577"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_non_recursive_df_reduce_": {"doc_hash": "3bcccbbb04d54cec3439f5c942ea1cdf5f74044c0ac50bf1f50c3f0f75b022eb"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_len_does_not_materialize_test_len_does_not_materialize.None_4": {"doc_hash": "b72f86d9e8a627fa709e633f0bce46ea562e8e2ec707df97a64604ca68417824"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_os_SchedulerImportCheck.start.for_mod_in_set_sys_module.if_not_mod_startswith_sel.else_.sys_modules_pop_mod_": {"doc_hash": "f6145c1875b2f540a67b2728c16df94a73a182eb4a36b63416922c599801416e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_array_chunk_shape_dep_test_array_chunk_shape_dep.for_i_in_range_d_.for_j_in_range_d_.check_i_j_": {"doc_hash": "5cbcf238a3023a8c2a612654b2fec0a285725ff02c12d9191ed088ac8177d52f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_array_slice_deps_test_array_slice_deps.for_i_in_range_d_.for_j_in_range_d_.check_i_j_": {"doc_hash": "358a36437a918c697ed159b412d4ab0e911315871733a67eb62c8a486f68e2cf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__dataframe_shuffle__dataframe_tree_reduction.return.dd_from_pandas_df_nparti": {"doc_hash": "5b6d4bc558d59074318d29d0f9bd0a5e8868f6fc55dc55fd05eb3149676046a0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_func_test_derived_from_dask_dataframe.assert_Object_with_missi": {"doc_hash": "94a967ca5ea18909bf25639120704a77a93d2b12388b4c1681c8f3a72b8b7a38"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils_test.py_warnings_test_hlg_layer.with_pytest_raises_KeyErr.utils_test_hlg_layer_hg_": {"doc_hash": "04c53953396299a872071af369870afb57d1f8aab0e9c5fb2db86fcbf16cbca4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_cached_property__cumsum.if_initial_zero_.else_.return.tuple_toolz_accumulate_ad": {"doc_hash": "737e926f820aeb0cb0d4acc5381b3235f3d0deb69f17109b6ffee1f6de519882"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_cached_cumsum_": {"doc_hash": "8ef6f60ae2402d8e82dd7cf5c8f50638e06011dc3a0ef53b167c0f2381cd9c35"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_redirect_files_redirect_files._": {"doc_hash": "cf0da021c8cda976f25988acc543790c0a1b1db861fc55cbf842c096012e4657"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_redirect_template_": {"doc_hash": "bdb66c17a9e79492e86672790019157d6d75d14bcb7a861b0e57c346ff6c81d4"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_3d_test_field_access_with_shape.assert_eq_x_col1_col": {"doc_hash": "08ede6d7c1b7dfb9e2ab9b19d2dfd0aab8fc4f48456d75b3cfa7e4436a2e3d60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_test_broadcast_operator.assert_eq_w_d_w_": {"doc_hash": "ce127727ef5487a7274177c916c3869ddb7e99ac53d8564fc7307dfbcf0c5e90"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic_test_elemwise_consistent_names.assert_same_keys_da_maxim": {"doc_hash": "c498bc304a007717da30fc45a0917dbd4f36b7e0d430cfef390c90050bbf7249"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_optimize_test_Array_normalizes_dtype.assert_isinstance_x_dtype": {"doc_hash": "a1e2973ea0ff5835aa20c69236358c61dd4e2d284ca87c1b03afe4660a2542ca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_with_lock_test_asanyarray.assert_da_asanyarray_dx_": {"doc_hash": "9ea29b9ef136913d94eaf25ca517f0d15c1b86b759de15bb5f4ad98437b7ec7b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_dataframe_test_from_func.assert_same_keys_d_from_": {"doc_hash": "37cf0b61851d84ee12ad11d23d1e41b5511d02486e2f323dcac9a35521301763"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_reductions.py_warnings_test_arg_reductions.assert_eq_dfunc_a2_0_sp": {"doc_hash": "a00e0423369b6c9028432ecf0410f3bfdfc4a6ccb1abe4364b307b7aec4eefab"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_2d_test_reduction_2d_test.if_split_every_.None_8": {"doc_hash": "afab298e8c04e32eb1e9d5b21072c97964f92f47553b08926121400512d3cf08"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nanarg_reductions_test_nanarg_reductions.with_warnings_catch_warni.None_2.dfunc_a_compute_": {"doc_hash": "dd1fe30a4d44694a05dea2a56e8914fde06fc087493e1671060e7dbf3dbec15f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans_test_reductions_2D_nans.with_warnings_catch_warni.None_17": {"doc_hash": "56f5ccc905b0ac6350f0736ddc4e469e7741c1d46f250ceefea941649a32fc30"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_test_einsum.with_warnings_catch_warni.assert_eq_": {"doc_hash": "1e266cff020b531239c4aeea9c4abea962c87fa1ad646fcece49d87acc0230a9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_warnings_test_measures.assert_isinstance_result_": {"doc_hash": "669a5df7fa4ecab0590c31c6164ad53d08148ade28f823bea881e4e4ea142bdc"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_npartitions_warn_test_read_text.pytest_raises_ValueError_": {"doc_hash": "dec1a8fbd8e3dd7c89d18b42f70a1ebab9dcb82ee99f8c0a32fef77c2cb24643"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_warn_test_to_textfiles_name_function_warn.with_tmpdir_as_dn_.a_to_textfiles_dn_name_f": {"doc_hash": "550e6ead8f42d923b4fa71740b4f2e688e1bc4b51d706b6aca7b647e791f1c72"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_from___future___import_an_maybe_wrap_pandas.return.x": {"doc_hash": "ecc9e45a19bb66033ca2136872335153219e80eb6d81055d9b468525e686257e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor_Accessor._property_map.return.self__series_map_partitio": {"doc_hash": "7f276c8f902cea83d5f0814eb1c3c4950a81f31f4e904de69e6ed8b5ccc71a04"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_DatetimeAccessor_DatetimeAccessor._accessor_properties._": {"doc_hash": "9103814bd4dd95f3fec9416e42626b4b6db783583e7f4611eae4834f817570ee"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor_StringAccessor._accessor_properties._": {"doc_hash": "ed10ed6ee6c8a3a4c8aab8d12509ea29663690f6bff8b567224ecf7d7a8a59c0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor._split_StringAccessor.rsplit.return.self__split_rsplit_pat": {"doc_hash": "30a2efd900c5b6921444af25c55ed192314ec30c4cfff6f837201d52b1393a72"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.cat_StringAccessor.__getitem__.return.self__series_map_partitio": {"doc_hash": "75d20a1b487da089081984cd0774298d43af5b7657c18ca8cb11117991ad162c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_str_extractall_CachedAccessor.__init__.self._accessor.accessor": {"doc_hash": "41daf0d72a801049a020bc0840b805e467edd20de4229a90d15ae6dc9c7ad03d"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_CachedAccessor.__get___CachedAccessor.__get__.return.accessor_obj": {"doc_hash": "85004d1c7380e20e4cf4868ab5c3705c7b168c3fe4febaa86a1080c2407daf60"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py__register_accessor__register_accessor.return.decorator": {"doc_hash": "9f73b58f864868f6e8b0638a3ca623ba0118c4ff9d13444158ef635f04583e27"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_register_dataframe_accessor_": {"doc_hash": "5d91eab1240db308fef28854a9d83994d9f9fdffd26583360566b47d391c9fca"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._create_dd_meta_ArrowDatasetEngine._create_dd_meta.if_categories_.meta.clear_known_categories_": {"doc_hash": "057e01a7a80e503a726f3e6698c7fe161dbb069a65c20775dbe46cb3f6fc3ce4"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._create_dd_meta.if_partition_obj__ArrowDatasetEngine._create_dd_meta.return.meta": {"doc_hash": "8d2014641bec200671f527a350028ac8e29d1de2714fc8542a9654c77a83ae11"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_gzip_tsv_text2._": {"doc_hash": "3d27e34b3f0f6e8dcbbcef987ea37be68e92df35d1b5a41e059a98d46fba6e4e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_timeseries_tsv_units_row.csv_units_row_replace_b_": {"doc_hash": "ab6cd50ec2c8e54f4f7d945025989508a6fb9c05317313857188eeea988168f3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_and_table_test_pandas_read_text_with_header.assert_df_id_sum_1_": {"doc_hash": "b04e46525ee4e1e06d0e53000f869db45a2cd77e41ffac9d184675255339a9e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_getitem_optimization_after_filter_test_getitem_optimization_after_filter.with_filetext_timeseries_.assert_eq_expect_ddf_": {"doc_hash": "d3b4fd6d164f0bafca89af5ce84f9e71bb4123682b5341079bbb36dc83a74e0f"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_fmt_warns_test_to_fmt_warns.with_tmpdir_as_dn_.a_to_csv_fn_name_functio": {"doc_hash": "3f70d8e0aab2ae3a60867557c20a0efff08a6e56ca1622a5a8840d7a8394b5c9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_after_filter_test_getitem_optimization_after_filter.assert_eq_df2_ddf2_": {"doc_hash": "d26e1155af46ef0f635c16df0aec5e8db9e37d90de19257c3ad5d2266d2b22c6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_after_filter_complex_test_getitem_optimization_after_filter_complex.assert_eq_df2_ddf2_": {"doc_hash": "18d6bee7215d1676e1d026e7d24a4e9aa81eb87f082e9a0776534cf261892697"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_with_partition_test_custom_filename_with_partition.assert_eq_": {"doc_hash": "8ebd55e892448f232070a26ec6113fcafd34cf8e6debc16cb0b1c8872ba8c6e3"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_partitioned_pyarrow_dataset_": {"doc_hash": "11c4c687ec66ad466854ceb9d5c0d45504f69eaa1421fcd078c91e39d3ba3e3e"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_dataframe_getitem_optimize_dataframe_getitem.dependencies.dsk_dependencies_copy_": {"doc_hash": "55092fe6195606ce2a6ea90596961980f9d6dff1648839c12e880f8816adf8fc"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_dataframe_getitem.for_io_layer_name_in_io_l_": {"doc_hash": "df09b5811c671469213931b2b59efb7efa8adb99e1aed7b27abca08e60b9ac14"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_none_test_str_accessor_split_noexpand.assert_call_ds_n_1_expa": {"doc_hash": 
"09b2a844dd8b840d794681a06bca2aab720f56c8c1f21a81171e397fbe952db9"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_split_expand_test_str_accessor_split_expand.None_2.assert_eq_": {"doc_hash": "f849de75b8e2cb910ed98c619d9ba95be7d9d51416f15fc038a041c4a8e5f513"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_split_expand_more_columns_": {"doc_hash": "24c18e5defd1b7257cbe912ceb6e8c035383c96aa72cd2c97095f1aacdf7b02c"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in__test_frame_series_arithmetic_methods.for_l_r_el_er_in_.assert_eq_l_rmod_r_fill_": {"doc_hash": "00cc90af817110704f42a2ad0262ed418d3d12556aaae23ade7bb0ffd74c64c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_test_corr.pytest_raises_TypeError_": {"doc_hash": "82f56d04231b4423ddea344d190c06db932c33262719289e288b9abe918e8818"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_same_name_test_autocorr.pytest_raises_TypeError_": {"doc_hash": "cdb9294ddddf90acd0443be965359166c5ed5d62dbe725c8dd98c83ffb69d484"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_infer_columns_test_astype.assert_eq_a_x_astype_floa": {"doc_hash": "9e91e35158455dfd3358b6307c10acfe1ccb2e39200cac5afbee684d98f35a70"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_same_test_meta_nonempty_uses_meta_value_if_provided.with_warnings_catch_warni.assert_eq_expected_actua": {"doc_hash": "c52f275155034478e567eccf0068bddaf2f1fbdfabd262371af0d4ab32a7eff5"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dask_dataframe_holds_scipy_sparse_containers_test_partitions_indexer.None_2": {"doc_hash": "bccb88febb6f9eaa0156164f5289c5ed59692cc6cff2c9ac7bf94d1189cb0b81"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_on_index_test_groupby_on_index.with_dask_config_set_sche.with_warnings_catch_warni.None_5": {"doc_hash": "44f67839fc951107937af336957da545a43181e1d5efc548e26ae6fa4bcc9025"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_test_concat_unknown_divisions.assert_not_record": {"doc_hash": "36f0d12a88caf50b217a4c1ece688a544c7247692d66e5b03253d369cc51513a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5_test_concat5.None_1.None_3": {"doc_hash": "649f46b931becf51d93c1c01240245c6116e54fe957053c01b0854a5b3b41ece"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_datetimeindex_check_append_with_warning.return.result": {"doc_hash": "30331211c7df8aeb28ead772ddae7a4731e9a9b208e54fdf29070b0cc28e7c58"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_outer_empty_test_dtype_equality_warning.assert_not_record": {"doc_hash": "ec168356234886231ff580c9c9698c24205cdf998078fda3cbfb8974a6269318"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_warnings_test_get_dummies.tm_assert_index_equal_res": {"doc_hash": "62e38b001a98c6163d838822905f17c687718afbb7c6a712e6fce8f48caa4288"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_plot_test_profiler_plot.assert_not_record": {"doc_hash": "693e1ec83b1fd7413803d4201ab5ffaee42556aee1c37408199fbaee6bbc41f0"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_test_cache_profiler_plot.assert_not_record": {"doc_hash": "b2db03d1e4bd9b5bb726bdfdd0f2af766e5d06221ab1ffad7ae7c4b25ed0b3a8"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dense_sparse_array_test_tokenize_dense_sparse_array.None_3": {"doc_hash": "4f16bea4c18829c499fe29db26ea7650ed1108d93fda14959e486080fd8553e6"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_as_if_collection_low_level_task_graph_": {"doc_hash": "f236f185368b63287a69832f764d07c1f37d6a2df31974e14694395c2b56476a"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_no_filename_test_dot_graph_no_filename.assert_isinstance_result_": {"doc_hash": "7cc0ed732c42f4ac7abb9bbff586b3f6f7ffe7b4ebcbf121897a114bc73dc9c7"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_defaults_test_dot_graph_defaults.try_.finally_.ensure_not_exists_target_": {"doc_hash": "c611aa80062e3acad2a0642d7f3ad63da51d9ebf75498c245493427d57af1a4b"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py____Options_for_HTML_out": {"doc_hash": "2aef0d9d03aa614c850b895b82b77694085d95d18abba50490c75db814897caf"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_html_theme_None_124": {"doc_hash": "d8e5a315d05b9eaec8f2434e55d959c3aeb8772adda74325cf858ba002d0f8ca"}}, "docstore/data": {"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/conftest.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/conftest.py_pytest_", "embedding": null, "metadata": {"file_path": "conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 71, "span_ids": ["imports", "pytest_addoption", "pytest_runtest_setup", "shuffle_method", "impl:23"], "tokens": 448}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask\n\n# The doctests in these files fail due to either:\n# - Non-required dependencies not being installed\n# - Imported doctests due to 
pulling the docstrings from other packages\n# (e.g. `numpy`). No need to run these doctests.\ncollect_ignore = [\n \"dask/bytes/hdfs3.py\",\n \"dask/bytes/pyarrow.py\",\n \"dask/bytes/s3.py\",\n \"dask/array/ghost.py\",\n \"dask/array/fft.py\",\n \"dask/dataframe/io/io.py\",\n \"dask/dataframe/io/parquet/arrow.py\",\n \"dask/dot.py\",\n \"dask/ml.py\",\n]\n\ncollect_ignore_glob = []\ntry:\n import numpy # noqa: F401\nexcept ImportError:\n collect_ignore_glob.append(\"dask/array/*\")\n\ntry:\n import pandas # noqa: F401\nexcept ImportError:\n collect_ignore_glob.append(\"dask/dataframe/*\")\n\ntry:\n import scipy # noqa: F401\nexcept ImportError:\n collect_ignore.append(\"dask/array/stats.py\")\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError:\n collect_ignore.append(\"dask/dataframe/io/orc/arrow.py\")\n\ntry:\n import tiledb # noqa: F401\nexcept ImportError:\n collect_ignore.append(\"dask/array/tiledb_io.py\")\n\ntry:\n import sqlalchemy # noqa: F401\nexcept ImportError:\n collect_ignore.append(\"dask/dataframe/io/sql.py\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--runslow\", action=\"store_true\", help=\"run slow tests\")\n\n\ndef pytest_runtest_setup(item):\n if \"slow\" in item.keywords and not item.config.getoption(\"--runslow\"):\n pytest.skip(\"need --runslow option to run\")\n\n\npytest.register_assert_rewrite(\n \"dask.array.utils\", \"dask.dataframe.utils\", \"dask.bag.utils\"\n)\n\n\n@pytest.fixture(params=[\"disk\", \"tasks\"])\ndef shuffle_method(request):\n with dask.config.set(shuffle=request.param):\n yield request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py__This_file_helps_to_comp_register_vcs_handler.return.decorate": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py__This_file_helps_to_comp_register_vcs_handler.return.decorate", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 66, "span_ids": ["VersioneerConfig", "impl", "NotThisMethod", "get_keywords", "register_vcs_handler", "docstring", "get_config"], "tokens": 481}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# This file helps to compute a version number in source trees obtained from\n# git-archive tarball (such as those provided by github's download-from-tag\n# feature). Distribution tarballs (built by setup.py sdist) and build\n# directories (produced by setup.py build) will contain a much shorter file\n# that just contains the computed version number.\n\n# This file is released into the public domain. 
Generated by\n# versioneer-0.16 (https://github.com/warner/python-versioneer)\n\n\"\"\"Git implementation of _version.py.\"\"\"\n\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef get_keywords():\n \"\"\"Get the keywords needed to look up the version information.\"\"\"\n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. _version.py will just call\n # get_keywords().\n git_refnames = \"$Format:%d$\"\n git_full = \"$Format:%H$\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full}\n return keywords\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_config():\n \"\"\"Create, populate and return the VersioneerConfig() object.\"\"\"\n # these strings are filled in when 'setup.py versioneer' creates\n # _version.py\n cfg = VersioneerConfig()\n cfg.VCS = \"git\"\n cfg.style = \"pep440\"\n cfg.tag_prefix = \"\"\n cfg.parentdir_prefix = \"dask-\"\n cfg.versionfile_source = \"dask/_version.py\"\n cfg.verbose = False\n return cfg\n\n\nclass NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n\nLONG_VERSION_PY: dict = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n\n return decorate", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_run_command_run_command.return.stdout": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_run_command_run_command.return.stdout", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 69, "end_line": 100, "span_ids": ["run_command"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen(\n [c] + args,\n cwd=cwd,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr else None),\n )\n break\n except FileNotFoundError:\n continue\n except OSError as e:\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None\n else:\n if verbose:\n print(f\"unable to find command, tried {commands}\")\n return None\n stdout = p.communicate()[0].strip().decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n return None\n return stdout", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_versions_from_parentdir_versions_from_parentdir.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_versions_from_parentdir_versions_from_parentdir.return._", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 105, "end_line": 124, "span_ids": ["versions_from_parentdir"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes\n both the project name and a version string.\n \"\"\"\n dirname = os.path.basename(root)\n if not dirname.startswith(parentdir_prefix):\n if verbose:\n print(\n \"guessing rootdir is '%s', but '%s' doesn't start with \"\n \"prefix '%s'\" % (root, dirname, parentdir_prefix)\n )\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n return {\n \"version\": dirname[len(parentdir_prefix) :],\n \"full-revisionid\": None,\n \"dirty\": False,\n \"error\": None,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_get_keywords_git_get_keywords.return.keywords": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_get_keywords_git_get_keywords.return.keywords", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 125, "end_line": 147, "span_ids": ["git_get_keywords"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. 
This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs)\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n f.close()\n except OSError:\n pass\n return keywords", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_versions_from_keywords_git_versions_from_keywords.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_versions_from_keywords_git_versions_from_keywords.return._", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 150, "end_line": 198, "span_ids": ["git_versions_from_keywords"], "tokens": 563}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = {r.strip() for r in refnames.strip(\"()\").split(\",\")}\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"main\".\n tags = {r for r in refs if re.search(r\"\\d\", r)}\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. 
\"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix) :]\n if verbose:\n print(\"picking %s\" % r)\n return {\n \"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": None,\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": \"no suitable tags\",\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 201, "end_line": 291, "span_ids": ["git_pieces_from_vcs"], "tokens": 784}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n if not os.path.exists(os.path.join(root, \".git\")):\n if verbose:\n print(\"no .git in %s\" % root)\n raise NotThisMethod(\"no .git directory\")\n\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out = run_command(\n GITS,\n [\n \"describe\",\n \"--tags\",\n \"--dirty\",\n \"--always\",\n \"--long\",\n \"--match\",\n \"%s*\" % tag_prefix,\n ],\n cwd=root,\n )\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[: git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r\"^(.+)-(\\d+)-g([0-9a-f]+)$\", git_describe)\n if not mo:\n # unparseable. Maybe git-describe is misbehaving?\n pieces[\"error\"] = \"unable to parse git-describe output: '%s'\" % describe_out\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = \"tag '{}' doesn't start with prefix '{}'\".format(\n full_tag,\n tag_prefix,\n )\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix) :]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"], cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n return pieces", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_plus_or_dot_render_pep440.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_plus_or_dot_render_pep440.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 296, "end_line": 324, "span_ids": ["plus_or_dot", "render_pep440"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"\n\n\ndef render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_pre_render_pep440_post.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_pre_render_pep440_post.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 327, "end_line": 367, "span_ids": ["render_pep440_post", "render_pep440_pre"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered\n\n\ndef render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyway.\n\n Exceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_old_render_pep440_old.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_pep440_old_render_pep440_old.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 370, "end_line": 389, "span_ids": ["render_pep440_old"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_render_git_describe.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_render_git_describe.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 392, "end_line": 409, "span_ids": ["render_git_describe"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_long_render_git_describe_long.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_git_describe_long_render_git_describe_long.return.rendered", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 412, "end_line": 429, "span_ids": ["render_git_describe_long"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always --long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_render.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_render_render.return._", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 432, "end_line": 465, "span_ids": ["render"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\n \"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n }\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n 
elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\n \"version\": rendered,\n \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"],\n \"error\": None,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_get_versions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/_version.py_get_versions_", "embedding": null, "metadata": {"file_path": "dask/_version.py", "file_name": "_version.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 516, "span_ids": ["get_versions"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_versions():\n \"\"\"Get version information or return default if unable to do so.\"\"\"\n # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have\n # __file__, we can work backwards from there to the root. Some\n # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which\n # case we can only use expanded keywords.\n\n cfg = get_config()\n verbose = cfg.verbose\n\n try:\n return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)\n except NotThisMethod:\n pass\n\n try:\n root = os.path.realpath(__file__)\n # versionfile_source is the relative path from the top of the source\n # tree (where the .git directory might live) to this file. 
Invert\n # this to find the root from __file__.\n for i in cfg.versionfile_source.split(\"/\"):\n root = os.path.dirname(root)\n except NameError:\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to find root of source tree\",\n }\n\n try:\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\n return render(pieces, cfg.style)\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n except NotThisMethod:\n pass\n\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\",\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/__init__.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/__init__.py_try__", "embedding": null, "metadata": {"file_path": "dask/array/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 268, "span_ids": ["impl"], "tokens": 1202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n from ..base import compute\n from . import backends, fft, lib, linalg, ma, overlap, random\n from .blockwise import atop, blockwise\n from .chunk_types import register_chunk_type\n from .core import (\n Array,\n PerformanceWarning,\n asanyarray,\n asarray,\n block,\n broadcast_arrays,\n broadcast_to,\n concatenate,\n from_array,\n from_delayed,\n from_npy_stack,\n from_zarr,\n map_blocks,\n stack,\n store,\n to_hdf5,\n to_npy_stack,\n to_zarr,\n unify_chunks,\n )\n from .creation import (\n arange,\n diag,\n diagonal,\n empty_like,\n eye,\n fromfunction,\n full_like,\n indices,\n linspace,\n meshgrid,\n ones_like,\n pad,\n repeat,\n tile,\n tri,\n zeros_like,\n )\n from .gufunc import apply_gufunc, as_gufunc, gufunc\n from .numpy_compat import moveaxis, rollaxis\n from .optimization import optimize\n from .overlap import map_overlap\n from .percentile import percentile\n from .rechunk import rechunk\n from .reductions import (\n all,\n any,\n argmax,\n argmin,\n argtopk,\n cumprod,\n cumsum,\n max,\n mean,\n median,\n min,\n moment,\n nanargmax,\n nanargmin,\n nancumprod,\n nancumsum,\n nanmax,\n nanmean,\n nanmedian,\n nanmin,\n nanprod,\n nanstd,\n nansum,\n nanvar,\n prod,\n reduction,\n std,\n sum,\n topk,\n trace,\n var,\n )\n from .reshape import reshape\n from .routines import (\n allclose,\n append,\n apply_along_axis,\n apply_over_axes,\n argwhere,\n around,\n array,\n atleast_1d,\n atleast_2d,\n atleast_3d,\n average,\n bincount,\n choose,\n coarsen,\n compress,\n corrcoef,\n count_nonzero,\n cov,\n delete,\n diff,\n digitize,\n dot,\n dstack,\n ediff1d,\n einsum,\n expand_dims,\n extract,\n flatnonzero,\n flip,\n fliplr,\n flipud,\n gradient,\n histogram,\n histogram2d,\n histogramdd,\n hstack,\n insert,\n isclose,\n isin,\n isnull,\n matmul,\n ndim,\n nonzero,\n notnull,\n outer,\n 
piecewise,\n ptp,\n ravel,\n ravel_multi_index,\n result_type,\n roll,\n rot90,\n round,\n searchsorted,\n select,\n shape,\n squeeze,\n swapaxes,\n take,\n tensordot,\n transpose,\n tril,\n tril_indices,\n tril_indices_from,\n triu,\n triu_indices,\n triu_indices_from,\n union1d,\n unique,\n unravel_index,\n vdot,\n vstack,\n where,\n )\n from .tiledb_io import from_tiledb, to_tiledb\n from .ufunc import (\n absolute,\n add,\n angle,\n arccos,\n arccosh,\n arcsin,\n arcsinh,\n arctan,\n arctan2,\n arctanh,\n bitwise_and,\n bitwise_not,\n bitwise_or,\n bitwise_xor,\n cbrt,\n ceil,\n clip,\n conj,\n copysign,\n cos,\n cosh,\n deg2rad,\n degrees,\n divide,\n divmod,\n equal,\n exp,\n exp2,\n expm1,\n fabs,\n fix,\n float_power,\n floor,\n floor_divide,\n fmax,\n fmin,\n fmod,\n frexp,\n frompyfunc,\n greater,\n greater_equal,\n hypot,\n i0,\n imag,\n invert,\n iscomplex,\n isfinite,\n isinf,\n isnan,\n isneginf,\n isposinf,\n isreal,\n ldexp,\n less,\n less_equal,\n log,\n log1p,\n log2,\n log10,\n logaddexp,\n logaddexp2,\n logical_and,\n logical_not,\n logical_or,\n logical_xor,\n maximum,\n minimum,\n mod,\n modf,\n multiply,\n nan_to_num,\n negative,\n nextafter,\n not_equal,\n power,\n rad2deg,\n radians,\n real,\n reciprocal,\n remainder,\n rint,\n sign,\n signbit,\n sin,\n sinc,\n sinh,\n spacing,\n sqrt,\n square,\n subtract,\n tan,\n tanh,\n true_divide,\n trunc,\n )\n from .utils import assert_eq\n from .wrap import empty, full, ones, zeros\n\nexcept ImportError as e:\n msg = (\n \"Dask array requirements are not installed.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install dask # either conda install\\n\"\n ' python -m pip install \"dask[array]\" --upgrade # or python -m pip install'\n )\n raise ImportError(str(e) + \"\\n\\n\" + msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_numbers_blockwise._Tensor_operation_Gene": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_numbers_blockwise._Tensor_operation_Gene", "embedding": null, "metadata": {"file_path": "dask/array/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 153, "span_ids": ["imports", "blockwise"], "tokens": 1361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numbers\nimport warnings\n\nimport tlz as toolz\n\nfrom .. import base, utils\nfrom ..blockwise import blockwise as core_blockwise\nfrom ..delayed import unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\n\n\ndef blockwise(\n func,\n out_ind,\n *args,\n name=None,\n token=None,\n dtype=None,\n adjust_chunks=None,\n new_axes=None,\n align_arrays=True,\n concatenate=None,\n meta=None,\n **kwargs,\n):\n \"\"\"Tensor operation: Generalized inner and outer products\n\n A broad class of blocked algorithms and patterns can be specified with a\n concise multi-index notation. 
The ``blockwise`` function applies an in-memory\n function across multiple blocks of multiple inputs in a variety of ways.\n Many dask.array operations are special cases of blockwise including\n elementwise, broadcasting, reductions, tensordot, and transpose.\n\n Parameters\n ----------\n func : callable\n Function to apply to individual tuples of blocks\n out_ind : iterable\n Block pattern of the output, something like 'ijk' or (1, 2, 3)\n *args : sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n **kwargs : dict\n Extra keyword arguments to pass to function\n dtype : np.dtype\n Datatype of resulting array.\n concatenate : bool, keyword only\n If true concatenate arrays along dummy indices, else provide lists\n adjust_chunks : dict\n Dictionary mapping index to function to be applied to chunk sizes\n new_axes : dict, keyword only\n New indexes and their dimension lengths\n\n Examples\n --------\n 2D embarrassingly parallel operation from two arrays, x, and y.\n\n >>> import operator, numpy as np, dask.array as da\n >>> x = da.from_array([[1, 2],\n ... [3, 4]], chunks=(1, 2))\n >>> y = da.from_array([[10, 20],\n ... [0, 0]])\n >>> z = blockwise(operator.add, 'ij', x, 'ij', y, 'ij', dtype='f8')\n >>> z.compute()\n array([[11, 22],\n [ 3, 4]])\n\n Outer product multiplying a by b, two 1-d vectors\n\n >>> a = da.from_array([0, 1, 2], chunks=1)\n >>> b = da.from_array([10, 50, 100], chunks=1)\n >>> z = blockwise(np.outer, 'ij', a, 'i', b, 'j', dtype='f8')\n >>> z.compute()\n array([[ 0, 0, 0],\n [ 10, 50, 100],\n [ 20, 100, 200]])\n\n z = x.T\n\n >>> z = blockwise(np.transpose, 'ji', x, 'ij', dtype=x.dtype)\n >>> z.compute()\n array([[1, 3],\n [2, 4]])\n\n The transpose case above is illustrative because it does transposition\n both on each in-memory block by calling ``np.transpose`` and on the order\n of the blocks themselves, by switching the order of the index ``ij -> ji``.\n\n We can compose these same patterns with more variables and more complex\n in-memory functions\n\n z = X + Y.T\n\n >>> z = blockwise(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji', dtype='f8')\n >>> z.compute()\n array([[11, 2],\n [23, 4]])\n\n Any index, like ``i`` missing from the output index is interpreted as a\n contraction (note that this differs from Einstein convention; repeated\n indices do not imply contraction.) In the case of a contraction the passed\n function should expect an iterable of blocks on any array that holds that\n index. To receive arrays concatenated along contracted dimensions instead\n pass ``concatenate=True``.\n\n Inner product multiplying a by b, two 1-d vectors\n\n >>> def sequence_dot(a_blocks, b_blocks):\n ... result = 0\n ... for a, b in zip(a_blocks, b_blocks):\n ... result += a.dot(b)\n ... return result\n\n >>> z = blockwise(sequence_dot, '', a, 'i', b, 'i', dtype='f8')\n >>> z.compute()\n 250\n\n Add new single-chunk dimensions with the ``new_axes=`` keyword, including\n the length of the new dimension. New dimensions will always be in a single\n chunk.\n\n >>> def f(a):\n ... return a[:, None] * np.ones((1, 5))\n\n >>> z = blockwise(f, 'az', a, 'a', new_axes={'z': 5}, dtype=a.dtype)\n\n New dimensions can also be multi-chunk by specifying a tuple of chunk\n sizes. 
This has limited utility as is (because the chunks are all the\n same), but the resulting graph can be modified to achieve more useful\n results (see ``da.map_blocks``).\n\n >>> z = blockwise(f, 'az', a, 'a', new_axes={'z': (5, 5)}, dtype=x.dtype)\n >>> z.chunks\n ((1, 1, 1), (5, 5))\n\n If the applied function changes the size of each chunk, you can specify this\n with an ``adjust_chunks={...}`` dictionary holding a function for each index\n that modifies the dimension size in that index.\n\n >>> def double(x):\n ... return np.concatenate([x, x])\n\n >>> y = blockwise(double, 'ij', x, 'ij',\n ... adjust_chunks={'i': lambda n: 2 * n}, dtype=x.dtype)\n >>> y.chunks\n ((2, 2), (2,))\n\n Include literals by indexing with None\n\n >>> z = blockwise(operator.add, 'ij', x, 'ij', 1234, None, dtype=x.dtype)\n >>> z.compute()\n array([[1235, 1236],\n [1237, 1238]])\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.out_blockwise.chunks._chunkss_i_for_i_in_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.out_blockwise.chunks._chunkss_i_for_i_in_out_", "embedding": null, "metadata": {"file_path": "dask/array/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 154, "end_line": 252, "span_ids": ["blockwise"], "tokens": 743}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockwise(\n func,\n out_ind,\n *args,\n name=None,\n token=None,\n dtype=None,\n adjust_chunks=None,\n new_axes=None,\n align_arrays=True,\n concatenate=None,\n meta=None,\n **kwargs,\n):\n out = name\n new_axes = new_axes or {}\n\n # Input Validation\n if len(set(out_ind)) != len(out_ind):\n raise ValueError(\n \"Repeated elements not allowed in output index\",\n [k for k, v in toolz.frequencies(out_ind).items() if v > 1],\n )\n new = (\n set(out_ind)\n - {a for arg in args[1::2] if arg is not None for a in arg}\n - set(new_axes or ())\n )\n if new:\n raise ValueError(\"Unknown dimension\", new)\n\n from .core import normalize_arg, unify_chunks\n\n if align_arrays:\n chunkss, arrays = unify_chunks(*args)\n else:\n arginds = [(a, i) for (a, i) in toolz.partition(2, args) if i is not None]\n chunkss = {}\n # For each dimension, use the input chunking that has the most blocks;\n # this will ensure that broadcasting works as expected, and in\n # particular the number of blocks should be correct if the inputs are\n # consistent.\n for arg, ind in arginds:\n for c, i in zip(arg.chunks, ind):\n if i not in chunkss or len(c) > len(chunkss[i]):\n chunkss[i] = c\n arrays = args[::2]\n\n for k, v in new_axes.items():\n if not isinstance(v, tuple):\n v = (v,)\n chunkss[k] = v\n\n arginds = zip(arrays, args[1::2])\n numblocks = {}\n\n dependencies = []\n arrays = []\n\n # Normalize arguments\n argindsstr = []\n\n for arg, ind in arginds:\n if ind is None:\n arg = normalize_arg(arg)\n arg, collections = 
unpack_collections(arg)\n dependencies.extend(collections)\n else:\n if (\n hasattr(arg, \"ndim\")\n and hasattr(ind, \"__len__\")\n and arg.ndim != len(ind)\n ):\n raise ValueError(\n \"Index string %s does not match array dimension %d\"\n % (ind, arg.ndim)\n )\n numblocks[arg.name] = arg.numblocks\n arrays.append(arg)\n arg = arg.name\n argindsstr.extend((arg, ind))\n\n # Normalize keyword arguments\n kwargs2 = {}\n for k, v in kwargs.items():\n v = normalize_arg(v)\n v, collections = unpack_collections(v)\n dependencies.extend(collections)\n kwargs2[k] = v\n\n # Finish up the name\n if not out:\n out = \"{}-{}\".format(\n token or utils.funcname(func).strip(\"_\"),\n base.tokenize(func, out_ind, argindsstr, dtype, **kwargs),\n )\n\n graph = core_blockwise(\n func,\n out,\n out_ind,\n *argindsstr,\n numblocks=numblocks,\n dependencies=dependencies,\n new_axes=new_axes,\n concatenate=concatenate,\n **kwargs2,\n )\n graph = HighLevelGraph.from_collections(\n out, graph, dependencies=arrays + dependencies\n )\n\n chunks = [chunkss[i] for i in out_ind]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.if_adjust_chunks__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/blockwise.py_blockwise.if_adjust_chunks__", "embedding": null, "metadata": {"file_path": "dask/array/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 253, "end_line": 286, "span_ids": ["impl:2", "blockwise", "atop"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockwise(\n func,\n out_ind,\n *args,\n name=None,\n token=None,\n dtype=None,\n adjust_chunks=None,\n new_axes=None,\n align_arrays=True,\n concatenate=None,\n meta=None,\n **kwargs,\n):\n # ... 
other code\n if adjust_chunks:\n for i, ind in enumerate(out_ind):\n if ind in adjust_chunks:\n if callable(adjust_chunks[ind]):\n chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))\n elif isinstance(adjust_chunks[ind], numbers.Integral):\n chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])\n elif isinstance(adjust_chunks[ind], (tuple, list)):\n if len(adjust_chunks[ind]) != len(chunks[i]):\n raise ValueError(\n f\"Dimension {i} has {len(chunks[i])} blocks, adjust_chunks \"\n f\"specified with {len(adjust_chunks[ind])} blocks\"\n )\n chunks[i] = tuple(adjust_chunks[ind])\n else:\n raise NotImplementedError(\n \"adjust_chunks values must be callable, int, or tuple\"\n )\n chunks = tuple(chunks)\n\n if meta is None:\n from .utils import compute_meta\n\n meta = compute_meta(func, dtype, *args[::2], **kwargs)\n return new_da_object(graph, out, chunks, meta=meta, dtype=dtype)\n\n\ndef atop(*args, **kwargs):\n warnings.warn(\"The da.atop function has moved to da.blockwise\")\n return blockwise(*args, **kwargs)\n\n\nfrom .core import new_da_object", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__A_set_of_NumPy_functi_keepdims_wrapper.return.keepdims_wrapped_callable": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__A_set_of_NumPy_functi_keepdims_wrapper.return.keepdims_wrapped_callable", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 44, "span_ids": ["keepdims_wrapper", "docstring"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" A set of NumPy functions to apply per chunk \"\"\"\nimport contextlib\nfrom collections.abc import Container, Iterable, Sequence\nfrom functools import wraps\nfrom numbers import Integral\n\nimport numpy as np\nfrom tlz import concat\n\nfrom ..core import flatten\n\n\ndef keepdims_wrapper(a_callable):\n \"\"\"\n A wrapper for functions that don't provide keepdims to ensure that they do.\n \"\"\"\n\n @wraps(a_callable)\n def keepdims_wrapped_callable(x, axis=None, keepdims=None, *args, **kwargs):\n r = a_callable(x, axis=axis, *args, **kwargs)\n\n if not keepdims:\n return r\n\n axes = axis\n\n if axes is None:\n axes = range(x.ndim)\n\n if not isinstance(axes, (Container, Iterable, Sequence)):\n axes = [axes]\n\n r_slice = tuple()\n for each_axis in range(x.ndim):\n if each_axis in axes:\n r_slice += (None,)\n else:\n r_slice += (slice(None),)\n\n r = r[r_slice]\n\n return r\n\n return keepdims_wrapped_callable", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__Wrap_NumPy_functions_to_None_2.nanstd.np_nanstd": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py__Wrap_NumPy_functions_to_None_2.nanstd.np_nanstd", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 47, "end_line": 79, "span_ids": ["impl", "keepdims_wrapper"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Wrap NumPy functions to ensure they provide keepdims.\nsum = np.sum\nprod = np.prod\nmin = np.min\nmax = np.max\nargmin = keepdims_wrapper(np.argmin)\nnanargmin = keepdims_wrapper(np.nanargmin)\nargmax = keepdims_wrapper(np.argmax)\nnanargmax = keepdims_wrapper(np.nanargmax)\nany = np.any\nall = np.all\nnansum = np.nansum\nnanprod = np.nanprod\n\nnancumprod = np.nancumprod\nnancumsum = np.nancumsum\n\nnanmin = np.nanmin\nnanmax = np.nanmax\nmean = np.mean\n\nwith contextlib.suppress(AttributeError):\n nanmean = np.nanmean\n\nvar = np.var\n\nwith contextlib.suppress(AttributeError):\n nanvar = np.nanvar\n\nstd = np.std\n\nwith contextlib.suppress(AttributeError):\n nanstd = np.nanstd", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_coarsen_coarsen.return.reduction_x_reshape_newsh": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_coarsen_coarsen.return.reduction_x_reshape_newsh", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 142, "span_ids": ["coarsen"], "tokens": 549}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def coarsen(reduction, x, axes, trim_excess=False, **kwargs):\n \"\"\"Coarsen array by applying reduction to fixed size neighborhoods\n\n Parameters\n ----------\n reduction: function\n Function like np.sum, np.mean, etc...\n x: np.ndarray\n Array to be coarsened\n axes: dict\n Mapping of axis to coarsening factor\n\n Examples\n --------\n >>> x = np.array([1, 2, 3, 4, 5, 6])\n >>> coarsen(np.sum, x, {0: 2})\n array([ 3, 7, 11])\n >>> coarsen(np.max, x, {0: 3})\n array([3, 6])\n\n Provide dictionary of scale per dimension\n\n >>> x = np.arange(24).reshape((4, 6))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10, 11],\n [12, 13, 14, 15, 16, 17],\n [18, 19, 20, 21, 22, 23]])\n\n >>> coarsen(np.min, x, {0: 2, 1: 3})\n array([[ 0, 3],\n [12, 15]])\n\n You must avoid excess elements explicitly\n\n >>> x = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n >>> coarsen(np.min, x, {0: 3}, trim_excess=True)\n array([1, 4])\n \"\"\"\n # Insert singleton dimensions if they don't exist already\n for i in range(x.ndim):\n if i not in 
axes:\n axes[i] = 1\n\n if trim_excess:\n ind = tuple(\n slice(0, -(d % axes[i])) if d % axes[i] else slice(None, None)\n for i, d in enumerate(x.shape)\n )\n x = x[ind]\n\n # (10, 10) -> (5, 2, 5, 2)\n newshape = tuple(concat([(x.shape[i] // axes[i], axes[i]) for i in range(x.ndim)]))\n\n return reduction(x.reshape(newshape), axis=tuple(range(1, x.ndim * 2, 2)), **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_trim_trim.return.x_tuple_slice_ax_ax_if_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_trim_trim.return.x_tuple_slice_ax_ax_if_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 146, "end_line": 165, "span_ids": ["trim"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def trim(x, axes=None):\n \"\"\"Trim boundaries off of array\n\n >>> x = np.arange(24).reshape((4, 6))\n >>> trim(x, axes={0: 0, 1: 1})\n array([[ 1, 2, 3, 4],\n [ 7, 8, 9, 10],\n [13, 14, 15, 16],\n [19, 20, 21, 22]])\n\n >>> trim(x, axes={0: 1, 1: 1})\n array([[ 7, 8, 9, 10],\n [13, 14, 15, 16]])\n \"\"\"\n if isinstance(axes, Integral):\n axes = [axes] * x.ndim\n if isinstance(axes, dict):\n axes = [axes.get(i, 0) for i in range(x.ndim)]\n\n return x[tuple(slice(ax, -ax if ax else None) for ax in axes)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_topk.return.a_tuple_k_slice_if_i_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_topk.return.a_tuple_k_slice_if_i_a", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 168, "end_line": 183, "span_ids": ["topk"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def topk(a, k, axis, keepdims):\n \"\"\"Chunk and combine function of topk\n\n Extract the k largest elements from a on the given axis.\n If k is negative, extract the -k smallest elements instead.\n Note that, unlike in the parent function, the returned elements\n are not sorted internally.\n \"\"\"\n assert keepdims is True\n axis = axis[0]\n if abs(k) >= a.shape[axis]:\n return a\n\n a = np.partition(a, -k, axis=axis)\n k_slice = slice(-k, None) if k > 0 else slice(-k)\n return a[tuple(k_slice if i == axis 
else slice(None) for i in range(a.ndim))]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_aggregate_argtopk_preprocess.return.a_idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_topk_aggregate_argtopk_preprocess.return.a_idx", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 186, "end_line": 209, "span_ids": ["argtopk_preprocess", "topk_aggregate"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def topk_aggregate(a, k, axis, keepdims):\n \"\"\"Final aggregation function of topk\n\n Invoke topk one final time and then sort the results internally.\n \"\"\"\n assert keepdims is True\n a = topk(a, k, axis, keepdims)\n axis = axis[0]\n a = np.sort(a, axis=axis)\n if k < 0:\n return a\n return a[\n tuple(\n slice(None, None, -1) if i == axis else slice(None) for i in range(a.ndim)\n )\n ]\n\n\ndef argtopk_preprocess(a, idx):\n \"\"\"Preparatory step for argtopk\n\n Put data together with its original indices in a tuple.\n \"\"\"\n return a, idx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_aggregate_argtopk_aggregate.return.idx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_aggregate_argtopk_aggregate.return.idx_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 234, "end_line": 253, "span_ids": ["argtopk_aggregate"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def argtopk_aggregate(a_plus_idx, k, axis, keepdims):\n \"\"\"Final aggregation function of argtopk\n\n Invoke argtopk one final time, sort the results internally, drop the data\n and return the index only.\n \"\"\"\n assert keepdims is True\n a_plus_idx = a_plus_idx if len(a_plus_idx) > 1 else a_plus_idx[0]\n a, idx = argtopk(a_plus_idx, k, axis, keepdims)\n axis = axis[0]\n\n idx2 = np.argsort(a, axis=axis)\n idx = np.take_along_axis(idx, idx2, axis)\n if k < 0:\n return idx\n return idx[\n tuple(\n slice(None, None, -1) if i == axis else slice(None) for i in range(idx.ndim)\n )\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_arange_view.if_order_C_.else_.return.x_T_view_dtype_T": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_arange_view.if_order_C_.else_.return.x_T_view_dtype_T", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 296, "span_ids": ["view", "linspace", "astype", "arange"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arange(start, stop, step, length, dtype, like=None):\n from .utils import arange_safe\n\n res = arange_safe(start, stop, step, dtype, like=like)\n return res[:-1] if len(res) > length else res\n\n\ndef linspace(start, stop, num, endpoint=True, dtype=None):\n from .core import Array\n\n if isinstance(start, Array):\n start = start.compute()\n\n if isinstance(stop, Array):\n stop = stop.compute()\n\n return np.linspace(start, stop, num, endpoint=endpoint, dtype=dtype)\n\n\ndef astype(x, astype_dtype=None, **kwargs):\n return x.astype(astype_dtype, **kwargs)\n\n\ndef view(x, dtype, order=\"C\"):\n if order == \"C\":\n try:\n x = np.ascontiguousarray(x, like=x)\n except TypeError:\n x = np.ascontiguousarray(x)\n return x.view(dtype)\n else:\n try:\n x = np.asfortranarray(x, like=x)\n except TypeError:\n x = np.asfortranarray(x)\n return x.T.view(dtype).T", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_idx_if_i_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_idx_if_i_axis_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 342, "span_ids": ["slice_with_int_dask_array"], "tokens": 394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array(x, idx, offset, x_size, axis):\n \"\"\"Chunk function of `slice_with_int_dask_array_on_axis`.\n Slice one chunk of x by one chunk of idx.\n\n Parameters\n ----------\n x: ndarray, any dtype, any shape\n i-th chunk of x\n idx: ndarray, ndim=1, dtype=any integer\n j-th chunk of idx (cartesian product with the chunks of x)\n offset: ndarray, shape=(1, ), dtype=int64\n Index of the first element along axis of the current chunk of x\n x_size: int\n Total size of the x da.Array 
along axis\n axis: int\n normalized axis to take elements from (0 <= axis < x.ndim)\n\n Returns\n -------\n x sliced along axis, using only the elements of idx that fall inside the\n current chunk.\n \"\"\"\n from .utils import asarray_safe, meta_from_array\n\n idx = asarray_safe(idx, like=meta_from_array(x))\n\n # Needed when idx is unsigned\n idx = idx.astype(np.int64)\n\n # Normalize negative indices\n idx = np.where(idx < 0, idx + x_size, idx)\n\n # A chunk of the offset dask Array is a numpy array with shape (1, ).\n # It indicates the index of the first element along axis of the current\n # chunk of x.\n idx = idx - offset\n\n # Drop elements of idx that do not fall inside the current chunk of x\n idx_filter = (idx >= 0) & (idx < x.shape[axis])\n idx = idx[idx_filter]\n\n # np.take does not support slice indices\n # return np.take(x, idx, axis)\n return x[tuple(idx if i == axis else slice(None) for i in range(x.ndim))]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_try__", "embedding": null, "metadata": {"file_path": "dask/array/chunk_types.py", "file_name": "chunk_types.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 149, "span_ids": ["is_valid_array_chunk", "impl:3", "is_valid_chunk_type"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n import cupy\n\n register_chunk_type(cupy.ndarray)\nexcept ImportError:\n pass\n\ntry:\n from cupyx.scipy.sparse import spmatrix\n\n register_chunk_type(spmatrix)\nexcept ImportError:\n pass\n\ntry:\n import sparse\n\n register_chunk_type(sparse.SparseArray)\nexcept ImportError:\n pass\n\ntry:\n import scipy.sparse\n\n register_chunk_type(scipy.sparse.spmatrix)\nexcept ImportError:\n pass\n\n\ndef is_valid_chunk_type(type):\n \"\"\"Check if given type is a valid chunk and downcast array type\"\"\"\n try:\n return type in _HANDLED_CHUNK_TYPES or issubclass(\n type, tuple(_HANDLED_CHUNK_TYPES)\n )\n except TypeError:\n return False\n\n\ndef is_valid_array_chunk(array):\n \"\"\"Check if given array is of a valid type to operate with\"\"\"\n return array is None or isinstance(array, tuple(_HANDLED_CHUNK_TYPES))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_slices_from_chunks_slices_from_chunks.return.list_product_slices_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_slices_from_chunks_slices_from_chunks.return.list_product_slices_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 191, "end_line": 207, "span_ids": ["slices_from_chunks"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slices_from_chunks(chunks):\n \"\"\"Translate chunks tuple to a set of slices in product order\n\n >>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE\n [(slice(0, 2, None), slice(0, 3, None)),\n (slice(0, 2, None), slice(3, 6, None)),\n (slice(0, 2, None), slice(6, 9, None)),\n (slice(2, 4, None), slice(0, 3, None)),\n (slice(2, 4, None), slice(3, 6, None)),\n (slice(2, 4, None), slice(6, 9, None))]\n \"\"\"\n cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]\n slices = [\n [slice(s, s + dim) for s, dim in zip(starts, shapes)]\n for starts, shapes in zip(cumdims, chunks)\n ]\n return list(product(*slices))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_dotmany_dotmany.return.sum_map_partial_np_dot_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_dotmany_dotmany.return.sum_map_partial_np_dot_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 252, "end_line": 271, "span_ids": ["dotmany"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):\n \"\"\"Dot product of many aligned chunks\n\n >>> x = np.array([[1, 2], [1, 2]])\n >>> y = np.array([[10, 20], [10, 20]])\n >>> dotmany([x, x, x], [y, y, y])\n array([[ 90, 180],\n [ 90, 180]])\n\n Optionally pass in functions to apply to the left and right chunks\n\n >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)\n array([[150, 150],\n [150, 150]])\n \"\"\"\n if leftfunc:\n A = map(leftfunc, A)\n if rightfunc:\n B = map(rightfunc, B)\n return sum(map(partial(np.dot, **kwargs), A, B))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_apply_infer_dtype_apply_infer_dtype.return.o_dtype_if_nout_is_None_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_apply_infer_dtype_apply_infer_dtype.return.o_dtype_if_nout_is_None_e", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 362, "end_line": 431, "span_ids": ["apply_infer_dtype"], "tokens": 
511}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \"\"\"\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n \"\"\"\n from .utils import meta_from_array\n\n # make sure that every arg is an evaluated array\n args = [\n np.ones_like(meta_from_array(x), shape=((1,) * x.ndim), dtype=x.dtype)\n if is_arraylike(x)\n else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n f\"`dtype` inference failed in `{funcname}`.\\n\\n\"\n f\"{suggest}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n f\"{e!r}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n f\"{tb}\"\n )\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_extra_argpairs__map_blocks.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_extra_argpairs__map_blocks.return.out", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 759, "end_line": 782, "span_ids": ["map_blocks"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # ... other code\n\n if extra_argpairs:\n # Rewrite the Blockwise layer. 
It would be nice to find a way to\n # avoid doing it twice, but it's currently needed to determine\n # out.chunks from the first pass. Since it constructs a Blockwise\n # rather than an expanded graph, it shouldn't be too expensive.\n out = blockwise(\n _pass_extra_kwargs,\n out_ind,\n func,\n None,\n tuple(extra_names),\n None,\n *concat(extra_argpairs),\n *concat(argpairs),\n name=out.name,\n dtype=out.dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=dict(zip(out_ind, out.chunks)),\n meta=meta,\n **kwargs,\n )\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_chunks_broadcast_chunks.return.tuple_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_chunks_broadcast_chunks.return.tuple_result_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 785, "end_line": 826, "span_ids": ["broadcast_chunks"], "tokens": 422}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_chunks(*chunkss):\n \"\"\"Construct a chunks tuple that broadcasts many chunks tuples\n\n >>> a = ((5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((5, 5),)\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((1,), (5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((3, 3,), (5, 5),)\n >>> broadcast_chunks(a, b)\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]\n \"\"\"\n if not chunkss:\n return ()\n elif len(chunkss) == 1:\n return chunkss[0]\n n = max(map(len, chunkss))\n chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]\n result = []\n for i in range(n):\n step1 = [c[i] for c in chunkss2]\n if all(c == (1,) for c in step1):\n step2 = step1\n else:\n step2 = [c for c in step1 if c != (1,)]\n if len(set(step2)) != 1:\n raise ValueError(\"Chunks do not align: %s\" % str(step2))\n result.append(step2[0])\n return tuple(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_blockdims_from_blockshape_blockdims_from_blockshape.return.tuple_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_blockdims_from_blockshape_blockdims_from_blockshape.return.tuple_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 974, "end_line": 1000, "span_ids": ["blockdims_from_blockshape"], "tokens": 
274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockdims_from_blockshape(shape, chunks):\n \"\"\"\n\n >>> blockdims_from_blockshape((10, 10), (4, 3))\n ((4, 4, 2), (3, 3, 3, 1))\n >>> blockdims_from_blockshape((10, 0), (4, 0))\n ((4, 4, 2), (0,))\n \"\"\"\n if chunks is None:\n raise TypeError(\"Must supply chunks= keyword argument\")\n if shape is None:\n raise TypeError(\"Must supply shape= keyword argument\")\n if np.isnan(sum(shape)) or np.isnan(sum(chunks)):\n raise ValueError(\n \"Array chunk sizes are unknown. shape: %s, chunks: %s%s\"\n % (shape, chunks, unknown_chunk_message)\n )\n if not all(map(is_integer, chunks)):\n raise ValueError(\"chunks can only contain integers.\")\n if not all(map(is_integer, shape)):\n raise ValueError(\"shape can only contain integers.\")\n shape = tuple(map(int, shape))\n chunks = tuple(map(int, chunks))\n return tuple(\n ((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))\n for d, bd in zip(shape, chunks)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_finalize_CHUNKS_NONE_ERROR_MESSAGE._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_finalize_CHUNKS_NONE_ERROR_MESSAGE._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1203, "end_line": 1221, "span_ids": ["finalize", "impl:9"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def finalize(results):\n if not results:\n return concatenate3(results)\n results2 = results\n while isinstance(results2, (tuple, list)):\n if len(results2) > 1:\n return concatenate3(results)\n else:\n results2 = results2[0]\n return unpack_singleton(results)\n\n\nCHUNKS_NONE_ERROR_MESSAGE = \"\"\"\nYou must specify a chunks= keyword argument.\nThis specifies the chunksize of your array blocks.\n\nSee the following documentation page for details:\n https://docs.dask.org/en/latest/array-creation.html#chunks\n\"\"\".strip()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__new___Array.__new__.return.self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__new___Array.__new__.return.self", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 1210, "end_line": 1268, "span_ids": ["Array.__new__"], "tokens": 456}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):\n self = super().__new__(cls)\n assert isinstance(dask, Mapping)\n if not isinstance(dask, HighLevelGraph):\n dask = HighLevelGraph.from_collections(name, dask, dependencies=())\n self.dask = dask\n self._name = str(name)\n meta = meta_from_array(meta, dtype=dtype)\n\n if (\n isinstance(chunks, str)\n or isinstance(chunks, tuple)\n and chunks\n and any(isinstance(c, str) for c in chunks)\n ):\n dt = meta.dtype\n else:\n dt = None\n self._chunks = normalize_chunks(chunks, shape, dtype=dt)\n if self.chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)\n\n for plugin in config.get(\"array_plugins\", ()):\n result = plugin(self)\n if result is not None:\n self = result\n\n try:\n layer = self.dask.layers[name]\n except (AttributeError, KeyError):\n # self is no longer an Array after applying the plugins, OR\n # a plugin replaced the HighLevelGraph with a plain dict, OR\n # name is not the top layer's name (this can happen after the layer is\n # manipulated, to avoid a collision)\n pass\n else:\n if layer.collection_annotations is None:\n layer.collection_annotations = {\n \"shape\": self.shape,\n \"dtype\": self.dtype,\n \"chunksize\": self.chunksize,\n \"chunks\": self.chunks,\n \"type\": typename(type(self)),\n \"chunk_type\": typename(type(self._meta)),\n }\n else:\n layer.collection_annotations.update(\n {\n \"shape\": self.shape,\n \"dtype\": self.dtype,\n \"chunksize\": self.chunksize,\n \"chunks\": self.chunks,\n \"type\": typename(type(self)),\n \"chunk_type\": typename(type(self._meta)),\n }\n )\n\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__reduce___Array.__dask_keys__.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__reduce___Array.__dask_keys__.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1086, "end_line": 1112, "span_ids": ["Array.__dask_layers__", "Array.__dask_keys__", "Array.__reduce__", "Array.__dask_graph__"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __reduce__(self):\n return (Array, (self.dask, self.name, self.chunks, self.dtype))\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_layers__(self):\n return (self.name,)\n\n def 
__dask_keys__(self):\n if self._cached_keys is not None:\n return self._cached_keys\n\n name, chunks, numblocks = self.name, self.chunks, self.numblocks\n\n def keys(*args):\n if not chunks:\n return [(name,)]\n ind = len(args)\n if ind + 1 == len(numblocks):\n result = [(name,) + args + (i,) for i in range(numblocks[ind])]\n else:\n result = [keys(*(args + (i,))) for i in range(numblocks[ind])]\n return result\n\n self._cached_keys = result = keys()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__dask_tokenize___Array.npartitions.return.reduce_mul_self_numblock": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__dask_tokenize___Array.npartitions.return.reduce_mul_self_numblock", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1298, "end_line": 1342, "span_ids": ["Array.numblocks", "Array.__dask_tokenize__", "Array:5", "Array._reset_cache", "Array.__dask_postpersist__", "Array.__dask_postcompute__", "Array._rebuild", "Array.npartitions", "Array._key_array"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"array_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()\n\n def _rebuild(self, dsk, *, rename=None):\n name = self._name\n if rename:\n name = rename.get(name, name)\n return Array(dsk, name, self.chunks, self.dtype, self._meta)\n\n def _reset_cache(self, key=None):\n \"\"\"\n Reset cached properties.\n\n Parameters\n ----------\n key : str, optional\n Remove specified key. 
The default removes all items.\n \"\"\"\n if key is None:\n self.__dict__.clear()\n else:\n self.__dict__.pop(key, None)\n\n @cached_property\n def _key_array(self):\n return np.array(self.__dask_keys__(), dtype=object)\n\n @cached_property\n def numblocks(self):\n return tuple(map(len, self.chunks))\n\n @cached_property\n def npartitions(self):\n return reduce(mul, self.numblocks, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.compute_chunk_sizes_Array.compute_chunk_sizes.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.compute_chunk_sizes_Array.compute_chunk_sizes.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1344, "end_line": 1391, "span_ids": ["Array.compute_chunk_sizes"], "tokens": 373}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def compute_chunk_sizes(self):\n \"\"\"\n Compute the chunk sizes for a Dask array. This is especially useful\n when the chunk sizes are unknown (e.g., when indexing one Dask array\n with another).\n\n Notes\n -----\n This function modifies the Dask array in-place.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)\n >>> x.chunks\n ((2, 2, 1),)\n >>> y = x[x <= 0]\n >>> y.chunks\n ((nan, nan, nan),)\n >>> y.compute_chunk_sizes() # in-place computation\n dask.array\n >>> y.chunks\n ((2, 1, 0),)\n\n \"\"\"\n x = self\n chunk_shapes = x.map_blocks(\n _get_chunk_shape,\n dtype=int,\n chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),\n new_axis=x.ndim,\n )\n\n c = []\n for i in range(x.ndim):\n s = x.ndim * [0] + [i]\n s[i] = slice(None)\n s = tuple(s)\n\n c.append(tuple(chunk_shapes[s]))\n\n # `map_blocks` assigns numpy dtypes\n # cast chunk dimensions back to python int before returning\n x._chunks = tuple(\n tuple(int(chunk) for chunk in chunks) for chunks in compute(tuple(c))[0]\n )\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_ufunc___Array.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_ufunc___Array.__array_ufunc__.if_method___call___.else_.return.NotImplemented", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1270, "end_line": 1307, "span_ids": ["Array.__array_ufunc__"], "tokens": 297}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if _should_delegate(x):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc is np.matmul:\n from .routines import matmul\n\n # special case until apply_gufunc handles optional dimensions\n return matmul(*inputs, **kwargs)\n if numpy_ufunc.signature is not None:\n from .gufunc import apply_gufunc\n\n return apply_gufunc(\n numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs\n )\n if numpy_ufunc.nout > 1:\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc(*inputs, **kwargs)\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n elif method == \"outer\":\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc.outer(*inputs, **kwargs)\n else:\n return NotImplemented", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.ndim_Array.__array__.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.ndim_Array.__array__.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1531, "end_line": 1586, "span_ids": ["Array._name", "Array.ndim", "Array.name_32", "Array.__array__", "Array._name_30", "Array.size", "Array:9", "Array.name", "Array.itemsize", "Array.__iter__", "Array.nbytes"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @cached_property\n def ndim(self):\n return len(self.shape)\n\n @cached_property\n def size(self):\n \"\"\"Number of elements in array\"\"\"\n return reduce(mul, self.shape, 1)\n\n @property\n def nbytes(self):\n \"\"\"Number of bytes in array\"\"\"\n return self.size * self.dtype.itemsize\n\n @property\n def itemsize(self):\n \"\"\"Length of one array element in bytes\"\"\"\n return self.dtype.itemsize\n\n @property\n def _name(self):\n return self.__name\n\n @_name.setter\n def _name(self, val):\n self.__name = val\n # Clear the key cache when the name is reset\n self._cached_keys = None\n self._reset_cache(\"_key_array\")\n\n @property\n def name(self):\n return self.__name\n\n @name.setter\n def name(self, val):\n raise TypeError(\n \"Cannot set name directly\\n\\n\"\n \"Name is used to relate the array to the task graph.\\n\"\n \"It is uncommon to need to change it, but if you do\\n\"\n \"please set ``._name``\"\n )\n\n 
def __iter__(self):\n for i in range(len(self)):\n yield self[i]\n\n __array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix\n\n def __array__(self, dtype=None, **kwargs):\n x = self.compute()\n if dtype and x.dtype != dtype:\n x = x.astype(dtype)\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function___Array.__array_function__.handle_nonmatching_names.return._HANDLED_FUNCTIONS_func_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function___Array.__array_function__.handle_nonmatching_names.return._HANDLED_FUNCTIONS_func_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1371, "end_line": 1392, "span_ids": ["Array.__array_function__"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __array_function__(self, func, types, args, kwargs):\n import dask.array as module\n\n def handle_nonmatching_names(func, args, kwargs):\n if func not in _HANDLED_FUNCTIONS:\n warnings.warn(\n \"The `{}` function is not implemented by Dask array. \"\n \"You may want to use the da.map_blocks function \"\n \"or something similar to silence this warning. \"\n \"Your code may stop working in a future release.\".format(\n func.__module__ + \".\" + func.__name__\n ),\n FutureWarning,\n )\n # Need to convert to array object (e.g. numpy.ndarray or\n # cupy.ndarray) as needed, so we can call the NumPy function\n # again and it gets the chance to dispatch to the right\n # implementation.\n args, kwargs = compute(args, kwargs)\n return func(*args, **kwargs)\n\n return _HANDLED_FUNCTIONS[func](*args, **kwargs)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function__._First_verify_that_all__Array.__array_function__.return.da_func_args_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__array_function__._First_verify_that_all__Array.__array_function__.return.da_func_args_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1611, "end_line": 1636, "span_ids": ["Array.__array_function__"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __array_function__(self, func, types, args, kwargs):\n\n # First, verify that all types are handled by Dask. Otherwise, return NotImplemented.\n if not all(type is Array or is_valid_chunk_type(type) for type in types):\n return NotImplemented\n\n # Now try to find a matching function name. If that doesn't work, we may\n # be dealing with an alias or a function that's simply not in the Dask API.\n # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.\n for submodule in func.__module__.split(\".\")[1:]:\n try:\n module = getattr(module, submodule)\n except AttributeError:\n return handle_nonmatching_names(func, args, kwargs)\n\n if not hasattr(module, func.__name__):\n return handle_nonmatching_names(func, args, kwargs)\n\n da_func = getattr(module, func.__name__)\n if da_func is func:\n return handle_nonmatching_names(func, args, kwargs)\n\n # If ``like`` is contained in ``da_func``'s signature, add ``like=self``\n # to the kwargs dictionary.\n if has_keyword(da_func, \"like\"):\n kwargs[\"like\"] = self\n\n return da_func(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._elemwise_Array.to_svg.return.svg_self_chunks_size_siz": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._elemwise_Array.to_svg.return.svg_self_chunks_size_siz", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1415, "end_line": 1447, "span_ids": ["Array.to_svg", "Array.store", "Array._elemwise"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def 
_elemwise(self):\n return elemwise\n\n @wraps(store)\n def store(self, target, **kwargs):\n r = store([self], [target], **kwargs)\n\n if kwargs.get(\"return_stored\", False):\n r = r[0]\n\n return r\n\n def to_svg(self, size=500):\n \"\"\"Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n from .svg import svg\n\n return svg(self.chunks, size=size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_hdf5_Array.to_hdf5.return.to_hdf5_filename_datapat": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_hdf5_Array.to_hdf5.return.to_hdf5_filename_datapat", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1449, "end_line": 1463, "span_ids": ["Array.to_hdf5"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def to_hdf5(self, filename, datapath, **kwargs):\n \"\"\"Store array in HDF5 file\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n return to_hdf5(filename, datapath, self, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_dask_dataframe_Array.to_dask_dataframe.return.from_dask_array_self_col": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_dask_dataframe_Array.to_dask_dataframe.return.from_dask_array_self_col", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1465, "end_line": 1494, "span_ids": ["Array.to_dask_dataframe"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def to_dask_dataframe(self, columns=None, index=None, meta=None):\n \"\"\"Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of 
column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divisions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. By default, pandas DataFrame is used.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n \"\"\"\n from ..dataframe import from_dask_array\n\n return from_dask_array(self, columns=columns, index=index, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__getitem___Array.__getitem__.return.Array_graph_out_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__getitem___Array.__getitem__.return.Array_graph_out_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1705, "end_line": 1763, "span_ids": ["Array.__getitem__"], "tokens": 509}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __getitem__(self, index):\n # Field access, e.g.
x['a'] or x[['a', 'b']]\n if isinstance(index, str) or (\n isinstance(index, list) and index and all(isinstance(i, str) for i in index)\n ):\n if isinstance(index, str):\n dt = self.dtype[index]\n else:\n dt = np.dtype(\n {\n \"names\": index,\n \"formats\": [self.dtype.fields[name][0] for name in index],\n \"offsets\": [self.dtype.fields[name][1] for name in index],\n \"itemsize\": self.dtype.itemsize,\n }\n )\n\n if dt.shape:\n new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))\n chunks = self.chunks + tuple((i,) for i in dt.shape)\n return self.map_blocks(\n getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis\n )\n else:\n return self.map_blocks(getitem, index, dtype=dt)\n\n if not isinstance(index, tuple):\n index = (index,)\n\n from .slicing import (\n normalize_index,\n slice_with_bool_dask_array,\n slice_with_int_dask_array,\n )\n\n index2 = normalize_index(index, self.shape)\n dependencies = {self.name}\n for i in index2:\n if isinstance(i, Array):\n dependencies.add(i.name)\n\n if any(isinstance(i, Array) and i.dtype.kind in \"iu\" for i in index2):\n self, index2 = slice_with_int_dask_array(self, index2)\n if any(isinstance(i, Array) and i.dtype == bool for i in index2):\n self, index2 = slice_with_bool_dask_array(self, index2)\n\n if all(isinstance(i, slice) and i == slice(None) for i in index2):\n return self\n\n out = \"getitem-\" + tokenize(self, index2)\n dsk, chunks = slice_array(out, self.name, self.chunks, index2, self.itemsize)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n\n meta = meta_from_array(self._meta, ndim=len(chunks))\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return Array(graph, out, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._vindex_Array._vindex.return._vindex_self_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._vindex_Array._vindex.return._vindex_self_key_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1594, "end_line": 1612, "span_ids": ["Array._vindex"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def _vindex(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n if any(k is None for k in key):\n raise IndexError(\n \"vindex does not support indexing with None (np.newaxis), \"\n \"got {}\".format(key)\n )\n if all(isinstance(k, slice) for k in key):\n if all(\n k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)\n ):\n return self\n raise IndexError(\n \"vindex requires at least one non-slice to vectorize over \"\n \"when the slices are not over the entire array (i.e, x[:]). \"\n \"Use normal slicing instead when only using slices. 
Got: {}\".format(key)\n )\n return _vindex(self, *key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.vindex_Array.vindex.return.IndexCallable_self__vinde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.vindex_Array.vindex.return.IndexCallable_self__vinde", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1674, "end_line": 1696, "span_ids": ["Array.vindex"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def vindex(self):\n \"\"\"Vectorized indexing with broadcasting.\n\n This is equivalent to numpy's advanced indexing, using arrays that are\n broadcast against each other. This allows for pointwise indexing:\n\n >>> import dask.array as da\n >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> x = da.from_array(x, chunks=2)\n >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()\n array([1, 5, 9])\n\n Mixed basic/advanced indexing with slices/arrays is also supported. The\n order of dimensions in the result follows those proposed for\n `ndarray.vindex `_:\n the subspace spanned by arrays is followed by all slices.\n\n Note: ``vindex`` provides more general functionality than standard\n indexing, but it also has fewer optimizations and can be significantly\n slower.\n \"\"\"\n return IndexCallable(self._vindex)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.partitions_Array.partitions.return.self_blocks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.partitions_Array.partitions.return.self_blocks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1941, "end_line": 1982, "span_ids": ["Array.partitions"], "tokens": 474}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def partitions(self):\n \"\"\"Slice an array by partitions. Alias of dask array .blocks attribute.\n\n This alias allows you to write agnostic code that works with both\n dask arrays and dask dataframes.\n\n This returns a ``Blockview`` object that provides an array-like interface\n to the blocks of a dask array. 
Numpy-style indexing of a ``Blockview`` object\n returns a selection of blocks as a new dask array.\n\n You can index ``array.blocks`` like a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.blocks.size). The dimensionality of the output array matches\n the dimension of this array, even if integer indices are passed.\n Slicing with ``np.newaxis`` or multiple lists is not supported.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(8, chunks=2)\n >>> x.partitions.shape # aliases x.numblocks\n (4,)\n >>> x.partitions[0].compute()\n array([0, 1])\n >>> x.partitions[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.partitions[::2].compute()\n array([0, 1, 4, 5])\n >>> x.partitions[[-1, 0]].compute()\n array([6, 7, 0, 1])\n >>> x.partitions.ravel() # doctest: +NORMALIZE_WHITESPACE\n [dask.array,\n dask.array,\n dask.array,\n dask.array]\n\n Returns\n -------\n An instance of ``da.array.Blockview``\n \"\"\"\n return self.blocks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.dot_Array.argtopk.return.argtopk_self_k_axis_axi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.dot_Array.argtopk.return.argtopk_self_k_axis_axi", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1984, "end_line": 2058, "span_ids": ["Array.T", "Array.argtopk", "Array.dot", "Array.choose", "Array.reshape", "Array.A", "Array.topk", "Array.transpose", "Array.ravel", "Array:15"], "tokens": 503}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def dot(self, other):\n from .routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n\n @property\n def A(self):\n return self\n\n @property\n def T(self):\n return self.transpose()\n\n @derived_from(np.ndarray)\n def transpose(self, *axes):\n from .routines import transpose\n\n if not axes:\n axes = None\n elif len(axes) == 1 and isinstance(axes[0], Iterable):\n axes = axes[0]\n if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):\n # no transpose necessary\n return self\n else:\n return transpose(self, axes=axes)\n\n @derived_from(np.ndarray)\n def ravel(self):\n from .routines import ravel\n\n return ravel(self)\n\n flatten = ravel\n\n @derived_from(np.ndarray)\n def choose(self, choices):\n from .routines import choose\n\n return choose(self, choices)\n\n @derived_from(np.ndarray)\n def reshape(self, *shape, merge_chunks=True, limit=None):\n \"\"\"\n .. 
note::\n\n See :meth:`dask.array.reshape` for an explanation of\n the ``merge_chunks`` and `limit` keywords.\n \"\"\"\n from .reshape import reshape\n\n if len(shape) == 1 and not isinstance(shape[0], Number):\n shape = shape[0]\n return reshape(self, shape, merge_chunks=merge_chunks, limit=limit)\n\n def topk(self, k, axis=-1, split_every=None):\n \"\"\"The top k elements of an array.\n\n See :func:`dask.array.topk` for docstring.\n\n \"\"\"\n from .reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n\n def argtopk(self, k, axis=-1, split_every=None):\n \"\"\"The indices of the top k elements of an array.\n\n See :func:`dask.array.argtopk` for docstring.\n\n \"\"\"\n from .reductions import argtopk\n\n return argtopk(self, k, axis=axis, split_every=split_every)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.astype_Array.astype.return.self_map_blocks_chunk_ast": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.astype_Array.astype.return.self_map_blocks_chunk_ast", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2060, "end_line": 2098, "span_ids": ["Array.astype"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def astype(self, dtype, **kwargs):\n \"\"\"Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n copy : bool, optional\n By default, astype always returns a newly allocated array. 
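Since the ``topk``/``argtopk`` methods above defer their docstrings elsewhere, here is a minimal sketch of their semantics; the input values are illustrative assumptions:

import dask.array as da

x = da.from_array([5, 1, 3, 6], chunks=2)
print(x.topk(2).compute())     # [6 5], the two largest values in descending order
print(x.argtopk(2).compute())  # [3 0], the indices of those values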
If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n \"\"\"\n # Scalars don't take `casting` or `copy` kwargs - as such we only pass\n # them to `map_blocks` if specified by user (different than defaults).\n extra = set(kwargs) - {\"casting\", \"copy\"}\n if extra:\n raise TypeError(\n f\"astype does not take the following keyword arguments: {list(extra)}\"\n )\n casting = kwargs.get(\"casting\", \"unsafe\")\n dtype = np.dtype(dtype)\n if self.dtype == dtype:\n return self\n elif not np.can_cast(self.dtype, dtype, casting=casting):\n raise TypeError(\n f\"Cannot cast array from {self.dtype!r} to {dtype!r} \"\n f\"according to the rule {casting!r}\"\n )\n return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__abs___Array.__sub__.return.elemwise_operator_sub_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__abs___Array.__sub__.return.elemwise_operator_sub_se", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1845, "end_line": 1955, "span_ids": ["Array.__ne__", "Array.__invert__", "Array.__rand__", "Array.__rmod__", "Array.__radd__", "Array.__lshift__", "Array.__rpow__", "Array.__ror__", "Array.__rdiv__", "Array.__le__", "Array.__mod__", "Array.__eq__", "Array.__div__", "Array.__rlshift__", "Array.__sub__", "Array.__mul__", "Array.__add__", "Array.__and__", "Array.__neg__", "Array.__ge__", "Array.__lt__", "Array.__gt__", "Array.__pos__", "Array.__rshift__", "Array.__rmul__", "Array.__rrshift__", "Array.__pow__", "Array.__abs__", "Array.__or__"], "tokens": 805}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __abs__(self):\n return elemwise(operator.abs, self)\n\n @check_if_handled_given_other\n def __add__(self, other):\n return elemwise(operator.add, self, other)\n\n @check_if_handled_given_other\n def __radd__(self, other):\n return elemwise(operator.add, other, self)\n\n @check_if_handled_given_other\n def __and__(self, other):\n return elemwise(operator.and_, self, other)\n\n @check_if_handled_given_other\n def __rand__(self, other):\n return elemwise(operator.and_, other, self)\n\n @check_if_handled_given_other\n def __div__(self, other):\n return elemwise(operator.div, self, other)\n\n @check_if_handled_given_other\n def __rdiv__(self, other):\n return elemwise(operator.div, other, self)\n\n @check_if_handled_given_other\n def __eq__(self, other):\n return elemwise(operator.eq, self, other)\n\n @check_if_handled_given_other\n def __gt__(self, other):\n return elemwise(operator.gt, self, other)\n\n @check_if_handled_given_other\n def __ge__(self, other):\n return elemwise(operator.ge, self, other)\n\n def __invert__(self):\n return elemwise(operator.invert, self)\n\n 
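A sketch of the casting check that ``astype`` performs above: the default casting="unsafe" permits lossy conversions, while casting="safe" triggers the TypeError raised in the method body.

import dask.array as da

x = da.ones(4, chunks=2, dtype="f8")
print(x.astype("f4").dtype)         # float32, allowed under the default "unsafe"
try:
    x.astype("f4", casting="safe")  # float64 -> float32 can lose precision
except TypeError as e:
    print(e)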
@check_if_handled_given_other\n def __lshift__(self, other):\n return elemwise(operator.lshift, self, other)\n\n @check_if_handled_given_other\n def __rlshift__(self, other):\n return elemwise(operator.lshift, other, self)\n\n @check_if_handled_given_other\n def __lt__(self, other):\n return elemwise(operator.lt, self, other)\n\n @check_if_handled_given_other\n def __le__(self, other):\n return elemwise(operator.le, self, other)\n\n @check_if_handled_given_other\n def __mod__(self, other):\n return elemwise(operator.mod, self, other)\n\n @check_if_handled_given_other\n def __rmod__(self, other):\n return elemwise(operator.mod, other, self)\n\n @check_if_handled_given_other\n def __mul__(self, other):\n return elemwise(operator.mul, self, other)\n\n @check_if_handled_given_other\n def __rmul__(self, other):\n return elemwise(operator.mul, other, self)\n\n @check_if_handled_given_other\n def __ne__(self, other):\n return elemwise(operator.ne, self, other)\n\n def __neg__(self):\n return elemwise(operator.neg, self)\n\n @check_if_handled_given_other\n def __or__(self, other):\n return elemwise(operator.or_, self, other)\n\n def __pos__(self):\n return self\n\n @check_if_handled_given_other\n def __ror__(self, other):\n return elemwise(operator.or_, other, self)\n\n @check_if_handled_given_other\n def __pow__(self, other):\n return elemwise(operator.pow, self, other)\n\n @check_if_handled_given_other\n def __rpow__(self, other):\n return elemwise(operator.pow, other, self)\n\n @check_if_handled_given_other\n def __rshift__(self, other):\n return elemwise(operator.rshift, self, other)\n\n @check_if_handled_given_other\n def __rrshift__(self, other):\n return elemwise(operator.rshift, other, self)\n\n @check_if_handled_given_other\n def __sub__(self, other):\n return elemwise(operator.sub, self, other)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__rsub___Array.sum.return.sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__rsub___Array.sum.return.sum_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1957, "end_line": 2056, "span_ids": ["Array.__rxor__", "Array.argmin", "Array.__matmul__", "Array.__rdivmod__", "Array.all", "Array.argmax", "Array.sum", "Array.__rfloordiv__", "Array.__truediv__", "Array.__floordiv__", "Array.__xor__", "Array.any", "Array.__rtruediv__", "Array.min", "Array.__divmod__", "Array.__rsub__", "Array.max", "Array.__rmatmul__"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @check_if_handled_given_other\n def __rsub__(self, other):\n return elemwise(operator.sub, other, self)\n\n @check_if_handled_given_other\n def __truediv__(self, other):\n return elemwise(operator.truediv, self, other)\n\n @check_if_handled_given_other\n def __rtruediv__(self, other):\n return 
elemwise(operator.truediv, other, self)\n\n @check_if_handled_given_other\n def __floordiv__(self, other):\n return elemwise(operator.floordiv, self, other)\n\n @check_if_handled_given_other\n def __rfloordiv__(self, other):\n return elemwise(operator.floordiv, other, self)\n\n @check_if_handled_given_other\n def __xor__(self, other):\n return elemwise(operator.xor, self, other)\n\n @check_if_handled_given_other\n def __rxor__(self, other):\n return elemwise(operator.xor, other, self)\n\n @check_if_handled_given_other\n def __matmul__(self, other):\n from .routines import matmul\n\n return matmul(self, other)\n\n @check_if_handled_given_other\n def __rmatmul__(self, other):\n from .routines import matmul\n\n return matmul(other, self)\n\n @check_if_handled_given_other\n def __divmod__(self, other):\n from .ufunc import divmod\n\n return divmod(self, other)\n\n @check_if_handled_given_other\n def __rdivmod__(self, other):\n from .ufunc import divmod\n\n return divmod(other, self)\n\n @derived_from(np.ndarray)\n def any(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import any\n\n return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def all(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import all\n\n return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def min(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import min\n\n return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def max(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import max\n\n return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmin(self, axis=None, split_every=None, out=None):\n from .reductions import argmin\n\n return argmin(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmax(self, axis=None, split_every=None, out=None):\n from .reductions import argmax\n\n return argmax(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.trace_Array.var.return.var_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.trace_Array.var.return.var_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2058, "end_line": 2120, "span_ids": ["Array.std", "Array.trace", "Array.var", "Array.mean", "Array.prod"], "tokens": 402}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
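The reduction methods above all forward to dask.array.reductions; a small sketch of the shared axis/keepdims/split_every keywords, with illustrative shapes:

import dask.array as da

x = da.ones((4, 6), chunks=2)
print(x.sum().compute())                        # 24.0, reduce over everything
print(x.sum(axis=0, keepdims=True).shape)       # (1, 6)
print(x.mean(axis=1, split_every=2).compute())  # tree-reduce two chunks at a time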
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n from .reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n\n @derived_from(np.ndarray)\n def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import prod\n\n return prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import mean\n\n return mean(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def std(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import std\n\n return std(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def var(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import var\n\n return var(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.moment_Array.map_blocks.return.map_blocks_func_self_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.moment_Array.map_blocks.return.map_blocks_func_self_a", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2122, "end_line": 2181, "span_ids": ["Array.moment", "Array.map_blocks"], "tokens": 415}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def moment(\n self,\n order,\n axis=None,\n dtype=None,\n keepdims=False,\n ddof=0,\n split_every=None,\n out=None,\n ):\n \"\"\"Calculate the nth centralized moment.\n\n Parameters\n ----------\n order : int\n Order of the moment that is returned, must be >= 2.\n axis : int, optional\n Axis along which the central moment is computed. The default is to\n compute the moment of the flattened array.\n dtype : data-type, optional\n Type to use in computing the moment. For arrays of integer type the\n default is float64; for arrays of float types it is the same as the\n array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the original array.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n N - ddof, where N represents the number of elements. 
By default\n ddof is zero.\n\n Returns\n -------\n moment : ndarray\n\n References\n ----------\n .. [1] Pebay, Philippe (2008), \"Formulas for Robust, One-Pass Parallel\n Computation of Covariances and Arbitrary-Order Statistical Moments\",\n Technical Report SAND2008-6212, Sandia National Laboratories.\n\n \"\"\"\n\n from .reductions import moment\n\n return moment(\n self,\n order,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @wraps(map_blocks)\n def map_blocks(self, func, *args, **kwargs):\n return map_blocks(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.map_overlap_Array.map_overlap.return.map_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.map_overlap_Array.map_overlap.return.map_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2438, "end_line": 2525, "span_ids": ["Array.map_overlap"], "tokens": 1100}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \"\"\"Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Note that this function will attempt to automatically determine the output\n array type before computing it, please refer to the ``meta`` keyword argument\n in :func:`map_blocks ` if you expect that the function will not succeed when\n operating on 0-d arrays.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in :func:`map_blocks `.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = da.from_array(x, chunks=5)\n >>> def derivative(x):\n ... 
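A sketch of the ``moment`` method documented above, cross-checked against NumPy; the random data is an illustrative assumption:

import numpy as np
import dask.array as da

data = np.random.default_rng(0).normal(size=1000)
x = da.from_array(data, chunks=100)
m2 = x.moment(2).compute()          # second central moment with ddof=0
assert np.isclose(m2, data.var())   # equals the population variance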
return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect')\n >>> y.compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(()))\n >>> y\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>\n >>> y.compute()\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n\n >>> import cupy # doctest: +SKIP\n >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP\n >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(())) # doctest: +SKIP\n >>> y # doctest: +SKIP\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>\n >>> y.compute() # doctest: +SKIP\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n \"\"\"\n from .overlap import map_overlap\n\n return map_overlap(\n func, self, depth=depth, boundary=boundary, trim=trim, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.view_Array.view.return.self_map_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.view_Array.view.return.self_map_blocks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2289, "end_line": 2330, "span_ids": ["Array.view"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def view(self, dtype=None, order=\"C\"):\n \"\"\"Get a view of the array as a new data type\n\n Parameters\n ----------\n dtype:\n The dtype by which to view the array.\n The default, None, results in the view having the same data-type\n as the original array.\n order: string\n 'C' or 'F' (Fortran) ordering\n\n This reinterprets the bytes of the array under a new dtype. If that\n dtype does not have the same size as the original array then the shape\n will change.\n\n Beware that both numpy and dask.array can behave oddly when taking\n shape-changing views of arrays under Fortran ordering. 
Under some\n versions of NumPy this function will fail when taking shape-changing\n views of Fortran ordered arrays if the first dimension has chunks of\n size one.\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n dtype = np.dtype(dtype)\n mult = self.dtype.itemsize / dtype.itemsize\n\n if order == \"C\":\n chunks = self.chunks[:-1] + (\n tuple(ensure_int(c * mult) for c in self.chunks[-1]),\n )\n elif order == \"F\":\n chunks = (\n tuple(ensure_int(c * mult) for c in self.chunks[0]),\n ) + self.chunks[1:]\n else:\n raise ValueError(\"Order must be one of 'C' or 'F'\")\n\n return self.map_blocks(\n chunk.view, dtype, order=order, dtype=dtype, chunks=chunks\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.swapaxes_Array.__deepcopy__.return.c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.swapaxes_Array.__deepcopy__.return.c", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2332, "end_line": 2356, "span_ids": ["Array.swapaxes", "Array.copy", "Array.__deepcopy__", "Array.round"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def swapaxes(self, axis1, axis2):\n from .routines import swapaxes\n\n return swapaxes(self, axis1, axis2)\n\n @derived_from(np.ndarray)\n def round(self, decimals=0):\n from .routines import round\n\n return round(self, decimals=decimals)\n\n def copy(self):\n \"\"\"\n Copy array. 
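A sketch of the byte reinterpretation that ``view`` implements above: halving the itemsize doubles the chunk lengths along the last axis under C order.

import dask.array as da

x = da.zeros(4, chunks=2, dtype="int64")
y = x.view("int32")
print(y.shape, y.chunks)  # (8,) ((4, 4),), chunk sizes scaled by 2
print(y.compute().dtype)  # int32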
This is a no-op for dask.arrays, which are immutable\n \"\"\"\n if self.npartitions == 1:\n return self.map_blocks(M.copy)\n else:\n return Array(self.dask, self.name, self.chunks, meta=self)\n\n def __deepcopy__(self, memo):\n c = self.copy()\n memo[id(self)] = c\n return c", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_delayed_Array.to_delayed.return.np_array_L_dtype_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.to_delayed_Array.to_delayed.return.np_array_L_dtype_object_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2667, "end_line": 2688, "span_ids": ["Array.to_delayed"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into an array of :class:`dask.delayed.Delayed` objects, one per chunk.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n :class:`dask.delayed.Delayed` objects.\n\n See Also\n --------\n dask.array.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n layer = self.__dask_layers__()[0]\n if optimize_graph:\n graph = self.__dask_optimize__(graph, keys) # TODO, don't collapse graph\n layer = \"delayed-\" + self.name\n graph = HighLevelGraph.from_collections(layer, graph, dependencies=())\n L = ndeepmap(self.ndim, lambda k: Delayed(k, graph, layer=layer), keys)\n return np.array(L, dtype=object)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.repeat_ensure_int.return.i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.repeat_ensure_int.return.i", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2621, "end_line": 2658, "span_ids": ["ensure_int", "Array.to_zarr", "Array.to_tiledb", "Array.repeat", "Array.nonzero"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def repeat(self, repeats, axis=None):\n from .creation import repeat\n\n return repeat(self, repeats, axis=axis)\n\n 
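A sketch of ``to_delayed`` from above paired with ``from_delayed``: the array is lowered to one Delayed per chunk, and a chunk can be lifted back into a single-block array.

import dask.array as da

x = da.arange(6, chunks=3)
parts = x.to_delayed()  # object ndarray of Delayed, shape (2,)
first = da.from_delayed(parts[0], shape=(3,), dtype=x.dtype)
print(first.compute())  # [0 1 2]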
@derived_from(np.ndarray)\n def nonzero(self):\n from .routines import nonzero\n\n return nonzero(self)\n\n def to_zarr(self, *args, **kwargs):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n See function :func:`dask.array.to_zarr` for parameters.\n \"\"\"\n return to_zarr(self, *args, **kwargs)\n\n def to_tiledb(self, uri, *args, **kwargs):\n \"\"\"Save array to the TileDB storage manager\n\n See function :func:`dask.array.to_tiledb` for argument documentation.\n\n See https://docs.tiledb.io for details about the format and engine.\n \"\"\"\n from .tiledb_io import to_tiledb\n\n return to_tiledb(self, uri, *args, **kwargs)\n\n\ndef ensure_int(f):\n i = int(f)\n if i != f:\n raise ValueError(\"Could not coerce %f to integer\" % f)\n return i", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks_normalize_chunks.if_isinstance_chunks_lis.chunks.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks_normalize_chunks.if_isinstance_chunks_lis.chunks.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2661, "end_line": 2741, "span_ids": ["normalize_chunks"], "tokens": 757}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n \"\"\"Normalize chunks to tuple of tuples\n\n This takes in a variety of input types and information and produces a full\n tuple-of-tuples result for chunks, suitable to be passed to Array or\n rechunk or any other operation that creates a Dask array.\n\n Parameters\n ----------\n chunks: tuple, int, dict, or string\n The chunks to be normalized. See examples below for more details\n shape: Tuple[int]\n The shape of the array\n limit: int (optional)\n The maximum block size to target in bytes,\n if freedom is given to choose\n dtype: np.dtype\n previous_chunks: Tuple[Tuple[int]] optional\n Chunks from a previous array that we should use for inspiration when\n rechunking auto dimensions. 
If not provided but auto-chunking exists\n then auto-dimensions will prefer square-like chunk shapes.\n\n Examples\n --------\n Specify uniform chunk sizes\n\n >>> from dask.array.core import normalize_chunks\n >>> normalize_chunks((2, 2), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Also passes through fully explicit tuple-of-tuples\n\n >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Cleans up lists to tuples\n\n >>> normalize_chunks([[2, 2], [3, 3]])\n ((2, 2), (3, 3))\n\n Expands integer inputs 10 -> (10, 10)\n\n >>> normalize_chunks(10, shape=(30, 5))\n ((10, 10, 10), (5,))\n\n Expands dict inputs\n\n >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))\n ((2, 2, 2), (3, 3))\n\n The values -1 and None get mapped to full size\n\n >>> normalize_chunks((5, -1), shape=(10, 10))\n ((5, 5), (10,))\n\n Use the value \"auto\" to automatically determine chunk sizes along certain\n dimensions. This uses the ``limit=`` and ``dtype=`` keywords to\n determine how large to make the chunks. The term \"auto\" can be used\n anywhere an integer can be used. See array chunking documentation for more\n information.\n\n >>> normalize_chunks((\"auto\",), shape=(20,), limit=5, dtype='uint8')\n ((5, 5, 5, 5),)\n\n You can also use byte sizes (see :func:`dask.utils.parse_bytes`) in place of\n \"auto\" to ask for a particular size\n\n >>> normalize_chunks(\"1kiB\", shape=(2000,), dtype='float32')\n ((250, 250, 250, 250, 250, 250, 250, 250),)\n\n Respects null dimensions\n\n >>> normalize_chunks((), shape=(0, 0))\n ((0,), (0,))\n \"\"\"\n if dtype and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n if isinstance(chunks, list):\n chunks = tuple(chunks)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks.if_isinstance_chunks_Nu_normalize_chunks.return.tuple_tuple_int_x_if_not": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_chunks.if_isinstance_chunks_Nu_normalize_chunks.return.tuple_tuple_int_x_if_not", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2500, "end_line": 2578, "span_ids": ["normalize_chunks"], "tokens": 721}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n # ... 
other code\n if isinstance(chunks, (Number, str)):\n chunks = (chunks,) * len(shape)\n if isinstance(chunks, dict):\n chunks = tuple(chunks.get(i, None) for i in range(len(shape)))\n if isinstance(chunks, np.ndarray):\n chunks = chunks.tolist()\n if not chunks and shape and all(s == 0 for s in shape):\n chunks = ((0,),) * len(shape)\n\n if (\n shape\n and len(shape) == 1\n and len(chunks) > 1\n and all(isinstance(c, (Number, str)) for c in chunks)\n ):\n chunks = (chunks,)\n\n if shape and len(chunks) != len(shape):\n raise ValueError(\n \"Chunks and shape must be of the same length/dimension. \"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))\n\n # If specifying chunk size in bytes, use that value to set the limit.\n # Verify there is only one consistent value of limit or chunk-bytes used.\n for c in chunks:\n if isinstance(c, str) and c != \"auto\":\n parsed = parse_bytes(c)\n if limit is None:\n limit = parsed\n elif parsed != limit:\n raise ValueError(\n \"Only one consistent value of limit or chunk is allowed. \"\n \"Used %s != %s\" % (parsed, limit)\n )\n # Substitute byte limits with 'auto' now that limit is set.\n chunks = tuple(\"auto\" if isinstance(c, str) and c != \"auto\" else c for c in chunks)\n\n if any(c == \"auto\" for c in chunks):\n chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)\n\n if shape is not None:\n chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))\n\n if chunks and shape is not None:\n chunks = sum(\n (\n blockdims_from_blockshape((s,), (c,))\n if not isinstance(c, (tuple, list))\n else (c,)\n for s, c in zip(shape, chunks)\n ),\n (),\n )\n for c in chunks:\n if not c:\n raise ValueError(\n \"Empty tuples are not allowed in chunks. Express \"\n \"zero length dimensions with 0(s) in chunks\"\n )\n\n if shape is not None:\n if len(chunks) != len(shape):\n raise ValueError(\n \"Input array has %d dimensions but the supplied \"\n \"chunks has only %d dimensions\" % (len(shape), len(chunks))\n )\n if not all(\n c == s or (math.isnan(c) or math.isnan(s))\n for c, s in zip(map(sum, chunks), shape)\n ):\n raise ValueError(\n \"Chunks do not add up to shape. 
\"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n\n return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__compute_multiplier_auto_chunks.largest_block.np_prod_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__compute_multiplier_auto_chunks.largest_block.np_prod_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2892, "end_line": 2969, "span_ids": ["_compute_multiplier", "auto_chunks"], "tokens": 579}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compute_multiplier(limit: int, dtype, largest_block: int, result):\n \"\"\"\n Utility function for auto_chunk, to fin how much larger or smaller the ideal\n chunk size is relative to what we have now.\n \"\"\"\n return (\n limit\n / dtype.itemsize\n / largest_block\n / np.prod(list(r if r != 0 else 1 for r in result.values()))\n )\n\n\ndef auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n \"\"\"Determine automatic chunks\n\n This takes in a chunks value that contains ``\"auto\"`` values in certain\n dimensions and replaces those values with concrete dimension sizes that try\n to get chunks to be of a certain size in bytes, provided by the ``limit=``\n keyword. If multiple dimensions are marked as ``\"auto\"`` then they will\n all respond to meet the desired byte limit, trying to respect the aspect\n ratio of their dimensions in ``previous_chunks=``, if given.\n\n Parameters\n ----------\n chunks: Tuple\n A tuple of either dimensions or tuples of explicit chunk dimensions\n Some entries should be \"auto\"\n shape: Tuple[int]\n limit: int, str\n The maximum allowable size of a chunk in bytes\n previous_chunks: Tuple[Tuple[int]]\n\n See also\n --------\n normalize_chunks: for full docstring and parameters\n \"\"\"\n if previous_chunks is not None:\n previous_chunks = tuple(\n c if isinstance(c, tuple) else (c,) for c in previous_chunks\n )\n chunks = list(chunks)\n\n autos = {i for i, c in enumerate(chunks) if c == \"auto\"}\n if not autos:\n return tuple(chunks)\n\n if limit is None:\n limit = config.get(\"array.chunk-size\")\n if isinstance(limit, str):\n limit = parse_bytes(limit)\n\n if dtype is None:\n raise TypeError(\"dtype must be known for auto-chunking\")\n\n if dtype.hasobject:\n raise NotImplementedError(\n \"Can not use auto rechunking with object dtype. 
\"\n \"We are unable to estimate the size in bytes of object data\"\n )\n\n for x in tuple(chunks) + tuple(shape):\n if (\n isinstance(x, Number)\n and np.isnan(x)\n or isinstance(x, tuple)\n and np.isnan(x).any()\n ):\n raise ValueError(\n \"Can not perform automatic rechunking with unknown \"\n \"(nan) chunk sizes.%s\" % unknown_chunk_message\n )\n\n limit = max(1, limit)\n\n largest_block = np.prod(\n [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != \"auto\"]\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_auto_chunks.if_previous_chunks__auto_chunks.if_previous_chunks_.else_.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_auto_chunks.if_previous_chunks__auto_chunks.if_previous_chunks_.else_.return.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2660, "end_line": 2716, "span_ids": ["auto_chunks"], "tokens": 493}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n # ... other code\n\n if previous_chunks:\n # Base ideal ratio on the median chunk size of the previous chunks\n result = {a: np.median(previous_chunks[a]) for a in autos}\n\n ideal_shape = []\n for i, s in enumerate(shape):\n chunk_frequencies = frequencies(previous_chunks[i])\n mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])\n if mode > 1 and count >= len(previous_chunks[i]) / 2:\n ideal_shape.append(mode)\n else:\n ideal_shape.append(s)\n\n # How much larger or smaller the ideal chunk size is relative to what we have now\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n last_multiplier = 0\n last_autos = set()\n while (\n multiplier != last_multiplier or autos != last_autos\n ): # while things change\n last_multiplier = multiplier # record previous values\n last_autos = set(autos) # record previous values\n\n # Expand or contract each of the dimensions appropriately\n for a in sorted(autos):\n if ideal_shape[a] == 0:\n result[a] = 0\n continue\n proposed = result[a] * multiplier ** (1 / len(autos))\n if proposed > shape[a]: # we've hit the shape boundary\n autos.remove(a)\n largest_block *= shape[a]\n chunks[a] = shape[a]\n del result[a]\n else:\n result[a] = round_to(proposed, ideal_shape[a])\n\n # recompute how much multiplier we have left, repeat\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n for k, v in result.items():\n chunks[k] = v\n return tuple(chunks)\n\n else:\n size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))\n small = [i for i in autos if shape[i] < size]\n if small:\n for i in small:\n chunks[i] = (shape[i],)\n return auto_chunks(chunks, shape, limit, dtype)\n\n for i in autos:\n chunks[i] = round_to(size, shape[i])\n\n return tuple(chunks)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array.if_isinstance_x_Array__from_array.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array.if_isinstance_x_Array__from_array.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3258, "end_line": 3331, "span_ids": ["from_array"], "tokens": 649}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n inline_array=False,\n):\n if isinstance(x, Array):\n raise ValueError(\n \"Array is already a dask array. Use 'asarray' or \" \"'rechunk' instead.\"\n )\n elif is_dask_collection(x):\n warnings.warn(\n \"Passing an object to dask.array.from_array which is already a \"\n \"Dask collection. This can lead to unexpected behavior.\"\n )\n\n if isinstance(x, (list, tuple, memoryview) + np.ScalarType):\n x = np.array(x)\n\n if asarray is None:\n asarray = not hasattr(x, \"__array_function__\")\n\n previous_chunks = getattr(x, \"chunks\", None)\n\n chunks = normalize_chunks(\n chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks\n )\n\n if name in (None, True):\n token = tokenize(x, chunks, lock, asarray, fancy, getitem, inline_array)\n name = name or \"array-\" + token\n elif name is False:\n name = \"array-\" + str(uuid.uuid1())\n\n if lock is True:\n lock = SerializableLock()\n\n is_ndarray = type(x) is np.ndarray\n is_single_block = all(len(c) == 1 for c in chunks)\n # Always use the getter for h5py etc. 
Not using isinstance(x, np.ndarray)\n # because np.matrix is a subclass of np.ndarray.\n if is_ndarray and not is_single_block and not lock:\n # eagerly slice numpy arrays to prevent memory blowup\n # GH5367, GH5601\n slices = slices_from_chunks(chunks)\n keys = product([name], *(range(len(bds)) for bds in chunks))\n values = [x[slc] for slc in slices]\n dsk = dict(zip(keys, values))\n\n elif is_ndarray and is_single_block:\n # No slicing needed\n dsk = {(name,) + (0,) * x.ndim: x}\n else:\n if getitem is None:\n if fancy:\n getitem = getter\n else:\n getitem = getter_nofancy\n\n dsk = graph_from_arraylike(\n x,\n chunks,\n x.shape,\n name,\n getitem=getitem,\n lock=lock,\n asarray=asarray,\n dtype=x.dtype,\n inline_array=inline_array,\n )\n\n # Workaround for TileDB, its indexing is 1-based,\n # and doesn't seem to support 0-length slicing\n if x.__class__.__module__.split(\".\")[0] == \"tiledb\" and hasattr(x, \"_ctx_\"):\n return Array(dsk, name, chunks, dtype=x.dtype)\n\n if meta is None:\n meta = x\n\n return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, \"dtype\", None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_zarr_from_zarr.return.from_array_z_chunks_nam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_zarr_from_zarr.return.from_array_z_chunks_nam", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3298, "end_line": 3355, "span_ids": ["from_zarr"], "tokens": 504}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_zarr(\n url,\n component=None,\n storage_options=None,\n chunks=None,\n name=None,\n inline_array=False,\n **kwargs,\n):\n \"\"\"Load array from the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be loaded, something like ``'foo/bar'``.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n chunks: tuple of ints or tuples of ints\n Passed to :func:`dask.array.from_array`, allows setting the chunks on\n initialisation, if the chunking scheme in the on-disc dataset is not\n optimal for the calculations to follow.\n name : str, optional\n An optional keyname for the array. 
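A minimal sketch of ``from_array`` as defined above on an in-memory NumPy array; with the default name/lock settings the chunks are sliced eagerly, as the comments in the source describe:

import numpy as np
import dask.array as da

x = np.arange(12).reshape(3, 4)
d = da.from_array(x, chunks=(2, 2))
print(d.chunks)           # ((2, 1), (2, 2))
print(d.sum().compute())  # 66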
Defaults to hashing the input\n kwargs:\n Passed to :class:`zarr.core.Array`.\n inline_array : bool, default False\n Whether to inline the zarr Array in the values of the task graph.\n See :meth:`dask.array.from_array` for an explanation.\n\n See Also\n --------\n from_array\n \"\"\"\n import zarr\n\n storage_options = storage_options or {}\n if isinstance(url, zarr.Array):\n z = url\n elif isinstance(url, (str, os.PathLike)):\n if isinstance(url, os.PathLike):\n url = os.fspath(url)\n mapper = get_mapper(url, **storage_options)\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n else:\n mapper = url\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n chunks = chunks if chunks is not None else z.chunks\n if name is None:\n name = \"from-zarr-\" + tokenize(z, component, storage_options, chunks, **kwargs)\n return from_array(z, chunks, name=name, inline_array=inline_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_zarr_to_zarr.return.arr_store_z_lock_False_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_zarr_to_zarr.return.arr_store_z_lock_False_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3394, "end_line": 3512, "span_ids": ["to_zarr"], "tokens": 860}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_zarr(\n arr,\n url,\n component=None,\n storage_options=None,\n overwrite=False,\n region=None,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n arr: dask.array\n Data to store\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. 
Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be created/over-written.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n overwrite: bool\n If given array already exists, overwrite=False will cause an error,\n where overwrite=True will replace the existing data.\n region: tuple of slices or None\n The region of data that should be written if ``url`` is a zarr.Array.\n Not to be used with other types of ``url``.\n compute: bool\n See :func:`~dask.array.store` for more details.\n return_stored: bool\n See :func:`~dask.array.store` for more details.\n **kwargs:\n Passed to the :func:`zarr.creation.create` function, e.g., compression options.\n\n Raises\n ------\n ValueError\n If ``arr`` has unknown chunk sizes, which is not supported by Zarr.\n If ``region`` is specified and ``url`` is not a zarr.Array\n\n See Also\n --------\n dask.array.store\n dask.array.Array.compute_chunk_sizes\n\n \"\"\"\n import zarr\n\n if np.isnan(arr.shape).any():\n raise ValueError(\n \"Saving a dask array with unknown chunk sizes is not \"\n \"currently supported by Zarr.%s\" % unknown_chunk_message\n )\n\n if isinstance(url, zarr.Array):\n z = url\n if isinstance(z.store, (dict, MutableMapping)) and config.get(\n \"scheduler\", \"\"\n ) in (\"dask.distributed\", \"distributed\"):\n raise RuntimeError(\n \"Cannot store into in memory Zarr Array using \"\n \"the Distributed Scheduler.\"\n )\n\n if region is None:\n arr = arr.rechunk(z.chunks)\n regions = None\n else:\n from .slicing import new_blockdim, normalize_index\n\n old_chunks = normalize_chunks(z.chunks, z.shape)\n index = normalize_index(region, z.shape)\n chunks = tuple(\n tuple(new_blockdim(s, c, r))\n for s, c, r in zip(z.shape, old_chunks, index)\n )\n arr = arr.rechunk(chunks)\n regions = [region]\n return arr.store(\n z, lock=False, regions=regions, compute=compute, return_stored=return_stored\n )\n\n if region is not None:\n raise ValueError(\"Cannot use `region` keyword when url is not a `zarr.Array`.\")\n\n if not _check_regular_chunks(arr.chunks):\n raise ValueError(\n \"Attempt to save array to zarr with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n storage_options = storage_options or {}\n\n if isinstance(url, str):\n mapper = get_mapper(url, **storage_options)\n else:\n # assume the object passed is already a mapper\n mapper = url\n\n chunks = [c[0] for c in arr.chunks]\n\n z = zarr.create(\n shape=arr.shape,\n chunks=chunks,\n dtype=arr.dtype,\n store=mapper,\n path=component,\n overwrite=overwrite,\n **kwargs,\n )\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__check_regular_chunks__check_regular_chunks.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__check_regular_chunks__check_regular_chunks.return.True", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", 
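A sketch of a to_zarr/from_zarr round trip through an in-memory MutableMapping store (the docstrings above note that any MutableMapping works), assuming the optional zarr dependency is installed:

import dask.array as da

store = {}  # any MutableMapping can act as a zarr store
x = da.ones((10, 10), chunks=(5, 5))
x.to_zarr(store)  # chunks are regular, so this writes directly
y = da.from_zarr(store)
assert (y.compute() == 1).all()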
"start_line": 3069, "end_line": 3106, "span_ids": ["_check_regular_chunks"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _check_regular_chunks(chunkset):\n \"\"\"Check if the chunks are regular\n\n \"Regular\" in this context means that along every axis, the chunks all\n have the same size, except the last one, which may be smaller\n\n Parameters\n ----------\n chunkset: tuple of tuples of ints\n From the ``.chunks`` attribute of an ``Array``\n\n Returns\n -------\n True if chunkset passes, else False\n\n Examples\n --------\n >>> import dask.array as da\n >>> arr = da.zeros(10, chunks=(5, ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))\n >>> _check_regular_chunks(arr.chunks)\n False\n \"\"\"\n for chunks in chunkset:\n if len(chunks) == 1:\n continue\n if len(set(chunks[:-1])) > 1:\n return False\n if chunks[-1] > chunks[0]:\n return False\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_delayed_from_delayed.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_delayed_from_delayed.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3416, "end_line": 3447, "span_ids": ["from_delayed"], "tokens": 335}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_delayed(value, shape, dtype=None, meta=None, name=None):\n \"\"\"Create a dask array from a dask delayed value\n\n This routine is useful for constructing dask arrays in an ad-hoc fashion\n using dask delayed, particularly when combined with stack and concatenate.\n\n The dask array will consist of a single chunk.\n\n Examples\n --------\n >>> import dask\n >>> import dask.array as da\n >>> import numpy as np\n >>> value = dask.delayed(np.ones)(5)\n >>> array = da.from_delayed(value, (5,), dtype=float)\n >>> array\n dask.array\n >>> array.compute()\n array([1., 1., 1., 1., 1.])\n \"\"\"\n from ..delayed import Delayed, delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n\n name = name or \"from-value-\" + tokenize(value, shape, dtype, meta)\n dsk = {(name,) + (0,) * len(shape): value.key}\n chunks = tuple((d,) for d in shape)\n # TODO: value._key may not be the name of the layer in value.dask\n # This should be fixed after we build full expression graphs\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])\n return 
Array(graph, name, chunks, dtype=dtype, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_func_from_func.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_func_from_func.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3142, "end_line": 3167, "span_ids": ["from_func"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):\n \"\"\"Create dask array in a single block by calling a function\n\n Calling the provided function with func(*args, **kwargs) should return a\n NumPy array of the indicated shape and dtype.\n\n Examples\n --------\n\n >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))\n >>> a.compute()\n array([0, 1, 2])\n\n This works particularly well when coupled with dask.array functions like\n concatenate and stack:\n\n >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]\n >>> stack(arrays).compute()\n array([0, 1, 2, 3, 4])\n \"\"\"\n name = name or \"from_func-\" + tokenize(func, shape, dtype, args, kwargs)\n if args or kwargs:\n func = partial(func, *args, **kwargs)\n dsk = {(name,) + (0,) * len(shape): (func,)}\n chunks = tuple((i,) for i in shape)\n return Array(dsk, name, chunks, dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_common_blockdim_common_blockdim.return.tuple_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_common_blockdim_common_blockdim.return.tuple_out_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3557, "end_line": 3622, "span_ids": ["common_blockdim"], "tokens": 607}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def common_blockdim(blockdims):\n \"\"\"Find the common block dimensions from the list of block dimensions\n\n Currently only implements the simplest possible heuristic: the common\n block-dimension is the only one that does not span fully span a dimension.\n This is a conservative choice that allows us to avoid potentially very\n expensive rechunking.\n\n Assumes that each element of the input block 
dimensions has all the same\n sum (i.e., that they correspond to dimensions of the same size).\n\n Examples\n --------\n >>> common_blockdim([(3,), (2, 1)])\n (2, 1)\n >>> common_blockdim([(1, 2), (2, 1)])\n (1, 1, 1)\n >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align\n \"\"\"\n if not any(blockdims):\n return ()\n non_trivial_dims = {d for d in blockdims if len(d) > 1}\n if len(non_trivial_dims) == 1:\n return first(non_trivial_dims)\n if len(non_trivial_dims) == 0:\n return max(blockdims, key=first)\n\n if np.isnan(sum(map(sum, blockdims))):\n raise ValueError(\n \"Arrays' chunk sizes (%s) are unknown.\\n\\n\"\n \"A possible solution:\\n\"\n \" x.compute_chunk_sizes()\" % blockdims\n )\n\n if len(set(map(sum, non_trivial_dims))) > 1:\n raise ValueError(\"Chunks do not add up to same value\", blockdims)\n\n # We have multiple non-trivial chunks on this axis\n # e.g. (5, 2) and (4, 3)\n\n # We create a single chunk tuple with the same total length\n # that evenly divides both, e.g. (4, 1, 2)\n\n # To accomplish this we walk down all chunk tuples together, finding the\n # smallest element, adding it to the output, and subtracting it from all\n # other elements and remove the element itself. We stop once we have\n # burned through all of the chunk tuples.\n # For efficiency's sake we reverse the lists so that we can pop off the end\n rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]\n total = sum(first(non_trivial_dims))\n i = 0\n\n out = []\n while i < total:\n m = min(c[-1] for c in rchunks)\n out.append(m)\n for c in rchunks:\n c[-1] -= m\n if c[-1] == 0:\n c.pop()\n i += m\n\n return tuple(out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block_block._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block_block._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3354, "end_line": 3441, "span_ids": ["block"], "tokens": 772}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def block(arrays, allow_unknown_chunksizes=False):\n \"\"\"\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated along the last\n dimension (-1), then these are concatenated along the second-last\n dimension (-2), and so on until the outermost list is reached\n\n Blocks can be of any dimension, but will not be broadcasted using the normal\n rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``\n the same for all blocks. 
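The greedy loop at the end of common_blockdim (above) walks the chunk tuples together, repeatedly emitting the smallest remaining leading chunk; a self-contained sketch of that merge for two tuples (merge_chunks is a hypothetical name, not part of dask):

def merge_chunks(a, b):
    # common refinement of two chunk tuples with equal sums, mirroring
    # the reversed-list walk in common_blockdim
    a, b = list(a)[::-1], list(b)[::-1]
    out = []
    while a and b:
        m = min(a[-1], b[-1])   # smallest remaining leading chunk
        out.append(m)
        for c in (a, b):
            c[-1] -= m
            if c[-1] == 0:
                c.pop()
    return tuple(out)

assert merge_chunks((5, 2), (4, 3)) == (4, 1, 2)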
This is primarily useful for working with scalars,\n and means that code like ``block([v, 1])`` is valid, where\n ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays together.\n stack : Stack arrays in sequence along a new dimension.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n vsplit : Split array into a list of multiple sub-arrays vertically.\n\n Notes\n -----\n\n When called with only scalars, ``block`` is equivalent to an ndarray\n call. So ``block([[1, 2], [3, 4]])`` is equivalent to\n ``array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is _not_\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's \"square bracket stacking\", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``block([[A, B, ...], [p, q, ...]])``.\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block._This_was_copied_almost__block.return.rec_map_reduce_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_block._This_was_copied_almost__block.return.rec_map_reduce_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3829, "end_line": 3907, "span_ids": ["block"], "tokens": 633}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def block(arrays, allow_unknown_chunksizes=False):\n\n # This was copied almost verbatim from numpy.core.shape_base.block\n\n def atleast_nd(x, ndim):\n x = asanyarray(x)\n diff = max(ndim - x.ndim, 0)\n if diff == 0:\n return x\n else:\n return x[(None,) * diff + (Ellipsis,)]\n\n def format_index(index):\n return \"arrays\" + \"\".join(f\"[{i}]\" for i in index)\n\n rec = _Recurser(recurse_if=lambda x: type(x) is list)\n\n # ensure that the lists are all matched in depth\n list_ndim = None\n any_empty = False\n for index, value, entering in rec.walk(arrays):\n if type(value) is tuple:\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n \"{} is a tuple. \"\n \"Only lists can be used to arrange blocks, and np.block does \"\n \"not allow implicit conversion from tuple to ndarray.\".format(\n format_index(index)\n )\n )\n if not entering:\n curr_depth = len(index)\n elif len(value) == 0:\n curr_depth = len(index) + 1\n any_empty = True\n else:\n continue\n\n if list_ndim is not None and list_ndim != curr_depth:\n raise ValueError(\n \"List depths are mismatched. 
First element was at depth {}, \"\n \"but there is an element at depth {} ({})\".format(\n list_ndim, curr_depth, format_index(index)\n )\n )\n list_ndim = curr_depth\n\n # do this here so we catch depth mismatches first\n if any_empty:\n raise ValueError(\"Lists cannot be empty\")\n\n # convert all the arrays to ndarrays\n arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)\n\n # determine the maximum dimension of the elements\n elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)\n ndim = max(list_ndim, elem_ndim)\n\n # first axis to concatenate along\n first_axis = ndim - list_ndim\n\n # Make all the elements the same dimension\n arrays = rec.map_reduce(\n arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list\n )\n\n # concatenate innermost lists on the right, outermost on the left\n return rec.map_reduce(\n arrays,\n f_reduce=lambda xs, axis: concatenate(\n list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes\n ),\n f_kwargs=lambda axis: dict(axis=(axis + 1)),\n axis=first_axis,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_load_store_chunk_load_chunk.return.load_store_chunk_None_ou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_load_store_chunk_load_chunk.return.load_store_chunk_None_ou", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4034, "end_line": 4088, "span_ids": ["load_chunk", "load_store_chunk", "store_chunk"], "tokens": 368}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def load_store_chunk(x, out, index, lock, return_stored, load_stored):\n \"\"\"\n A function inserted in a Dask graph for storing a chunk.\n\n Parameters\n ----------\n x: array-like\n An array (potentially a NumPy one)\n out: array-like\n Where to store results too.\n index: slice-like\n Where to store result from ``x`` in ``out``.\n lock: Lock-like or False\n Lock to use before writing to ``out``.\n return_stored: bool\n Whether to return ``out``.\n load_stored: bool\n Whether to return the array stored in ``out``.\n Ignored if ``return_stored`` is not ``True``.\n\n Examples\n --------\n\n >>> a = np.ones((5, 6))\n >>> b = np.empty(a.shape)\n >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)\n \"\"\"\n\n result = None\n if return_stored and not load_stored:\n result = out\n\n if lock:\n lock.acquire()\n try:\n if x is not None:\n if is_arraylike(x):\n out[index] = x\n else:\n out[index] = np.asanyarray(x)\n if return_stored and load_stored:\n result = out[index]\n finally:\n if lock:\n lock.release()\n\n return result\n\n\ndef store_chunk(x, out, index, lock, return_stored):\n return load_store_chunk(x, out, index, lock, return_stored, False)\n\n\ndef load_chunk(out, index, lock):\n return load_store_chunk(None, out, index, lock, True, True)", "start_char_idx": null, "end_char_idx": null, 
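For the block() implementation above, concatenation runs innermost-first along the last axis, then outward; a small usage sketch assembling a 2x2 block matrix:

import dask.array as da

A = da.ones((2, 3), chunks=(2, 3))
b = da.zeros((2, 2), chunks=(2, 2))
c = da.full((1, 3), 7, chunks=(1, 3))
d = da.arange(2, chunks=2)[None, :]        # shape (1, 2)
# each row concatenates along axis -1 first, then the rows along axis -2
x = da.block([[A, b], [c, d]])             # shape (3, 5)
x.compute()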
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_insert_to_ooc_insert_to_ooc.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_insert_to_ooc_insert_to_ooc.return.dsk", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4106, "end_line": 4174, "span_ids": ["insert_to_ooc"], "tokens": 532}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def insert_to_ooc(\n keys: list,\n chunks: tuple[tuple[int, ...], ...],\n out,\n name: str,\n *,\n lock: Lock | bool = True,\n region: slice | None = None,\n return_stored: bool = False,\n load_stored: bool = False,\n) -> dict:\n \"\"\"\n Creates a Dask graph for storing chunks from ``arr`` in ``out``.\n\n Parameters\n ----------\n keys: list\n Dask keys of the input array\n chunks: tuple\n Dask chunks of the input array\n out: array-like\n Where to store results to\n name: str\n First element of dask keys\n lock: Lock-like or bool, optional\n Whether to lock or with what (default is ``True``,\n which means a :class:`threading.Lock` instance).\n region: slice-like, optional\n Where in ``out`` to store ``arr``'s results\n (default is ``None``, meaning all of ``out``).\n return_stored: bool, optional\n Whether to return ``out``\n (default is ``False``, meaning ``None`` is returned).\n load_stored: bool, optional\n Whether to handling loading from ``out`` at the same time.\n Ignored if ``return_stored`` is not ``True``.\n (default is ``False``, meaning defer to ``return_stored``).\n\n Returns\n -------\n dask graph of store operation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> insert_to_ooc(d.__dask_keys__(), d.chunks, a, \"store-123\") # doctest: +SKIP\n \"\"\"\n\n if lock is True:\n lock = Lock()\n\n slices = slices_from_chunks(chunks)\n if region:\n slices = [fuse_slice(region, slc) for slc in slices]\n\n if return_stored and load_stored:\n func = load_store_chunk\n args = (load_stored,)\n else:\n func = store_chunk\n args = ()\n\n dsk = {\n (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args\n for t, slc in zip(core.flatten(keys), slices)\n }\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_retrieve_from_ooc_retrieve_from_ooc.return.load_dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_retrieve_from_ooc_retrieve_from_ooc.return.load_dsk", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4177, "end_line": 4205, "span_ids": 
["retrieve_from_ooc"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def retrieve_from_ooc(\n keys: Collection[Hashable], dsk_pre: Mapping, dsk_post: Mapping\n) -> dict:\n \"\"\"\n Creates a Dask graph for loading stored ``keys`` from ``dsk``.\n\n Parameters\n ----------\n keys: Collection\n A sequence containing Dask graph keys to load\n dsk_pre: Mapping\n A Dask graph corresponding to a Dask Array before computation\n dsk_post: Mapping\n A Dask graph corresponding to a Dask Array after computation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> g = insert_to_ooc(d.__dask_keys__(), d.chunks, a, \"store-123\")\n >>> retrieve_from_ooc(g.keys(), g, {k: k for k in g.keys()}) # doctest: +SKIP\n \"\"\"\n load_dsk = {\n (\"load-\" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]\n for k in keys\n }\n\n return load_dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_is_scalar_for_elemwise_is_scalar_for_elemwise.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_is_scalar_for_elemwise_is_scalar_for_elemwise.return._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3887, "end_line": 3919, "span_ids": ["is_scalar_for_elemwise"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_scalar_for_elemwise(arg):\n \"\"\"\n\n >>> is_scalar_for_elemwise(42)\n True\n >>> is_scalar_for_elemwise('foo')\n True\n >>> is_scalar_for_elemwise(True)\n True\n >>> is_scalar_for_elemwise(np.array(42))\n True\n >>> is_scalar_for_elemwise([1, 2, 3])\n True\n >>> is_scalar_for_elemwise(np.array([1, 2, 3]))\n False\n >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))\n False\n >>> is_scalar_for_elemwise(np.dtype('i4'))\n True\n \"\"\"\n # the second half of shape_condition is essentially just to ensure that\n # dask series / frame are treated as scalars in elemwise.\n maybe_shape = getattr(arg, \"shape\", None)\n shape_condition = not isinstance(maybe_shape, Iterable) or any(\n is_dask_collection(x) for x in maybe_shape\n )\n\n return (\n np.isscalar(arg)\n or shape_condition\n or isinstance(arg, np.dtype)\n or (isinstance(arg, np.ndarray) and arg.ndim == 0)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_shapes_broadcast_shapes.return.tuple_reversed_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_shapes_broadcast_shapes.return.tuple_reversed_out_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4384, "end_line": 4416, "span_ids": ["broadcast_shapes"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_shapes(*shapes):\n \"\"\"\n Determines output shape from broadcasting arrays.\n\n Parameters\n ----------\n shapes : tuples\n The shapes of the arguments.\n\n Returns\n -------\n output_shape : tuple\n\n Raises\n ------\n ValueError\n If the input shapes cannot be successfully broadcast together.\n \"\"\"\n if len(shapes) == 1:\n return shapes[0]\n out = []\n for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):\n if np.isnan(sizes).any():\n dim = np.nan\n else:\n dim = 0 if 0 in sizes else np.max(sizes)\n if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):\n raise ValueError(\n \"operands could not be broadcast together with \"\n \"shapes {}\".format(\" \".join(map(str, shapes)))\n )\n out.append(dim)\n return tuple(reversed(out))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_handle_out_handle_out.if_isinstance_out_Array_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_handle_out_handle_out.if_isinstance_out_Array_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4572, "end_line": 4591, "span_ids": ["handle_out"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def handle_out(out, result):\n \"\"\"Handle out parameters\n\n If out is a dask.array then this overwrites the contents of that array with\n the result\n \"\"\"\n out = _elemwise_normalize_out(out)\n if isinstance(out, Array):\n if out.shape != result.shape:\n raise ValueError(\n \"Mismatched shapes between result and out parameter. 
\"\n \"out=%s, result=%s\" % (str(out.shape), str(result.shape))\n )\n out._chunks = result.chunks\n out.dask = result.dask\n out._meta = result._meta\n out._name = result.name\n return out\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__enforce_dtype__enforce_dtype.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__enforce_dtype__enforce_dtype.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4073, "end_line": 4112, "span_ids": ["_enforce_dtype"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _enforce_dtype(*args, **kwargs):\n \"\"\"Calls a function and converts its result to the given dtype.\n\n The parameters have deliberately been given unwieldy names to avoid\n clashes with keyword arguments consumed by blockwise\n\n A dtype of `object` is treated as a special case and not enforced,\n because it is used as a dummy value in some places when the result will\n not be a block in an Array.\n\n Parameters\n ----------\n enforce_dtype : dtype\n Result dtype\n enforce_dtype_function : callable\n The wrapped function, which will be passed the remaining arguments\n \"\"\"\n dtype = kwargs.pop(\"enforce_dtype\")\n function = kwargs.pop(\"enforce_dtype_function\")\n\n result = function(*args, **kwargs)\n if hasattr(result, \"dtype\") and dtype != result.dtype and dtype != object:\n if not np.can_cast(result, dtype, casting=\"same_kind\"):\n raise ValueError(\n \"Inferred dtype from function %r was %r \"\n \"but got %r, which can't be cast using \"\n \"casting='same_kind'\"\n % (funcname(function), str(dtype), str(result.dtype))\n )\n if np.isscalar(result):\n # scalar astype method doesn't take the keyword arguments, so\n # have to convert via 0-dimensional array and back.\n result = result.astype(dtype)\n else:\n try:\n result = result.astype(dtype, copy=False)\n except TypeError:\n # Missing copy kwarg\n result = result.astype(dtype)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_to_broadcast_to.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_to_broadcast_to.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4636, "end_line": 4708, "span_ids": ["broadcast_to"], "tokens": 648}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
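handle_out above rebinds the ``out`` array in place to the result's graph when shapes match; a direct sketch using the private helper (an internal function, not public API):

import dask.array as da
from dask.array.core import handle_out

x = da.ones((4,), chunks=2)
out = da.zeros((4,), chunks=2)
result = x + 1
# shapes match, so `out` is mutated to alias `result`'s graph and meta
assert handle_out(out, result) is out
out.compute()   # array([2., 2., 2., 2.])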
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_to(x, shape, chunks=None, meta=None):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n x : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n chunks : tuple, optional\n If provided, then the result will use these chunks instead of the same\n chunks as the source array. Setting chunks explicitly as part of\n broadcast_to is more efficient than rechunking afterwards. Chunks are\n only allowed to differ from the original shape along dimensions that\n are new on the result or have size 1 the input array.\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n Returns\n -------\n broadcast : dask array\n\n See Also\n --------\n :func:`numpy.broadcast_to`\n \"\"\"\n x = asarray(x)\n shape = tuple(shape)\n\n if meta is None:\n meta = meta_from_array(x)\n\n if x.shape == shape and (chunks is None or chunks == x.chunks):\n return x\n\n ndim_new = len(shape) - x.ndim\n if ndim_new < 0 or any(\n new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1\n ):\n raise ValueError(f\"cannot broadcast shape {x.shape} to shape {shape}\")\n\n if chunks is None:\n chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(\n bd if old > 1 else (new,)\n for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])\n )\n else:\n chunks = normalize_chunks(\n chunks, shape, dtype=x.dtype, previous_chunks=x.chunks\n )\n for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):\n if old_bd != new_bd and old_bd != (1,):\n raise ValueError(\n \"cannot broadcast chunks %s to chunks %s: \"\n \"new chunks must either be along a new \"\n \"dimension or a dimension of size 1\" % (x.chunks, chunks)\n )\n\n name = \"broadcast_to-\" + tokenize(x, shape, chunks)\n dsk = {}\n\n enumerated_chunks = product(*(enumerate(bds) for bds in chunks))\n for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):\n old_index = tuple(\n 0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])\n )\n old_key = (x.name,) + old_index\n new_key = (name,) + new_index\n dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, dtype=x.dtype, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_arrays_broadcast_arrays.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_broadcast_arrays_broadcast_arrays.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4711, "end_line": 4728, "span_ids": ["broadcast_arrays"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef broadcast_arrays(*args, subok=False):\n subok = bool(subok)\n\n to_array = asanyarray if subok else asarray\n args = tuple(to_array(e) for e in args)\n\n # Unify uneven chunking\n inds = [list(reversed(range(x.ndim))) for x in args]\n uc_args = concat(zip(args, inds))\n _, args = unify_chunks(*uc_args, warn=False)\n\n shape = broadcast_shapes(*(e.shape for e in args))\n chunks = broadcast_chunks(*(e.chunks for e in args))\n\n result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_offset_func_offset_func.return._offset": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_offset_func_offset_func.return._offset", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4537, "end_line": 4555, "span_ids": ["offset_func"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def offset_func(func, offset, *args):\n \"\"\"Offsets inputs by offset\n\n >>> double = lambda x: x * 2\n >>> f = offset_func(double, (10,))\n >>> f(1)\n 22\n >>> f(300)\n 620\n \"\"\"\n\n def _offset(*args):\n args2 = list(map(add, args, offset))\n return func(*args2)\n\n with contextlib.suppress(Exception):\n _offset.__name__ = \"offset_\" + func.__name__\n\n return _offset", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_chunks_from_arrays_chunks_from_arrays.return.tuple_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_chunks_from_arrays_chunks_from_arrays.return.tuple_result_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4752, "end_line": 4785, "span_ids": ["chunks_from_arrays"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def chunks_from_arrays(arrays):\n \"\"\"Chunks tuple from nested list of arrays\n\n >>> x = np.array([1, 2])\n >>> chunks_from_arrays([x, x])\n ((2, 2),)\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x], [x]])\n ((1, 1), (2,))\n\n >>> x = np.array([[1, 2]])\n 
>>> chunks_from_arrays([[x, x]])\n ((1,), (2, 2))\n\n >>> chunks_from_arrays([1, 1])\n ((1, 1),)\n \"\"\"\n if not arrays:\n return ()\n result = []\n dim = 0\n\n def shape(x):\n try:\n return x.shape\n except AttributeError:\n return (1,)\n\n while isinstance(arrays, (list, tuple)):\n result.append(tuple(shape(deepfirst(a))[dim] for a in arrays))\n arrays = arrays[0]\n dim += 1\n return tuple(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_transposelist_transposelist.return.reshapelist_newshape_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_transposelist_transposelist.return.reshapelist_newshape_res", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4297, "end_line": 4320, "span_ids": ["transposelist"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def transposelist(arrays, axes, extradims=0):\n \"\"\"Permute axes of nested list\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1])\n [[[1, 1], [1, 1], [1, 1]]]\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)\n [[[[1], [1]], [[1], [1]], [[1], [1]]]]\n \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n if extradims < 0:\n raise ValueError(\"`newdims` should be positive\")\n if len(axes) > len(set(axes)):\n raise ValueError(\"`axes` should be unique\")\n\n ndim = max(axes) + 1\n shape = shapelist(arrays)\n newshape = [\n shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)\n ]\n\n result = list(core.flatten(arrays))\n return reshapelist(newshape, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack_stack.keys.list_product_name_ra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack_stack.keys.list_product_name_ra", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4834, "end_line": 4925, "span_ids": ["stack"], "tokens": 767}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def stack(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Stack arrays along a new axis\n\n Given a sequence of dask arrays, form a new dask array by stacking them\n along a new dimension 
(axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.stack(data, axis=0)\n >>> x.shape\n (3, 4, 4)\n\n >>> da.stack(data, axis=1).shape\n (4, 3, 4)\n\n >>> da.stack(data, axis=-1).shape\n (4, 4, 3)\n\n Result is a new dask Array\n\n See Also\n --------\n concatenate\n \"\"\"\n from . import wrap\n\n seq = [asarray(a, allow_unknown_chunksizes=allow_unknown_chunksizes) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to stack\")\n if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):\n idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)\n raise ValueError(\n \"Stacked arrays must have the same shape. The first array had shape \"\n f\"{seq[0].shape}, while array {idx[0] + 1} has shape {idx[1].shape}.\"\n )\n\n meta = np.stack([meta_from_array(a) for a in seq], axis=axis)\n seq = [x.astype(meta.dtype) for x in seq]\n\n ndim = meta.ndim - 1\n if axis < 0:\n axis = ndim + axis + 1\n shape = tuple(\n len(seq)\n if i == axis\n else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])\n for i in range(meta.ndim)\n )\n\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n\n ind = list(range(ndim))\n uc_args = list(concat((x, ind) for x in seq2))\n _, seq2 = unify_chunks(*uc_args)\n\n assert len({a.chunks for a in seq2}) == 1 # same chunks\n chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]\n\n names = [a.name for a in seq2]\n name = \"stack-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack.inputs_stack.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_stack.inputs_stack.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4417, "end_line": 4434, "span_ids": ["stack"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def stack(seq, axis=0, allow_unknown_chunksizes=False):\n # ... 
other code\n\n inputs = [\n (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys\n ]\n values = [\n (\n getitem,\n inp,\n (slice(None, None, None),) * axis\n + (None,)\n + (slice(None, None, None),) * (ndim - axis),\n )\n for inp in inputs\n ]\n\n layer = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate3_concatenate3.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate3_concatenate3.return.result", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4947, "end_line": 5014, "span_ids": ["concatenate3"], "tokens": 545}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate3(arrays):\n \"\"\"Recursive np.concatenate\n\n Input should be a nested list of numpy arrays arranged in the order they\n should appear in the array itself. Each array should have the same number\n of dimensions as the desired output and the nesting of the lists.\n\n >>> x = np.array([[1, 2]])\n >>> concatenate3([[x, x, x], [x, x, x]])\n array([[1, 2, 1, 2, 1, 2],\n [1, 2, 1, 2, 1, 2]])\n\n >>> concatenate3([[x, x], [x, x], [x, x]])\n array([[1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2, 1, 2]])\n \"\"\"\n # We need this as __array_function__ may not exist on older NumPy versions.\n # And to reduce verbosity.\n NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, \"__array_function__\", None)\n\n arrays = concrete(arrays)\n if not arrays:\n return np.empty(0)\n\n advanced = max(\n core.flatten(arrays, container=(list, tuple)),\n key=lambda x: getattr(x, \"__array_priority__\", 0),\n )\n\n if not all(\n NDARRAY_ARRAY_FUNCTION\n is getattr(type(arr), \"__array_function__\", NDARRAY_ARRAY_FUNCTION)\n for arr in core.flatten(arrays, container=(list, tuple))\n ):\n try:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=tuple(range(x.ndim)))\n except TypeError:\n pass\n\n if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=list(range(x.ndim)))\n\n ndim = ndimlist(arrays)\n if not ndim:\n return arrays\n chunks = chunks_from_arrays(arrays)\n shape = tuple(map(sum, chunks))\n\n def dtype(x):\n try:\n return x.dtype\n except AttributeError:\n return type(x)\n\n result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))\n\n for (idx, arr) in zip(\n slices_from_chunks(chunks), core.flatten(arrays, container=(list, tuple))\n ):\n if hasattr(arr, \"ndim\"):\n while arr.ndim < ndim:\n arr = arr[None, ...]\n result[idx] = arr\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
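concatenate3 above allocates the output once and assigns each block by slice instead of recursing through np.concatenate; a small example (a private helper, imported here only for illustration):

import numpy as np
from dask.array.core import concatenate3

x = np.arange(4).reshape(2, 2)
concatenate3([[x, x], [x, x]])
# array([[0, 1, 0, 1],
#        [2, 3, 2, 3],
#        [0, 1, 0, 1],
#        [2, 3, 2, 3]])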
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_axes_to_hdf5.with_h5py_File_filename_.store_list_data_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_axes_to_hdf5.with_h5py_File_filename_.store_list_data_values_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5017, "end_line": 5082, "span_ids": ["concatenate_axes", "to_hdf5"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate_axes(arrays, axes):\n \"\"\"Recursively call np.concatenate along axes\"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n\n extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))\n return concatenate3(transposelist(arrays, axes, extradims=extradims))\n\n\ndef to_hdf5(filename, *args, chunks=True, **kwargs):\n \"\"\"Store arrays in HDF5 file\n\n This saves several dask arrays into several datapaths in an HDF5 file.\n It creates the necessary datasets and handles clean file opening/closing.\n\n Parameters\n ----------\n chunks: tuple or ``True``\n Chunk shape, or ``True`` to pass the chunks from the dask array.\n Defaults to ``True``.\n\n Examples\n --------\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP\n\n or\n\n >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, chunks=(10,20,30)) # doctest: +SKIP\n\n This can also be used as a method on a single Array\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n if len(args) == 1 and isinstance(args[0], dict):\n data = args[0]\n elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):\n data = {args[0]: args[1]}\n else:\n raise ValueError(\"Please provide {'/data/path': array} dictionary\")\n\n import h5py\n\n with h5py.File(filename, mode=\"a\") as f:\n dsets = [\n f.require_dataset(\n dp,\n shape=x.shape,\n dtype=x.dtype,\n chunks=tuple(c[0] for c in x.chunks) if chunks is True else chunks,\n **kwargs,\n )\n for dp, x in data.items()\n ]\n store(list(data.values()), dsets)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_interleave_none_keyname.return._name_i_tuple_k_for_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_interleave_none_keyname.return._name_i_tuple_k_for_k", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 4566, "end_line": 4592, "span_ids": ["interleave_none", "keyname"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def interleave_none(a, b):\n \"\"\"\n\n >>> interleave_none([0, None, 2, None], [1, 3])\n (0, 1, 2, 3)\n \"\"\"\n result = []\n i = j = 0\n n = len(a) + len(b)\n while i + j < n:\n if a[i] is not None:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n i += 1\n j += 1\n return tuple(result)\n\n\ndef keyname(name, i, okey):\n \"\"\"\n\n >>> keyname('x', 3, [None, None, 0, 2])\n ('x', 3, 0, 2)\n \"\"\"\n return (name, i) + tuple(k for k in okey if k is not None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex__vindex.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex__vindex.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4595, "end_line": 4649, "span_ids": ["_vindex"], "tokens": 570}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _vindex(x, *indexes):\n \"\"\"Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n \"\"\"\n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for i, ind in enumerate(indexes):\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_array__vindex_array.return.result_1d_reshape_broadca": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_array__vindex_array.return.result_1d_reshape_broadca", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5171, "end_line": 5265, "span_ids": ["_vindex_array"], "tokens": 851}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _vindex_array(x, dict_indexes):\n \"\"\"Point wise indexing with only NumPy Arrays.\"\"\"\n\n try:\n broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())\n except ValueError as e:\n # note: error message exactly matches numpy\n shapes_str = \" \".join(str(a.shape) for a in dict_indexes.values())\n raise IndexError(\n \"shape mismatch: indexing arrays could not be \"\n \"broadcast together with shapes \" + shapes_str\n ) from e\n broadcast_shape = broadcast_indexes[0].shape\n\n lookup = dict(zip(dict_indexes, broadcast_indexes))\n flat_indexes = [\n lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)\n ]\n flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))\n\n flat_indexes = [\n list(index) if index is not None else index for index in flat_indexes\n ]\n bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]\n bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]\n axis = _get_axis(flat_indexes)\n token = tokenize(x, flat_indexes)\n out_name = \"vindex-merge-\" + token\n\n points = list()\n for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):\n block_idx = [bisect(b, ind) - 1 for b, ind in zip(bounds2, idx)]\n inblock_idx = [\n ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))\n ]\n points.append((i, tuple(block_idx), tuple(inblock_idx)))\n\n chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]\n chunks.insert(0, (len(points),) if points else (0,))\n chunks = tuple(chunks)\n\n if points:\n per_block = groupby(1, points)\n per_block = {k: v for k, v in per_block.items() if v}\n\n other_blocks = list(\n product(\n *[\n list(range(len(c))) if i is None else [None]\n for i, c in zip(flat_indexes, x.chunks)\n ]\n )\n )\n\n full_slices = [slice(None, None) if i is None else None for i in flat_indexes]\n\n name = \"vindex-slice-\" + token\n vindex_merge_name = \"vindex-merge-\" + token\n dsk = {}\n for okey in other_blocks:\n for i, key in enumerate(per_block):\n dsk[keyname(name, i, okey)] = (\n _vindex_transpose,\n (\n _vindex_slice,\n (x.name,) + interleave_none(okey, key),\n interleave_none(\n full_slices, list(zip(*pluck(2, per_block[key])))\n ),\n ),\n axis,\n )\n dsk[keyname(vindex_merge_name, 0, okey)] = (\n _vindex_merge,\n [list(pluck(0, per_block[key])) for key in per_block],\n [keyname(name, i, okey) for i in range(len(per_block))],\n )\n\n result_1d = Array(\n 
HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),\n out_name,\n chunks,\n x.dtype,\n meta=x._meta,\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n # output has a zero dimension, just create a new zero-shape array with the\n # same dtype\n from .wrap import empty\n\n result_1d = empty(\n tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__get_axis__vindex_transpose.return.block_transpose_axes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__get_axis__vindex_transpose.return.block_transpose_axes_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5068, "end_line": 5097, "span_ids": ["_get_axis", "_vindex_slice", "_vindex_transpose"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_axis(indexes):\n \"\"\"Get axis along which point-wise slicing results lie\n\n This is mostly a hack because I can't figure out NumPy's rule on this and\n can't be bothered to go reading.\n\n >>> _get_axis([[1, 2], None, [1, 2], None])\n 0\n >>> _get_axis([None, [1, 2], [1, 2], None])\n 1\n >>> _get_axis([None, None, [1, 2], [1, 2]])\n 2\n \"\"\"\n ndim = len(indexes)\n indexes = [slice(None, None) if i is None else [0] for i in indexes]\n x = np.empty((2,) * ndim)\n x2 = x[tuple(indexes)]\n return x2.shape.index(1)\n\n\ndef _vindex_slice(block, points):\n \"\"\"Pull out point-wise slices from block\"\"\"\n points = [p if isinstance(p, slice) else list(p) for p in points]\n return block[tuple(points)]\n\n\ndef _vindex_transpose(block, axis):\n \"\"\"Rotate block so that points are on the first dimension\"\"\"\n axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))\n return block.transpose(axes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_merge__vindex_merge.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__vindex_merge__vindex_merge.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5300, "end_line": 5330, "span_ids": ["_vindex_merge"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _vindex_merge(locations, values):\n \"\"\"\n\n >>> locations = [0], [2, 1]\n >>> values = [np.array([[1, 2, 3]]),\n ... np.array([[10, 20, 30], [40, 50, 60]])]\n\n >>> _vindex_merge(locations, values)\n array([[ 1, 2, 3],\n [40, 50, 60],\n [10, 20, 30]])\n \"\"\"\n locations = list(map(list, locations))\n values = list(values)\n\n n = sum(map(len, locations))\n\n shape = list(values[0].shape)\n shape[0] = n\n shape = tuple(shape)\n\n dtype = values[0].dtype\n\n x = np.empty_like(values[0], dtype=dtype, shape=shape)\n\n ind = [slice(None, None) for i in range(x.ndim)]\n for loc, val in zip(locations, values):\n ind[0] = loc\n x[tuple(ind)] = val\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_npy_stack_to_npy_stack.compute_as_if_collection_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_to_npy_stack_to_npy_stack.compute_as_if_collection_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5135, "end_line": 5184, "span_ids": ["to_npy_stack"], "tokens": 457}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_npy_stack(dirname, x, axis=0):\n \"\"\"Write dask array to a stack of .npy files\n\n This partitions the dask.array along one axis and stores each block along\n that axis as a single .npy file in the specified directory\n\n Examples\n --------\n >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP\n >>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP\n\n The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``\n respectively, as is specified by the chunk size along the zeroth axis::\n\n $ tree data/\n data/\n |-- 0.npy\n |-- 1.npy\n |-- 2.npy\n |-- info\n\n The ``info`` file stores the dtype, chunks, and axis information of the array.\n You can load these stacks with the :func:`dask.array.from_npy_stack` function.\n\n >>> y = da.from_npy_stack('data/') # doctest: +SKIP\n\n See Also\n --------\n from_npy_stack\n \"\"\"\n\n chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))\n xx = x.rechunk(chunks)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n meta = {\"chunks\": chunks, \"dtype\": x.dtype, \"axis\": axis}\n\n with open(os.path.join(dirname, \"info\"), \"wb\") as f:\n pickle.dump(meta, f)\n\n name = \"to-npy-stack-\" + str(uuid.uuid1())\n dsk = {\n (name, i): (np.save, os.path.join(dirname, \"%d.npy\" % i), key)\n for i, key in enumerate(core.flatten(xx.__dask_keys__()))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])\n compute_as_if_collection(Array, graph, list(dsk))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_full_like__get_like_function_shapes_chunks.return.shape_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_full_like__get_like_function_shapes_chunks.return.shape_chunks", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 249, "span_ids": ["_get_like_function_shapes_chunks", "full_like"], "tokens": 491}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def full_like(a, fill_value, order=\"C\", dtype=None, chunks=None, name=None, shape=None):\n \"\"\"\n Return a full array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the same shape and type as `a`.\n\n See Also\n --------\n zeros_like : Return an array of zeros with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n empty_like : Return an empty array with shape and type of input.\n zeros : Return a new array setting values to zero.\n ones : Return a new array setting values to one.\n empty : Return a new uninitialized array.\n full : Fill a new array.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return full(\n shape,\n fill_value,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )\n\n\ndef _get_like_function_shapes_chunks(a, chunks, shape):\n \"\"\"\n Helper function for finding shapes and chunks for *_like()\n array creation functions.\n \"\"\"\n if shape is None:\n shape = a.shape\n if chunks is None:\n chunks = a.chunks\n elif chunks is None:\n chunks = \"auto\"\n return shape, chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linspace_linspace.if_retstep_.else_.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linspace_linspace.if_retstep_.else_.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": 
"dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 253, "end_line": 327, "span_ids": ["linspace"], "tokens": 524}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def linspace(\n start, stop, num=50, endpoint=True, retstep=False, chunks=\"auto\", dtype=None\n):\n \"\"\"\n Return `num` evenly spaced values over the closed interval [`start`,\n `stop`].\n\n Parameters\n ----------\n start : scalar\n The starting value of the sequence.\n stop : scalar\n The last value of the sequence.\n num : int, optional\n Number of samples to include in the returned dask array, including the\n endpoints. Default is 50.\n endpoint : bool, optional\n If True, ``stop`` is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (samples, step), where step is the spacing between\n samples. Default is False.\n chunks : int\n The number of samples on each block. Note that the last block will have\n fewer samples if `num % blocksize != 0`\n dtype : dtype, optional\n The type of the output array.\n\n Returns\n -------\n samples : dask array\n step : float, optional\n Only returned if ``retstep`` is True. Size of spacing between samples.\n\n\n See Also\n --------\n dask.array.arange\n \"\"\"\n num = int(num)\n\n if dtype is None:\n dtype = np.linspace(0, 1, 1).dtype\n\n chunks = normalize_chunks(chunks, (num,), dtype=dtype)\n\n range_ = stop - start\n\n div = (num - 1) if endpoint else num\n if div == 0:\n div = 1\n\n step = float(range_) / div\n\n name = \"linspace-\" + tokenize((start, stop, num, endpoint, chunks, dtype))\n\n dsk = {}\n blockstart = start\n\n for i, bs in enumerate(chunks[0]):\n bs_space = bs - 1 if endpoint else bs\n blockstop = blockstart + (bs_space * step)\n task = (\n partial(chunk.linspace, endpoint=endpoint, dtype=dtype),\n blockstart,\n blockstop,\n bs,\n )\n blockstart = blockstart + (step * bs)\n dsk[(name, i)] = task\n\n if retstep:\n return Array(dsk, name, chunks, dtype=dtype), step\n else:\n return Array(dsk, name, chunks, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_arange_arange.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_arange_arange.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 329, "end_line": 414, "span_ids": ["arange"], "tokens": 662}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arange(*args, chunks=\"auto\", like=None, dtype=None, 
**kwargs):\n \"\"\"\n Return evenly spaced values from `start` to `stop` with step size `step`.\n\n The values are half-open [start, stop), so including start and excluding\n stop. This is basically the same as python's range function but for dask\n arrays.\n\n When using a non-integer step, such as 0.1, the results will often not be\n consistent. It is better to use linspace for these cases.\n\n Parameters\n ----------\n start : int, optional\n The starting value of the sequence. The default is 0.\n stop : int\n The end of the interval, this value is excluded from the interval.\n step : int, optional\n The spacing between the values. The default is 1 when not specified.\n chunks : int\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n Defaults to \"auto\" which will automatically determine chunk sizes.\n dtype : numpy.dtype\n Output dtype. Omit to infer it from start, stop, step\n Defaults to ``None``.\n like : array type or ``None``\n Array to extract meta from. Defaults to ``None``.\n\n Returns\n -------\n samples : dask array\n\n See Also\n --------\n dask.array.linspace\n \"\"\"\n if len(args) == 1:\n start = 0\n stop = args[0]\n step = 1\n elif len(args) == 2:\n start = args[0]\n stop = args[1]\n step = 1\n elif len(args) == 3:\n start, stop, step = args\n else:\n raise TypeError(\n \"\"\"\n arange takes 3 positional arguments: arange([start], stop, [step])\n \"\"\"\n )\n\n num = int(max(np.ceil((stop - start) / step), 0))\n\n meta = meta_from_array(like) if like is not None else None\n\n if dtype is None:\n dtype = np.arange(start, stop, step * num if num else step).dtype\n\n chunks = normalize_chunks(chunks, (num,), dtype=dtype)\n\n if kwargs:\n raise TypeError(\"Unexpected keyword argument(s): %s\" % \",\".join(kwargs.keys()))\n\n name = \"arange-\" + tokenize((start, stop, step, chunks, dtype))\n dsk = {}\n elem_count = 0\n\n for i, bs in enumerate(chunks[0]):\n blockstart = start + (elem_count * step)\n blockstop = start + ((elem_count + bs) * step)\n task = (\n partial(chunk.arange, like=like),\n blockstart,\n blockstop,\n step,\n bs,\n dtype,\n )\n dsk[(name, i)] = task\n elem_count += bs\n\n return Array(dsk, name, chunks, dtype=dtype, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_meshgrid_meshgrid.return.grid": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_meshgrid_meshgrid.return.grid", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 417, "end_line": 452, "span_ids": ["meshgrid"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef meshgrid(*xi, sparse=False, indexing=\"xy\", **kwargs):\n sparse = bool(sparse)\n\n if \"copy\" in kwargs:\n raise NotImplementedError(\"`copy` not 
supported\")\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n if indexing not in (\"ij\", \"xy\"):\n raise ValueError(\"`indexing` must be `'ij'` or `'xy'`\")\n\n xi = [asarray(e) for e in xi]\n xi = [e.flatten() for e in xi]\n\n if indexing == \"xy\" and len(xi) > 1:\n xi[0], xi[1] = xi[1], xi[0]\n\n grid = []\n for i in range(len(xi)):\n s = len(xi) * [None]\n s[i] = slice(None)\n s = tuple(s)\n\n r = xi[i][s]\n\n grid.append(r)\n\n if not sparse:\n grid = broadcast_arrays(*grid)\n\n if indexing == \"xy\" and len(xi) > 1:\n grid[0], grid[1] = grid[1], grid[0]\n\n return grid", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_indices_indices.return.grid": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_indices_indices.return.grid", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 472, "span_ids": ["indices"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def indices(dimensions, dtype=int, chunks=\"auto\"):\n \"\"\"\n Implements NumPy's ``indices`` for Dask Arrays.\n\n Generates a grid of indices covering the dimensions provided.\n\n The final array has the shape ``(len(dimensions), *dimensions)``. The\n chunks are used to specify the chunking for axis 1 up to\n ``len(dimensions)``. The 0th axis always has chunks of length 1.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the index grid.\n dtype : dtype, optional\n Type to use for the array. Default is ``int``.\n chunks : sequence of ints, str\n The size of each block. 
Must be one of the following forms:\n\n - A blocksize like (500, 1000)\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n Note that the last block will have fewer samples if ``len(array) % chunks != 0``.\n\n Returns\n -------\n grid : dask array\n \"\"\"\n dimensions = tuple(dimensions)\n dtype = np.dtype(dtype)\n chunks = normalize_chunks(chunks, shape=dimensions, dtype=dtype)\n\n if len(dimensions) != len(chunks):\n raise ValueError(\"Need same number of chunks as dimensions.\")\n\n xi = []\n for i in range(len(dimensions)):\n xi.append(arange(dimensions[i], dtype=dtype, chunks=(chunks[i],)))\n\n grid = []\n if np.prod(dimensions):\n grid = meshgrid(*xi, indexing=\"ij\")\n\n if grid:\n grid = stack(grid)\n else:\n grid = empty((len(dimensions),) + dimensions, dtype=dtype, chunks=(1,) + chunks)\n\n return grid", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_eye_eye.return.Array_eye_name_eye_shap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_eye_eye.return.Array_eye_name_eye_shap", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 510, "end_line": 568, "span_ids": ["eye"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def eye(N, chunks=\"auto\", M=None, k=0, dtype=float):\n \"\"\"\n Return a 2-D Array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n chunks : int, str\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n M : int, optional\n Number of columns in the output. 
If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : Array of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n \"\"\"\n eye = {}\n if M is None:\n M = N\n if dtype is None:\n dtype = float\n\n if not isinstance(chunks, (int, str)):\n raise ValueError(\"chunks must be an int or string\")\n\n vchunks, hchunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype)\n chunks = vchunks[0]\n\n token = tokenize(N, chunks, M, k, dtype)\n name_eye = \"eye-\" + token\n\n for i, vchunk in enumerate(vchunks):\n for j, hchunk in enumerate(hchunks):\n if (j - i - 1) * chunks <= k <= (j - i + 1) * chunks:\n eye[name_eye, i, j] = (\n np.eye,\n vchunk,\n hchunk,\n k - (j - i) * chunks,\n dtype,\n )\n else:\n eye[name_eye, i, j] = (np.zeros, (vchunk, hchunk), dtype)\n return Array(eye, name_eye, shape=(N, M), chunks=(chunks, chunks), dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_repeat_repeat.return.concatenate_out_axis_axi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_repeat_repeat.return.concatenate_out_axis_axi", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 806, "end_line": 853, "span_ids": ["repeat"], "tokens": 401}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef repeat(a, repeats, axis=None):\n if axis is None:\n if a.ndim == 1:\n axis = 0\n else:\n raise NotImplementedError(\"Must supply an integer axis value\")\n\n if not isinstance(repeats, Integral):\n raise NotImplementedError(\"Only integer valued repeats supported\")\n\n if -a.ndim <= axis < 0:\n axis += a.ndim\n elif not 0 <= axis <= a.ndim - 1:\n raise ValueError(\"axis(=%d) out of bounds\" % axis)\n\n if repeats == 0:\n return a[tuple(slice(None) if d != axis else slice(0) for d in range(a.ndim))]\n elif repeats == 1:\n return a\n\n cchunks = cached_cumsum(a.chunks[axis], initial_zero=True)\n slices = []\n for c_start, c_stop in sliding_window(2, cchunks):\n ls = np.linspace(c_start, c_stop, repeats).round(0)\n for ls_start, ls_stop in sliding_window(2, ls):\n if ls_start != ls_stop:\n slices.append(slice(ls_start, ls_stop))\n\n all_slice = slice(None, None, None)\n slices = [\n (all_slice,) * axis + (s,) + (all_slice,) * (a.ndim - axis - 1) for s in slices\n ]\n\n slabs = [a[slc] for slc in slices]\n\n out = []\n for slab in slabs:\n chunks = list(slab.chunks)\n assert len(chunks[axis]) == 1\n chunks[axis] = (chunks[axis][0] * repeats,)\n chunks = tuple(chunks)\n result = slab.map_blocks(\n np.repeat, repeats, axis=axis, chunks=chunks, 
dtype=slab.dtype\n )\n out.append(result)\n\n return concatenate(out, axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tile_tile.return.empty_shape_shape_out_dt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tile_tile.return.empty_shape_shape_out_dt", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 856, "end_line": 879, "span_ids": ["tile"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef tile(A, reps):\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n if any(i < 0 for i in tup):\n raise ValueError(\"Negative `reps` are not allowed.\")\n c = asarray(A)\n\n if all(tup):\n for nrep in tup[::-1]:\n c = nrep * [c]\n return block(c)\n\n d = len(tup)\n if d < c.ndim:\n tup = (1,) * (c.ndim - d) + tup\n if c.ndim < d:\n shape = (1,) * (d - c.ndim) + c.shape\n else:\n shape = c.shape\n shape_out = tuple(s * t for s, t in zip(shape, tup))\n return empty(shape=shape_out, dtype=c.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_expand_pad_value_expand_pad_value.return.pad_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_expand_pad_value_expand_pad_value.return.pad_value", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 882, "end_line": 916, "span_ids": ["expand_pad_value"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def expand_pad_value(array, pad_value):\n if isinstance(pad_value, Number):\n pad_value = array.ndim * ((pad_value, pad_value),)\n elif (\n isinstance(pad_value, Sequence)\n and all(isinstance(pw, Number) for pw in pad_value)\n and len(pad_value) == 1\n ):\n pad_value = array.ndim * ((pad_value[0], pad_value[0]),)\n elif (\n isinstance(pad_value, Sequence)\n and len(pad_value) == 2\n and all(isinstance(pw, Number) for pw in pad_value)\n ):\n pad_value = array.ndim * (tuple(pad_value),)\n elif (\n isinstance(pad_value, Sequence)\n and len(pad_value) == array.ndim\n and all(isinstance(pw, Sequence) for pw in pad_value)\n and all((len(pw) == 2) for pw in pad_value)\n and all(all(isinstance(w, Number) for w in pw) for pw 
in pad_value)\n ):\n pad_value = tuple(tuple(pw) for pw in pad_value)\n elif (\n isinstance(pad_value, Sequence)\n and len(pad_value) == 1\n and isinstance(pad_value[0], Sequence)\n and len(pad_value[0]) == 2\n and all(isinstance(pw, Number) for pw in pad_value[0])\n ):\n pad_value = array.ndim * (tuple(pad_value[0]),)\n else:\n raise TypeError(\"`pad_value` must be composed of integral typed values.\")\n\n return pad_value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_get_pad_shapes_chunks_get_pad_shapes_chunks.return.pad_shapes_pad_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_get_pad_shapes_chunks_get_pad_shapes_chunks.return.pad_shapes_pad_chunks", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 919, "end_line": 935, "span_ids": ["get_pad_shapes_chunks"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_pad_shapes_chunks(array, pad_width, axes):\n \"\"\"\n Helper function for finding shapes and chunks of end pads.\n \"\"\"\n\n pad_shapes = [list(array.shape), list(array.shape)]\n pad_chunks = [list(array.chunks), list(array.chunks)]\n\n for d in axes:\n for i in range(2):\n pad_shapes[i][d] = pad_width[d][i]\n pad_chunks[i][d] = (pad_width[d][i],)\n\n pad_shapes = [tuple(s) for s in pad_shapes]\n pad_chunks = [tuple(c) for c in pad_chunks]\n\n return pad_shapes, pad_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linear_ramp_chunk_linear_ramp_chunk.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_linear_ramp_chunk_linear_ramp_chunk.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 855, "end_line": 875, "span_ids": ["linear_ramp_chunk"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def linear_ramp_chunk(start, stop, num, dim, step):\n \"\"\"\n Helper function to find the linear ramp for a chunk.\n \"\"\"\n num1 = num + 1\n\n shape = list(start.shape)\n shape[dim] = num\n shape = tuple(shape)\n\n dtype = np.dtype(start.dtype)\n\n result = np.empty_like(start, shape=shape, dtype=dtype)\n for i in np.ndindex(start.shape):\n j = 
list(i)\n j[dim] = slice(None)\n j = tuple(j)\n\n result[j] = np.linspace(start[i], stop, num1, dtype=dtype)[1:][::step]\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_edge_pad_edge.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_edge_pad_edge.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 890, "end_line": 955, "span_ids": ["pad_edge"], "tokens": 498}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pad_edge(array, pad_width, mode, **kwargs):\n \"\"\"\n Helper function for padding edges.\n\n Handles the cases where only the values on the edge are needed.\n \"\"\"\n\n kwargs = {k: expand_pad_value(array, v) for k, v in kwargs.items()}\n\n result = array\n for d in range(array.ndim):\n pad_shapes, pad_chunks = get_pad_shapes_chunks(result, pad_width, (d,))\n pad_arrays = [result, result]\n\n if mode == \"constant\":\n from .utils import asarray_safe\n\n constant_values = kwargs[\"constant_values\"][d]\n constant_values = [\n asarray_safe(c, like=meta_from_array(array), dtype=result.dtype)\n for c in constant_values\n ]\n\n pad_arrays = [\n broadcast_to(v, s, c)\n for v, s, c in zip(constant_values, pad_shapes, pad_chunks)\n ]\n elif mode in [\"edge\", \"linear_ramp\"]:\n pad_slices = [result.ndim * [slice(None)], result.ndim * [slice(None)]]\n pad_slices[0][d] = slice(None, 1, None)\n pad_slices[1][d] = slice(-1, None, None)\n pad_slices = [tuple(sl) for sl in pad_slices]\n\n pad_arrays = [result[sl] for sl in pad_slices]\n\n if mode == \"edge\":\n pad_arrays = [\n broadcast_to(a, s, c)\n for a, s, c in zip(pad_arrays, pad_shapes, pad_chunks)\n ]\n elif mode == \"linear_ramp\":\n end_values = kwargs[\"end_values\"][d]\n\n pad_arrays = [\n a.map_blocks(\n linear_ramp_chunk,\n ev,\n pw,\n chunks=c,\n dtype=result.dtype,\n dim=d,\n step=(2 * i - 1),\n )\n for i, (a, ev, pw, c) in enumerate(\n zip(pad_arrays, end_values, pad_width[d], pad_chunks)\n )\n ]\n elif mode == \"empty\":\n pad_arrays = [\n empty_like(array, shape=s, dtype=array.dtype, chunks=c)\n for s, c in zip(pad_shapes, pad_chunks)\n ]\n\n result = concatenate([pad_arrays[0], result, pad_arrays[1]], axis=d)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_reuse_pad_reuse.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_reuse_pad_reuse.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", 
"file_type": "text/x-python", "category": "implementation", "start_line": 1025, "end_line": 1079, "span_ids": ["pad_reuse"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pad_reuse(array, pad_width, mode, **kwargs):\n \"\"\"\n Helper function for padding boundaries with values in the array.\n\n Handles the cases where the padding is constructed from values in\n the array. Namely by reflecting them or tiling them to create periodic\n boundary constraints.\n \"\"\"\n\n if mode in {\"reflect\", \"symmetric\"}:\n reflect_type = kwargs.get(\"reflect_type\", \"even\")\n if reflect_type == \"odd\":\n raise NotImplementedError(\"`pad` does not support `reflect_type` of `odd`.\")\n if reflect_type != \"even\":\n raise ValueError(\n \"unsupported value for reflect_type, must be one of (`even`, `odd`)\"\n )\n\n result = np.empty(array.ndim * (3,), dtype=object)\n for idx in np.ndindex(result.shape):\n select = []\n orient = []\n for i, s, pw in zip(idx, array.shape, pad_width):\n if mode == \"wrap\":\n pw = pw[::-1]\n\n if i < 1:\n if mode == \"reflect\":\n select.append(slice(1, pw[0] + 1, None))\n else:\n select.append(slice(None, pw[0], None))\n elif i > 1:\n if mode == \"reflect\":\n select.append(slice(s - pw[1] - 1, s - 1, None))\n else:\n select.append(slice(s - pw[1], None, None))\n else:\n select.append(slice(None))\n\n if i != 1 and mode in [\"reflect\", \"symmetric\"]:\n orient.append(slice(None, None, -1))\n else:\n orient.append(slice(None))\n\n select = tuple(select)\n orient = tuple(orient)\n\n if mode == \"wrap\":\n idx = tuple(2 - i for i in idx)\n\n result[idx] = array[select][orient]\n\n result = block(result.tolist())\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_stats_pad_stats.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_stats_pad_stats.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1082, "end_line": 1145, "span_ids": ["pad_stats"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pad_stats(array, pad_width, mode, stat_length):\n \"\"\"\n Helper function for padding boundaries with statistics from the array.\n\n In cases where the padding requires computations of statistics from part\n or all of the array, this function helps compute those statistics as\n requested and then adds those statistics onto the boundaries of the array.\n \"\"\"\n\n if mode == \"median\":\n raise NotImplementedError(\"`pad` does not support `mode` of `median`.\")\n\n stat_length = 
expand_pad_value(array, stat_length)\n\n result = np.empty(array.ndim * (3,), dtype=object)\n for idx in np.ndindex(result.shape):\n axes = []\n select = []\n pad_shape = []\n pad_chunks = []\n for d, (i, s, c, w, l) in enumerate(\n zip(idx, array.shape, array.chunks, pad_width, stat_length)\n ):\n if i < 1:\n axes.append(d)\n select.append(slice(None, l[0], None))\n pad_shape.append(w[0])\n pad_chunks.append(w[0])\n elif i > 1:\n axes.append(d)\n select.append(slice(s - l[1], None, None))\n pad_shape.append(w[1])\n pad_chunks.append(w[1])\n else:\n select.append(slice(None))\n pad_shape.append(s)\n pad_chunks.append(c)\n\n axes = tuple(axes)\n select = tuple(select)\n pad_shape = tuple(pad_shape)\n pad_chunks = tuple(pad_chunks)\n\n result_idx = array[select]\n if axes:\n if mode == \"maximum\":\n result_idx = result_idx.max(axis=axes, keepdims=True)\n elif mode == \"mean\":\n result_idx = result_idx.mean(axis=axes, keepdims=True)\n elif mode == \"minimum\":\n result_idx = result_idx.min(axis=axes, keepdims=True)\n\n result_idx = broadcast_to(result_idx, pad_shape, chunks=pad_chunks)\n\n if mode == \"mean\":\n if np.issubdtype(array.dtype, np.integer):\n result_idx = rint(result_idx)\n result_idx = result_idx.astype(array.dtype)\n\n result[idx] = result_idx\n\n result = block(result.tolist())\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_wrapped_pad_func_pad_udf.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_wrapped_pad_func_pad_udf.return.result", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1148, "end_line": 1187, "span_ids": ["pad_udf", "wrapped_pad_func"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrapped_pad_func(array, pad_func, iaxis_pad_width, iaxis, pad_func_kwargs):\n result = np.empty_like(array)\n for i in np.ndindex(array.shape[:iaxis] + array.shape[iaxis + 1 :]):\n i = i[:iaxis] + (slice(None),) + i[iaxis:]\n result[i] = pad_func(array[i], iaxis_pad_width, iaxis, pad_func_kwargs)\n\n return result\n\n\ndef pad_udf(array, pad_width, mode, **kwargs):\n \"\"\"\n Helper function for padding boundaries with a user defined function.\n\n In cases where the padding requires a custom user defined function be\n applied to the array, this function assists in the prepping and\n application of this function to the Dask Array to construct the desired\n boundaries.\n \"\"\"\n\n result = pad_edge(array, pad_width, \"constant\", constant_values=0)\n\n chunks = result.chunks\n for d in range(result.ndim):\n result = result.rechunk(\n chunks[:d] + (result.shape[d : d + 1],) + chunks[d + 1 :]\n )\n\n result = result.map_blocks(\n wrapped_pad_func,\n name=\"pad\",\n dtype=result.dtype,\n pad_func=mode,\n iaxis_pad_width=pad_width[d],\n iaxis=d,\n pad_func_kwargs=kwargs,\n )\n\n result = 
result.rechunk(chunks)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_pad_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1111, "end_line": 1160, "span_ids": ["pad"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef pad(array, pad_width, mode=\"constant\", **kwargs):\n array = asarray(array)\n\n pad_width = expand_pad_value(array, pad_width)\n\n if callable(mode):\n return pad_udf(array, pad_width, mode, **kwargs)\n\n # Make sure that no unsupported keywords were passed for the current mode\n allowed_kwargs = {\n \"empty\": [],\n \"edge\": [],\n \"wrap\": [],\n \"constant\": [\"constant_values\"],\n \"linear_ramp\": [\"end_values\"],\n \"maximum\": [\"stat_length\"],\n \"mean\": [\"stat_length\"],\n \"median\": [\"stat_length\"],\n \"minimum\": [\"stat_length\"],\n \"reflect\": [\"reflect_type\"],\n \"symmetric\": [\"reflect_type\"],\n }\n try:\n unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])\n except KeyError as e:\n raise ValueError(f\"mode '{mode}' is not supported\") from e\n if unsupported_kwargs:\n raise ValueError(\n \"unsupported keyword arguments for mode '{}': {}\".format(\n mode, unsupported_kwargs\n )\n )\n\n if mode in {\"maximum\", \"mean\", \"median\", \"minimum\"}:\n stat_length = kwargs.get(\"stat_length\", tuple((n, n) for n in array.shape))\n return pad_stats(array, pad_width, mode, stat_length)\n elif mode == \"constant\":\n kwargs.setdefault(\"constant_values\", 0)\n return pad_edge(array, pad_width, mode, **kwargs)\n elif mode == \"linear_ramp\":\n kwargs.setdefault(\"end_values\", 0)\n return pad_edge(array, pad_width, mode, **kwargs)\n elif mode in {\"edge\", \"empty\"}:\n return pad_edge(array, pad_width, mode)\n elif mode in [\"reflect\", \"symmetric\", \"wrap\"]:\n return pad_reuse(array, pad_width, mode, **kwargs)\n\n assert False, \"unreachable\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_np_chunk_einsum.return.chunk_reshape_chunk_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_np_chunk_einsum.return.chunk_reshape_chunk_shape", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports", "chunk_einsum"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nfrom numpy.compat import basestring\n\nfrom ..utils import derived_from\nfrom .core import asarray, blockwise, einsum_lookup\n\neinsum_symbols = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\neinsum_symbols_set = set(einsum_symbols)\n\n\ndef chunk_einsum(*operands, **kwargs):\n subscripts = kwargs.pop(\"subscripts\")\n ncontract_inds = kwargs.pop(\"ncontract_inds\")\n dtype = kwargs.pop(\"kernel_dtype\")\n einsum = einsum_lookup.dispatch(type(operands[0]))\n chunk = einsum(subscripts, *operands, dtype=dtype, **kwargs)\n\n # Avoid concatenate=True in blockwise by adding 1's\n # for the contracted dimensions\n return chunk.reshape(chunk.shape + (1,) * ncontract_inds)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py__This_function_duplicate_parse_einsum_input._Parse_ellipses": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py__This_function_duplicate_parse_einsum_input._Parse_ellipses", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 23, "end_line": 109, "span_ids": ["chunk_einsum", "parse_einsum_input"], "tokens": 690}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# This function duplicates numpy's _parse_einsum_input() function\ndef parse_einsum_input(operands):\n \"\"\"\n A reproduction of numpy's _parse_einsum_input()\n which in itself is a reproduction of\n c side einsum parsing in python.\n\n Returns\n -------\n input_strings : str\n Parsed input strings\n output_string : str\n Parsed output string\n operands : list of array_like\n The operands to use in the numpy contraction\n Examples\n --------\n The operand list is simplified to reduce printing:\n >> a = np.random.rand(4, 4)\n >> b = np.random.rand(4, 4, 4)\n >> __parse_einsum_input(('...a,...a->...', a, b))\n ('za,xza', 'xz', [a, b])\n >> __parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))\n ('za,xza', 'xz', [a, b])\n \"\"\"\n\n if len(operands) == 0:\n raise ValueError(\"No input operands\")\n\n if isinstance(operands[0], basestring):\n subscripts = operands[0].replace(\" \", \"\")\n operands = [asarray(o) for o in operands[1:]]\n\n # Ensure all characters are valid\n for s in subscripts:\n if s in \".,->\":\n continue\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n\n else:\n tmp_operands = list(operands)\n operand_list = []\n subscript_list = []\n for p in range(len(operands) // 2):\n operand_list.append(tmp_operands.pop(0))\n subscript_list.append(tmp_operands.pop(0))\n\n output_list = tmp_operands[-1] if len(tmp_operands) 
else None\n operands = [asarray(v) for v in operand_list]\n subscripts = \"\"\n last = len(subscript_list) - 1\n for num, sub in enumerate(subscript_list):\n for s in sub:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n \"either int or Ellipsis\"\n )\n if num != last:\n subscripts += \",\"\n\n if output_list is not None:\n subscripts += \"->\"\n for s in output_list:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n \"either int or Ellipsis\"\n )\n # Check for proper \"->\"\n if (\"-\" in subscripts) or (\">\" in subscripts):\n invalid = (subscripts.count(\"-\") > 1) or (subscripts.count(\">\") > 1)\n if invalid or (subscripts.count(\"->\") != 1):\n raise ValueError(\"Subscripts can only contain one '->'.\")\n\n # Parse ellipses\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_parse_einsum_input.if_in_subscripts__parse_einsum_input.return._input_subscripts_output": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_parse_einsum_input.if_in_subscripts__parse_einsum_input.return._input_subscripts_output", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 110, "end_line": 193, "span_ids": ["parse_einsum_input"], "tokens": 710}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_einsum_input(operands):\n # ... 
other code\n if \".\" in subscripts:\n used = subscripts.replace(\".\", \"\").replace(\",\", \"\").replace(\"->\", \"\")\n unused = list(einsum_symbols_set - set(used))\n ellipse_inds = \"\".join(unused)\n longest = 0\n\n if \"->\" in subscripts:\n input_tmp, output_sub = subscripts.split(\"->\")\n split_subscripts = input_tmp.split(\",\")\n out_sub = True\n else:\n split_subscripts = subscripts.split(\",\")\n out_sub = False\n\n for num, sub in enumerate(split_subscripts):\n if \".\" in sub:\n if (sub.count(\".\") != 3) or (sub.count(\"...\") != 1):\n raise ValueError(\"Invalid Ellipses.\")\n\n # Take into account numerical values\n if operands[num].shape == ():\n ellipse_count = 0\n else:\n ellipse_count = max(operands[num].ndim, 1)\n ellipse_count -= len(sub) - 3\n\n if ellipse_count > longest:\n longest = ellipse_count\n\n if ellipse_count < 0:\n raise ValueError(\"Ellipses lengths do not match.\")\n elif ellipse_count == 0:\n split_subscripts[num] = sub.replace(\"...\", \"\")\n else:\n rep_inds = ellipse_inds[-ellipse_count:]\n split_subscripts[num] = sub.replace(\"...\", rep_inds)\n\n subscripts = \",\".join(split_subscripts)\n if longest == 0:\n out_ellipse = \"\"\n else:\n out_ellipse = ellipse_inds[-longest:]\n\n if out_sub:\n subscripts += \"->\" + output_sub.replace(\"...\", out_ellipse)\n else:\n # Special care for outputless ellipses\n output_subscript = \"\"\n tmp_subscripts = subscripts.replace(\",\", \"\")\n for s in sorted(set(tmp_subscripts)):\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n normal_inds = \"\".join(sorted(set(output_subscript) - set(out_ellipse)))\n\n subscripts += \"->\" + out_ellipse + normal_inds\n\n # Build output string if does not exist\n if \"->\" in subscripts:\n input_subscripts, output_subscript = subscripts.split(\"->\")\n else:\n input_subscripts = subscripts\n # Build output subscripts\n tmp_subscripts = subscripts.replace(\",\", \"\")\n output_subscript = \"\"\n for s in sorted(set(tmp_subscripts)):\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n\n # Make sure output subscripts are in the input\n for char in output_subscript:\n if char not in input_subscripts:\n raise ValueError(\"Output character %s did not appear in the input\" % char)\n\n # Make sure number operands is equivalent to the number of terms\n if len(input_subscripts.split(\",\")) != len(operands):\n raise ValueError(\n \"Number of einsum subscripts must be equal to the number of operands.\"\n )\n\n return (input_subscripts, output_subscript, operands)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_einsum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/einsumfuncs.py_einsum_", "embedding": null, "metadata": {"file_path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "file_type": "text/x-python", "category": "implementation", "start_line": 196, "end_line": 256, "span_ids": ["einsum"], "tokens": 483}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef einsum(*operands, dtype=None, optimize=False, split_every=None, **kwargs):\n \"\"\"Dask added an additional keyword-only argument ``split_every``.\n\n split_every: int >= 2 or dict(axis: int), optional\n Determines the depth of the recursive aggregation.\n Defaults to ``None``, which lets dask heuristically\n decide a good default.\n \"\"\"\n\n einsum_dtype = dtype\n\n inputs, outputs, ops = parse_einsum_input(operands)\n subscripts = \"->\".join((inputs, outputs))\n\n # Infer the output dtype from operands\n if dtype is None:\n dtype = np.result_type(*[o.dtype for o in ops])\n\n if optimize is not False:\n # Avoid computation of dask arrays within np.einsum_path\n # by passing in small numpy arrays broadcasted\n # up to the right shape\n fake_ops = [np.broadcast_to(o.dtype.type(0), shape=o.shape) for o in ops]\n optimize, _ = np.einsum_path(subscripts, *fake_ops, optimize=optimize)\n\n inputs = [tuple(i) for i in inputs.split(\",\")]\n\n # Set of all indices\n all_inds = {a for i in inputs for a in i}\n\n # Which indices are contracted?\n contract_inds = all_inds - set(outputs)\n ncontract_inds = len(contract_inds)\n\n # Introduce the contracted indices into the blockwise product\n # so that we get numpy arrays, not lists\n result = blockwise(\n chunk_einsum,\n tuple(outputs) + tuple(contract_inds),\n *(a for ap in zip(ops, inputs) for a in ap),\n # blockwise parameters\n adjust_chunks={ind: 1 for ind in contract_inds},\n dtype=dtype,\n # np.einsum parameters\n subscripts=subscripts,\n kernel_dtype=einsum_dtype,\n ncontract_inds=ncontract_inds,\n optimize=optimize,\n **kwargs,\n )\n\n # Now reduce over any extra contraction dimensions\n if ncontract_inds > 0:\n size = len(outputs)\n return result.sum(\n axis=list(range(size, size + ncontract_inds)), split_every=split_every\n )\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_inspect__hfft_out_chunks.return.chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_inspect__hfft_out_chunks.return.chunks", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 79, "span_ids": ["_hfft_out_chunks", "imports", "_rfft_out_chunks", "_irfft_out_chunks", "_fft_out_chunks"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import inspect\nfrom collections.abc import Sequence\n\nimport numpy as np\n\ntry:\n import scipy\n import scipy.fftpack\nexcept ImportError:\n scipy = None\n\nfrom ..utils import derived_from, skip_doctest\nfrom .core import concatenate as _concatenate\nfrom .creation import arange as _arange\n\nchunk_error = (\n \"Dask array only 
supports taking an FFT along an axis that \\n\"\n \"has a single chunk. An FFT operation was tried on axis %s \\n\"\n \"which has chunks %s. To change the array's chunks use \"\n \"dask.Array.rechunk.\"\n)\n\nfft_preamble = \"\"\"\n Wrapping of %s\n\n The axis along which the FFT is applied must have only one chunk. To change\n the array's chunking use dask.Array.rechunk.\n\n The %s docstring follows below:\n\n \"\"\"\n\n\ndef _fft_out_chunks(a, s, axes):\n \"\"\"For computing the output chunks of [i]fft*\"\"\"\n if s is None:\n return a.chunks\n chunks = list(a.chunks)\n for i, axis in enumerate(axes):\n chunks[axis] = (s[i],)\n return chunks\n\n\ndef _rfft_out_chunks(a, s, axes):\n \"\"\"For computing the output chunks of rfft*\"\"\"\n if s is None:\n s = [a.chunks[axis][0] for axis in axes]\n s = list(s)\n s[-1] = s[-1] // 2 + 1\n chunks = list(a.chunks)\n for i, axis in enumerate(axes):\n chunks[axis] = (s[i],)\n return chunks\n\n\ndef _irfft_out_chunks(a, s, axes):\n \"\"\"For computing the output chunks of irfft*\"\"\"\n if s is None:\n s = [a.chunks[axis][0] for axis in axes]\n s[-1] = 2 * (s[-1] - 1)\n chunks = list(a.chunks)\n for i, axis in enumerate(axes):\n chunks[axis] = (s[i],)\n return chunks\n\n\ndef _hfft_out_chunks(a, s, axes):\n assert len(axes) == 1\n\n axis = axes[0]\n\n if s is None:\n s = [2 * (a.chunks[axis][0] - 1)]\n\n n = s[0]\n\n chunks = list(a.chunks)\n chunks[axis] = (n,)\n return chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__ihfft_out_chunks__out_chunk_fns._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__ihfft_out_chunks__out_chunk_fns._", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 83, "end_line": 111, "span_ids": ["impl:11", "_ihfft_out_chunks"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _ihfft_out_chunks(a, s, axes):\n assert len(axes) == 1\n\n axis = axes[0]\n\n if s is None:\n s = [a.chunks[axis][0]]\n else:\n assert len(s) == 1\n\n n = s[0]\n\n chunks = list(a.chunks)\n if n % 2 == 0:\n m = (n // 2) + 1\n else:\n m = (n + 1) // 2\n chunks[axis] = (m,)\n return chunks\n\n\n_out_chunk_fns = {\n \"fft\": _fft_out_chunks,\n \"ifft\": _fft_out_chunks,\n \"rfft\": _rfft_out_chunks,\n \"irfft\": _irfft_out_chunks,\n \"hfft\": _hfft_out_chunks,\n \"ihfft\": _ihfft_out_chunks,\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap_fft_wrap.try_.except_KeyError_.raise_ValueError_Given_u": {"__data__": {"id_": 
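As the chunk_error message above says, each transformed axis must consist of a single chunk. A minimal sketch of working around that with rechunk:

import dask.array as da

x = da.random.normal(size=(8, 1024), chunks=(8, 256))

# Axis -1 has four chunks, so da.fft.fft(x, axis=-1) would raise ValueError.
# Rechunk so the FFT axis is one block, splitting axis 0 instead.
x1 = x.rechunk({0: 1, 1: -1})
X = da.fft.fft(x1, axis=-1)

assert X.shape == (8, 1024)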
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap_fft_wrap.try_.except_KeyError_.raise_ValueError_Given_u", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 114, "end_line": 155, "span_ids": ["fft_wrap"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fft_wrap(fft_func, kind=None, dtype=None):\n \"\"\"Wrap 1D, 2D, and ND real and complex FFT functions\n\n Takes a function that behaves like ``numpy.fft`` functions and\n a specified kind to match it to that are named after the functions\n in the ``numpy.fft`` API.\n\n Supported kinds include:\n\n * fft\n * fft2\n * fftn\n * ifft\n * ifft2\n * ifftn\n * rfft\n * rfft2\n * rfftn\n * irfft\n * irfft2\n * irfftn\n * hfft\n * ihfft\n\n Examples\n --------\n >>> import dask.array.fft as dff\n >>> parallel_fft = dff.fft_wrap(np.fft.fft)\n >>> parallel_ifft = dff.fft_wrap(np.fft.ifft)\n \"\"\"\n if scipy is not None:\n if fft_func is scipy.fftpack.rfft:\n raise ValueError(\"SciPy's `rfft` doesn't match the NumPy API.\")\n elif fft_func is scipy.fftpack.irfft:\n raise ValueError(\"SciPy's `irfft` doesn't match the NumPy API.\")\n\n if kind is None:\n kind = fft_func.__name__\n try:\n out_chunk_fn = _out_chunk_fns[kind.rstrip(\"2n\")]\n except KeyError:\n raise ValueError(\"Given unknown `kind` %s.\" % kind)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.func_fft_wrap.func.return.a_map_blocks_fft_func_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.func_fft_wrap.func.return.a_map_blocks_fft_func_a", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 156, "end_line": 191, "span_ids": ["fft_wrap"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fft_wrap(fft_func, kind=None, dtype=None):\n # ... 
other code\n\n def func(a, s=None, axes=None):\n if axes is None:\n if kind.endswith(\"2\"):\n axes = (-2, -1)\n elif kind.endswith(\"n\"):\n if s is None:\n axes = tuple(range(a.ndim))\n else:\n axes = tuple(range(len(s)))\n else:\n axes = (-1,)\n else:\n if len(set(axes)) < len(axes):\n raise ValueError(\"Duplicate axes not allowed.\")\n\n _dtype = dtype\n if _dtype is None:\n sample = np.ones(a.ndim * (8,), dtype=a.dtype)\n try:\n _dtype = fft_func(sample, axes=axes).dtype\n except TypeError:\n _dtype = fft_func(sample).dtype\n\n for each_axis in axes:\n if len(a.chunks[each_axis]) != 1:\n raise ValueError(chunk_error % (each_axis, a.chunks[each_axis]))\n\n chunks = out_chunk_fn(a, s, axes)\n\n args = (s, axes)\n if kind.endswith(\"fft\"):\n axis = None if axes is None else axes[0]\n n = None if s is None else s[0]\n args = (n, axis)\n\n return a.map_blocks(fft_func, *args, dtype=_dtype, chunks=chunks)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.if_kind_endswith_fft__fft_wrap.return.func": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_wrap.if_kind_endswith_fft__fft_wrap.return.func", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 193, "end_line": 215, "span_ids": ["fft_wrap"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fft_wrap(fft_func, kind=None, dtype=None):\n # ... 
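The axes defaulting in ``func`` above is worth spelling out: a "2" kind uses the last two axes, while an "n" kind with ``s`` given uses the first ``len(s)`` axes, which differs from bare ``np.fft.fftn`` (whose own default with ``s`` is the trailing axes). A sketch using the public wrappers produced by this factory:

import numpy as np
import dask.array as da

x = da.random.normal(size=(16, 16, 4), chunks=(16, 16, 4))

# kind "fft2" defaults to axes=(-2, -1).
a = da.fft.fft2(x)
np.testing.assert_allclose(a.compute(), np.fft.fft2(x.compute()))

# kind "fftn" with s given defaults to axes=tuple(range(len(s))) = (0, 1).
b = da.fft.fftn(x, s=(16, 16))
np.testing.assert_allclose(
    b.compute(), np.fft.fftn(x.compute(), s=(16, 16), axes=(0, 1))
)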
other code\n\n if kind.endswith(\"fft\"):\n _func = func\n\n def func(a, n=None, axis=None): # type: ignore\n s = None\n if n is not None:\n s = (n,)\n\n axes = None\n if axis is not None:\n axes = (axis,)\n\n return _func(a, s, axes)\n\n func_mod = inspect.getmodule(fft_func)\n func_name = fft_func.__name__\n func_fullname = func_mod.__name__ + \".\" + func_name\n if fft_func.__doc__ is not None:\n func.__doc__ = fft_preamble % (2 * (func_fullname,))\n func.__doc__ += fft_func.__doc__\n func.__doc__ = skip_doctest(func.__doc__)\n func.__name__ = func_name\n return func", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_rfftfreq.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py_fft_rfftfreq.return.r", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 217, "end_line": 258, "span_ids": ["fftfreq", "impl:13", "rfftfreq", "_fftfreq_block"], "tokens": 331}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "fft = fft_wrap(np.fft.fft)\nfft2 = fft_wrap(np.fft.fft2)\nfftn = fft_wrap(np.fft.fftn)\nifft = fft_wrap(np.fft.ifft)\nifft2 = fft_wrap(np.fft.ifft2)\nifftn = fft_wrap(np.fft.ifftn)\nrfft = fft_wrap(np.fft.rfft)\nrfft2 = fft_wrap(np.fft.rfft2)\nrfftn = fft_wrap(np.fft.rfftn)\nirfft = fft_wrap(np.fft.irfft)\nirfft2 = fft_wrap(np.fft.irfft2)\nirfftn = fft_wrap(np.fft.irfftn)\nhfft = fft_wrap(np.fft.hfft)\nihfft = fft_wrap(np.fft.ihfft)\n\n\ndef _fftfreq_block(i, n, d):\n r = i.copy()\n r[i >= (n + 1) // 2] -= n\n r /= n * d\n return r\n\n\n@derived_from(np.fft)\ndef fftfreq(n, d=1.0, chunks=\"auto\"):\n n = int(n)\n d = float(d)\n\n r = _arange(n, dtype=float, chunks=chunks)\n\n return r.map_blocks(_fftfreq_block, dtype=float, n=n, d=d)\n\n\n@derived_from(np.fft)\ndef rfftfreq(n, d=1.0, chunks=\"auto\"):\n n = int(n)\n d = float(d)\n\n r = _arange(n // 2 + 1, dtype=float, chunks=chunks)\n r /= n * d\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__fftshift_helper_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/fft.py__fftshift_helper_", "embedding": null, "metadata": {"file_path": "dask/array/fft.py", "file_name": "fft.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 296, "span_ids": ["ifftshift", "_fftshift_helper", "fftshift"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
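fftfreq above reproduces NumPy's frequency layout blockwise: indices at or past (n + 1) // 2 are shifted down by n, then everything is scaled by 1 / (n * d); rfftfreq needs no shift at all. A quick consistency sketch:

import numpy as np
import dask.array as da

f = da.fft.fftfreq(10, d=0.5, chunks=4)
rf = da.fft.rfftfreq(10, d=0.5, chunks=4)

np.testing.assert_allclose(f.compute(), np.fft.fftfreq(10, d=0.5))
np.testing.assert_allclose(rf.compute(), np.fft.rfftfreq(10, d=0.5))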
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _fftshift_helper(x, axes=None, inverse=False):\n if axes is None:\n axes = list(range(x.ndim))\n elif not isinstance(axes, Sequence):\n axes = (axes,)\n\n y = x\n for i in axes:\n n = y.shape[i]\n n_2 = (n + int(inverse is False)) // 2\n\n l = y.ndim * [slice(None)]\n l[i] = slice(None, n_2)\n l = tuple(l)\n\n r = y.ndim * [slice(None)]\n r[i] = slice(n_2, None)\n r = tuple(r)\n\n y = _concatenate([y[r], y[l]], axis=i)\n\n if len(x.chunks[i]) == 1:\n y = y.rechunk({i: x.chunks[i]})\n\n return y\n\n\n@derived_from(np.fft)\ndef fftshift(x, axes=None):\n return _fftshift_helper(x, axes=axes, inverse=False)\n\n\n@derived_from(np.fft)\ndef ifftshift(x, axes=None):\n return _fftshift_helper(x, axes=axes, inverse=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__parse_gufunc_signature__parse_gufunc_signature.return.ins_outs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__parse_gufunc_signature__parse_gufunc_signature.return.ins_outs", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 26, "end_line": 54, "span_ids": ["_parse_gufunc_signature"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _parse_gufunc_signature(signature):\n \"\"\"\n Parse string signatures for a generalized universal function.\n\n Arguments\n ---------\n signature : string\n Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``\n for ``np.matmul``.\n\n Returns\n -------\n Tuple of input and output core dimensions parsed from the signature, each\n of the form List[Tuple[str, ...]], except for one output. 
For one output\n core dimension is not a list, but of the form Tuple[str, ...]\n \"\"\"\n signature = re.sub(r\"\\s+\", \"\", signature)\n if not re.match(_SIGNATURE, signature):\n raise ValueError(f\"Not a valid gufunc signature: {signature}\")\n in_txt, out_txt = signature.split(\"->\")\n ins = [\n tuple(re.findall(_DIMENSION_NAME, arg)) for arg in re.findall(_ARGUMENT, in_txt)\n ]\n outs = [\n tuple(re.findall(_DIMENSION_NAME, arg))\n for arg in re.findall(_ARGUMENT, out_txt)\n ]\n outs = outs[0] if ((len(outs) == 1) and (out_txt[-1] != \",\")) else outs\n return ins, outs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes__validate_normalize_axes._Assert_we_have_as_many_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes__validate_normalize_axes._Assert_we_have_as_many_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 143, "span_ids": ["_validate_normalize_axes"], "tokens": 784}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _validate_normalize_axes(axes, axis, keepdims, input_coredimss, output_coredimss):\n \"\"\"\n Validates logic of `axes`/`axis`/`keepdims` arguments and normalize them.\n Refer to [1]_ for details\n\n Arguments\n ---------\n axes: List of tuples\n axis: int\n keepdims: bool\n input_coredimss: List of Tuple of dims\n output_coredimss: List of Tuple of dims\n\n Returns\n -------\n input_axes: List of tuple of int\n output_axes: List of tuple of int\n\n References\n ----------\n .. 
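A sketch of what this parser returns; note it is a private helper, so the import path below is an implementation detail that may change:

from dask.array.gufunc import _parse_gufunc_signature

# One output: returned as a bare tuple, not a one-element list.
ins, outs = _parse_gufunc_signature("(m,n),(n,p)->(m,p)")
assert ins == [("m", "n"), ("n", "p")]
assert outs == ("m", "p")

# Two scalar outputs: outs stays a list of (empty) tuples.
ins, outs = _parse_gufunc_signature("(i)->(),()")
assert outs == [(), ()]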
[1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html#optional-keyword-arguments\n \"\"\"\n nin = len(input_coredimss)\n nout = 1 if not isinstance(output_coredimss, list) else len(output_coredimss)\n\n if axes is not None and axis is not None:\n raise ValueError(\n \"Only one of `axis` or `axes` keyword arguments should be given\"\n )\n if axes and not isinstance(axes, list):\n raise ValueError(\"`axes` has to be of type list\")\n\n output_coredimss = output_coredimss if nout > 1 else [output_coredimss]\n filtered_core_dims = list(filter(len, input_coredimss))\n nr_outputs_with_coredims = len([True for x in output_coredimss if len(x) > 0])\n\n if keepdims:\n if nr_outputs_with_coredims > 0:\n raise ValueError(\"`keepdims` can only be used for scalar outputs\")\n output_coredimss = len(output_coredimss) * [filtered_core_dims[0]]\n\n core_dims = input_coredimss + output_coredimss\n if axis is not None:\n if not isinstance(axis, int):\n raise ValueError(\"`axis` argument has to be an integer value\")\n if filtered_core_dims:\n cd0 = filtered_core_dims[0]\n if len(cd0) != 1:\n raise ValueError(\n \"`axis` can be used only, if one core dimension is present\"\n )\n for cd in filtered_core_dims:\n if cd0 != cd:\n raise ValueError(\n \"To use `axis`, all core dimensions have to be equal\"\n )\n\n # Expand defaults or axis\n if axes is None:\n if axis is not None:\n axes = [(axis,) if cd else tuple() for cd in core_dims]\n else:\n axes = [tuple(range(-len(icd), 0)) for icd in core_dims]\n elif not isinstance(axes, list):\n raise ValueError(\"`axes` argument has to be a list\")\n axes = [(a,) if isinstance(a, int) else a for a in axes]\n\n if (\n (nr_outputs_with_coredims == 0)\n and (nin != len(axes))\n and (nin + nout != len(axes))\n ) or ((nr_outputs_with_coredims > 0) and (nin + nout != len(axes))):\n raise ValueError(\n \"The number of `axes` entries is not equal the number of input and output arguments\"\n )\n\n # Treat outputs\n output_axes = axes[nin:]\n output_axes = (\n output_axes\n if output_axes\n else [tuple(range(-len(ocd), 0)) for ocd in output_coredimss]\n )\n input_axes = axes[:nin]\n\n # Assert we have as many axes as output core dimensions\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes.for_idx_iax_icd_in_en__validate_normalize_axes.return.input_axes_output_axes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py__validate_normalize_axes.for_idx_iax_icd_in_en__validate_normalize_axes.return.input_axes_output_axes", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 144, "end_line": 172, "span_ids": ["_validate_normalize_axes"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _validate_normalize_axes(axes, axis, keepdims, input_coredimss, output_coredimss):\n # ... 
other code\n for idx, (iax, icd) in enumerate(zip(input_axes, input_coredimss)):\n if len(iax) != len(icd):\n raise ValueError(\n \"The number of `axes` entries for argument #{} is not equal \"\n \"the number of respective input core dimensions in signature\".format(\n idx\n )\n )\n if not keepdims:\n for idx, (oax, ocd) in enumerate(zip(output_axes, output_coredimss)):\n if len(oax) != len(ocd):\n raise ValueError(\n \"The number of `axes` entries for argument #{} is not equal \"\n \"the number of respective output core dimensions in signature\".format(\n idx\n )\n )\n else:\n if input_coredimss:\n icd0 = input_coredimss[0]\n for icd in input_coredimss:\n if icd0 != icd:\n raise ValueError(\n \"To use `keepdims`, all core dimensions have to be equal\"\n )\n iax0 = input_axes[0]\n output_axes = [iax0 for _ in output_coredimss]\n\n return input_axes, output_axes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc_apply_gufunc._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc_apply_gufunc._", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 174, "end_line": 289, "span_ids": ["apply_gufunc"], "tokens": 1304}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(\n func,\n signature,\n *args,\n axes=None,\n axis=None,\n keepdims=False,\n output_dtypes=None,\n output_sizes=None,\n vectorize=None,\n allow_rechunk=False,\n meta=None,\n **kwargs,\n):\n \"\"\"\n Apply a generalized ufunc or similar python function to arrays.\n\n ``signature`` determines if the function consumes or produces core\n dimensions. The remaining dimensions in given input arrays (``*args``)\n are considered loop dimensions and are required to broadcast\n naturally against each other.\n\n In other terms, this function is like ``np.vectorize``, but for\n the blocks of dask arrays. If the function itself shall also\n be vectorized use ``vectorize=True`` for convenience.\n\n Parameters\n ----------\n func : callable\n Function to call like ``func(*args, **kwargs)`` on input arrays\n (``*args``) that returns an array or tuple of arrays. If multiple\n arguments with non-matching dimensions are supplied, this function is\n expected to vectorize (broadcast) over axes of positional arguments in\n the style of NumPy universal functions [1]_ (if this is not the case,\n set ``vectorize=True``). 
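In terms of the normalization above, passing ``axis`` for a signature like "(i),(i)->()" expands to ``axes=[(axis,), (axis,), ()]``. A sketch through the public apply_gufunc (the pearson function here is an illustrative example, not part of dask):

import numpy as np
import dask.array as da

def pearson(x, y):
    x = x - x.mean(axis=-1, keepdims=True)
    y = y - y.mean(axis=-1, keepdims=True)
    return (x * y).sum(axis=-1) / np.sqrt(
        (x * x).sum(axis=-1) * (y * y).sum(axis=-1)
    )

a = da.random.normal(size=(10, 20), chunks=(5, 20))
b = da.random.normal(size=(10, 20), chunks=(5, 20))

# axis=-1 is shorthand for axes=[(-1,), (-1,), ()] with this signature.
r = da.apply_gufunc(pearson, "(i),(i)->()", a, b, axis=-1, output_dtypes=float)
assert r.shape == (10,)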
If this function returns multiple outputs,\n ``output_core_dims`` has to be set as well.\n signature: string\n Specifies what core dimensions are consumed and produced by ``func``.\n According to the specification of numpy.gufunc signature [2]_\n *args : numeric\n Input arrays or scalars to the callable function.\n axes: List of tuples, optional, keyword only\n A list of tuples with indices of axes a generalized ufunc should operate on.\n For instance, for a signature of ``\"(i,j),(j,k)->(i,k)\"`` appropriate for\n matrix multiplication, the base elements are two-dimensional matrices\n and these are taken to be stored in the two last axes of each argument. The\n corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.\n For simplicity, for generalized ufuncs that operate on 1-dimensional arrays\n (vectors), a single integer is accepted instead of a single-element tuple,\n and for generalized ufuncs for which all outputs are scalars, the output\n tuples can be omitted.\n axis: int, optional, keyword only\n A single axis over which a generalized ufunc should operate. This is a short-cut\n for ufuncs that operate over a single, shared core dimension, equivalent to passing\n in axes with entries of (axis,) for each single-core-dimension argument and ``()`` for\n all others. For instance, for a signature ``\"(i),(i)->()\"``, it is equivalent to passing\n in ``axes=[(axis,), (axis,), ()]``.\n keepdims: bool, optional, keyword only\n If this is set to True, axes which are reduced over will be left in the result as\n a dimension with size one, so that the result will broadcast correctly against the\n inputs. This option can only be used for generalized ufuncs that operate on inputs\n that all have the same number of core dimensions and with outputs that have no core\n dimensions , i.e., with signatures like ``\"(i),(i)->()\"`` or ``\"(m,m)->()\"``.\n If used, the location of the dimensions in the output can be controlled with axes\n and axis.\n output_dtypes : Optional, dtype or list of dtypes, keyword only\n Valid numpy dtype specification or list thereof.\n If not given, a call of ``func`` with a small set of data\n is performed in order to try to automatically determine the\n output dtypes.\n output_sizes : dict, optional, keyword only\n Optional mapping from dimension names to sizes for outputs. Only used if\n new core dimensions (not found on inputs) appear on outputs.\n vectorize: bool, keyword only\n If set to ``True``, ``np.vectorize`` is applied to ``func`` for\n convenience. Defaults to ``False``.\n allow_rechunk: Optional, bool, keyword only\n Allows rechunking, otherwise chunk sizes need to match and core\n dimensions are to consist only of one chunk.\n Warning: enabling this can increase memory usage significantly.\n Defaults to ``False``.\n meta: Optional, tuple, keyword only\n tuple of empty ndarrays describing the shape and dtype of the output of the gufunc.\n Defaults to ``None``.\n **kwargs : dict\n Extra keyword arguments to pass to `func`\n\n Returns\n -------\n Single dask.array.Array or tuple of dask.array.Array\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> def stats(x):\n ... return np.mean(x, axis=-1), np.std(x, axis=-1)\n >>> a = da.random.normal(size=(10,20,30), chunks=(5, 10, 30))\n >>> mean, std = da.apply_gufunc(stats, \"(i)->(),()\", a)\n >>> mean.compute().shape\n (10, 20)\n\n\n >>> def outer_product(x, y):\n ... 
return np.einsum(\"i,j->ij\", x, y)\n >>> a = da.random.normal(size=( 20,30), chunks=(10, 30))\n >>> b = da.random.normal(size=(10, 1,40), chunks=(5, 1, 40))\n >>> c = da.apply_gufunc(outer_product, \"(i),(j)->(i,j)\", a, b, vectorize=True)\n >>> c.compute().shape\n (10, 20, 30, 40)\n\n References\n ----------\n .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc_gufunc._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc_gufunc._", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 505, "end_line": 606, "span_ids": ["gufunc"], "tokens": 1284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class gufunc:\n \"\"\"\n Binds `pyfunc` into ``dask.array.apply_gufunc`` when called.\n\n Parameters\n ----------\n pyfunc : callable\n Function to call like ``func(*args, **kwargs)`` on input arrays\n (``*args``) that returns an array or tuple of arrays. If multiple\n arguments with non-matching dimensions are supplied, this function is\n expected to vectorize (broadcast) over axes of positional arguments in\n the style of NumPy universal functions [1]_ (if this is not the case,\n set ``vectorize=True``). If this function returns multiple outputs,\n ``output_core_dims`` has to be set as well.\n signature : String, keyword only\n Specifies what core dimensions are consumed and produced by ``func``.\n According to the specification of numpy.gufunc signature [2]_\n axes: List of tuples, optional, keyword only\n A list of tuples with indices of axes a generalized ufunc should operate on.\n For instance, for a signature of ``\"(i,j),(j,k)->(i,k)\"`` appropriate for\n matrix multiplication, the base elements are two-dimensional matrices\n and these are taken to be stored in the two last axes of each argument. The\n corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.\n For simplicity, for generalized ufuncs that operate on 1-dimensional arrays\n (vectors), a single integer is accepted instead of a single-element tuple,\n and for generalized ufuncs for which all outputs are scalars, the output\n tuples can be omitted.\n axis: int, optional, keyword only\n A single axis over which a generalized ufunc should operate. This is a short-cut\n for ufuncs that operate over a single, shared core dimension, equivalent to passing\n in axes with entries of (axis,) for each single-core-dimension argument and ``()`` for\n all others. 
For instance, for a signature ``\"(i),(i)->()\"``, it is equivalent to passing\n in ``axes=[(axis,), (axis,), ()]``.\n keepdims: bool, optional, keyword only\n If this is set to True, axes which are reduced over will be left in the result as\n a dimension with size one, so that the result will broadcast correctly against the\n inputs. This option can only be used for generalized ufuncs that operate on inputs\n that all have the same number of core dimensions and with outputs that have no core\n dimensions , i.e., with signatures like ``\"(i),(i)->()\"`` or ``\"(m,m)->()\"``.\n If used, the location of the dimensions in the output can be controlled with axes\n and axis.\n output_dtypes : Optional, dtype or list of dtypes, keyword only\n Valid numpy dtype specification or list thereof.\n If not given, a call of ``func`` with a small set of data\n is performed in order to try to automatically determine the\n output dtypes.\n output_sizes : dict, optional, keyword only\n Optional mapping from dimension names to sizes for outputs. Only used if\n new core dimensions (not found on inputs) appear on outputs.\n vectorize: bool, keyword only\n If set to ``True``, ``np.vectorize`` is applied to ``func`` for\n convenience. Defaults to ``False``.\n allow_rechunk: Optional, bool, keyword only\n Allows rechunking, otherwise chunk sizes need to match and core\n dimensions are to consist only of one chunk.\n Warning: enabling this can increase memory usage significantly.\n Defaults to ``False``.\n meta: Optional, tuple, keyword only\n tuple of empty ndarrays describing the shape and dtype of the output of the gufunc.\n Defaults to ``None``.\n\n Returns\n -------\n Wrapped function\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> a = da.random.normal(size=(10,20,30), chunks=(5, 10, 30))\n >>> def stats(x):\n ... return np.mean(x, axis=-1), np.std(x, axis=-1)\n >>> gustats = da.gufunc(stats, signature=\"(i)->(),()\", output_dtypes=(float, float))\n >>> mean, std = gustats(a)\n >>> mean.compute().shape\n (10, 20)\n\n >>> a = da.random.normal(size=( 20,30), chunks=(10, 30))\n >>> b = da.random.normal(size=(10, 1,40), chunks=(5, 1, 40))\n >>> def outer_product(x, y):\n ... return np.einsum(\"i,j->ij\", x, y)\n >>> guouter_product = da.gufunc(outer_product, signature=\"(i),(j)->(i,j)\", output_dtypes=float, vectorize=True)\n >>> c = guouter_product(a, b)\n >>> c.compute().shape\n (10, 20, 30, 40)\n\n >>> a = da.ones((1, 5, 10), chunks=(-1, -1, -1))\n >>> def stats(x):\n ... return np.atleast_1d(x.mean()), np.atleast_1d(x.max())\n >>> meta = (np.array((), dtype=np.float64), np.array((), dtype=np.float64))\n >>> gustats = da.gufunc(stats, signature=\"(i,j)->(),()\", meta=meta)\n >>> result = gustats(a)\n >>> result[0].compute().shape\n (1,)\n >>> result[1].compute().shape\n (1,)\n\n References\n ----------\n .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. 
[2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc.__init___gufunc.__call__.return.apply_gufunc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_gufunc.__init___gufunc.__call__.return.apply_gufunc_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 628, "end_line": 687, "span_ids": ["gufunc.__call__", "gufunc.__init__"], "tokens": 371}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class gufunc:\n\n def __init__(\n self,\n pyfunc,\n *,\n signature=None,\n vectorize=False,\n axes=None,\n axis=None,\n keepdims=False,\n output_sizes=None,\n output_dtypes=None,\n allow_rechunk=False,\n meta=None,\n ):\n self.pyfunc = pyfunc\n self.signature = signature\n self.vectorize = vectorize\n self.axes = axes\n self.axis = axis\n self.keepdims = keepdims\n self.output_sizes = output_sizes\n self.output_dtypes = output_dtypes\n self.allow_rechunk = allow_rechunk\n self.meta = meta\n\n self.__doc__ = \"\"\"\n Bound ``dask.array.gufunc``\n func: ``{func}``\n signature: ``'{signature}'``\n\n Parameters\n ----------\n *args : numpy/dask arrays or scalars\n Arrays to which to apply to ``func``. 
Core dimensions as specified in\n ``signature`` need to come last.\n **kwargs : dict\n Extra keyword arguments to pass to ``func``\n\n Returns\n -------\n Single dask.array.Array or tuple of dask.array.Array\n \"\"\".format(\n func=str(self.pyfunc), signature=self.signature\n )\n\n def __call__(self, *args, allow_rechunk=False, **kwargs):\n return apply_gufunc(\n self.pyfunc,\n self.signature,\n *args,\n vectorize=self.vectorize,\n axes=self.axes,\n axis=self.axis,\n keepdims=self.keepdims,\n output_sizes=self.output_sizes,\n output_dtypes=self.output_dtypes,\n allow_rechunk=self.allow_rechunk or allow_rechunk,\n meta=self.meta,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc_as_gufunc._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc_as_gufunc._", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 644, "end_line": 726, "span_ids": ["as_gufunc"], "tokens": 1031}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def as_gufunc(signature=None, **kwargs):\n \"\"\"\n Decorator for ``dask.array.gufunc``.\n\n Parameters\n ----------\n signature : String\n Specifies what core dimensions are consumed and produced by ``func``.\n According to the specification of numpy.gufunc signature [2]_\n axes: List of tuples, optional, keyword only\n A list of tuples with indices of axes a generalized ufunc should operate on.\n For instance, for a signature of ``\"(i,j),(j,k)->(i,k)\"`` appropriate for\n matrix multiplication, the base elements are two-dimensional matrices\n and these are taken to be stored in the two last axes of each argument. The\n corresponding axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``.\n For simplicity, for generalized ufuncs that operate on 1-dimensional arrays\n (vectors), a single integer is accepted instead of a single-element tuple,\n and for generalized ufuncs for which all outputs are scalars, the output\n tuples can be omitted.\n axis: int, optional, keyword only\n A single axis over which a generalized ufunc should operate. This is a short-cut\n for ufuncs that operate over a single, shared core dimension, equivalent to passing\n in axes with entries of (axis,) for each single-core-dimension argument and ``()`` for\n all others. For instance, for a signature ``\"(i),(i)->()\"``, it is equivalent to passing\n in ``axes=[(axis,), (axis,), ()]``.\n keepdims: bool, optional, keyword only\n If this is set to True, axes which are reduced over will be left in the result as\n a dimension with size one, so that the result will broadcast correctly against the\n inputs. 
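__call__ above simply forwards to apply_gufunc, OR-ing the per-call ``allow_rechunk`` into the bound setting. A usage sketch of that forwarding:

import numpy as np
import dask.array as da

def rms(x):
    return np.sqrt((x * x).mean(axis=-1))

gurms = da.gufunc(rms, signature="(i)->()", output_dtypes=float)

# The core axis "i" spans two chunks here; allow_rechunk=True at call time
# permits that even though the gufunc was bound with allow_rechunk=False.
a = da.random.normal(size=(6, 50), chunks=(3, 25))
out = gurms(a, allow_rechunk=True)
assert out.shape == (6,)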
This option can only be used for generalized ufuncs that operate on inputs\n that all have the same number of core dimensions and with outputs that have no core\n dimensions , i.e., with signatures like ``\"(i),(i)->()\"`` or ``\"(m,m)->()\"``.\n If used, the location of the dimensions in the output can be controlled with axes\n and axis.\n output_dtypes : Optional, dtype or list of dtypes, keyword only\n Valid numpy dtype specification or list thereof.\n If not given, a call of ``func`` with a small set of data\n is performed in order to try to automatically determine the\n output dtypes.\n output_sizes : dict, optional, keyword only\n Optional mapping from dimension names to sizes for outputs. Only used if\n new core dimensions (not found on inputs) appear on outputs.\n vectorize: bool, keyword only\n If set to ``True``, ``np.vectorize`` is applied to ``func`` for\n convenience. Defaults to ``False``.\n allow_rechunk: Optional, bool, keyword only\n Allows rechunking, otherwise chunk sizes need to match and core\n dimensions are to consist only of one chunk.\n Warning: enabling this can increase memory usage significantly.\n Defaults to ``False``.\n meta: Optional, tuple, keyword only\n tuple of empty ndarrays describing the shape and dtype of the output of the gufunc.\n Defaults to ``None``.\n\n Returns\n -------\n Decorator for `pyfunc` that itself returns a `gufunc`.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> a = da.random.normal(size=(10,20,30), chunks=(5, 10, 30))\n >>> @da.as_gufunc(\"(i)->(),()\", output_dtypes=(float, float))\n ... def stats(x):\n ... return np.mean(x, axis=-1), np.std(x, axis=-1)\n >>> mean, std = stats(a)\n >>> mean.compute().shape\n (10, 20)\n\n >>> a = da.random.normal(size=( 20,30), chunks=(10, 30))\n >>> b = da.random.normal(size=(10, 1,40), chunks=(5, 1, 40))\n >>> @da.as_gufunc(\"(i),(j)->(i,j)\", output_dtypes=float, vectorize=True)\n ... def outer_product(x, y):\n ... return np.einsum(\"i,j->ij\", x, y)\n >>> c = outer_product(a, b)\n >>> c.compute().shape\n (10, 20, 30, 40)\n\n References\n ----------\n .. [1] https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc._allowedkeys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_as_gufunc._allowedkeys_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 727, "end_line": 759, "span_ids": ["as_gufunc"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def as_gufunc(signature=None, **kwargs):\n _allowedkeys = {\n \"vectorize\",\n \"axes\",\n \"axis\",\n \"keepdims\",\n \"output_sizes\",\n \"output_dtypes\",\n \"allow_rechunk\",\n \"meta\",\n }\n if kwargs.keys() - _allowedkeys:\n raise TypeError(\"Unsupported keyword argument(s) provided\")\n\n def _as_gufunc(pyfunc):\n return gufunc(pyfunc, signature=signature, **kwargs)\n\n _as_gufunc.__doc__ = \"\"\"\n Decorator to make ``dask.array.gufunc``\n signature: ``'{signature}'``\n\n Parameters\n ----------\n pyfunc : callable\n Function matching signature ``'{signature}'``.\n\n Returns\n -------\n ``dask.array.gufunc``\n \"\"\".format(\n signature=signature\n )\n return _as_gufunc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_operator__nanmin.return.k_1_if_np_isnan_k_0_else": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_operator__nanmin.return.k_1_if_np_isnan_k_0_else", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 34, "span_ids": ["_nanmin", "imports", "_cumsum_part", "_cumsum_blocks"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\nfrom functools import partial\nfrom numbers import Number\n\nimport numpy as np\nimport tlz as toolz\n\nfrom ..base import tokenize, wait\nfrom ..blockwise import blockwise\nfrom ..delayed import delayed\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import apply, derived_from\nfrom .core import Array, concatenate, dotmany, from_delayed\nfrom .creation import eye\nfrom .random import RandomState\nfrom .utils import array_safe, meta_from_array, solve_triangular_safe, svd_flip\n\n\ndef _cumsum_blocks(it):\n total = 0\n for x in it:\n total_previous = total\n total += x\n yield (total_previous, total)\n\n\ndef _cumsum_part(last, 
new):\n return (last[1], last[1] + new)\n\n\ndef _nanmin(m, n):\n k_0 = min([m, n])\n k_1 = m if np.isnan(n) else n\n return k_1 if np.isnan(k_0) else k_0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__wrapped_qr__wrapped_qr.if_a_shape_0_0_.else_.return.np_linalg_qr_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__wrapped_qr__wrapped_qr.if_a_shape_0_0_.else_.return.np_linalg_qr_a_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 37, "end_line": 49, "span_ids": ["_wrapped_qr"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _wrapped_qr(a):\n \"\"\"\n A wrapper for np.linalg.qr that handles arrays with 0 rows\n\n Notes: Created for tsqr so as to manage cases with uncertain\n array dimensions. In particular, the case where arrays have\n (uncertain) chunks with 0 rows.\n \"\"\"\n # workaround may be removed when numpy stops rejecting edge cases\n if a.shape[0] == 0:\n return np.zeros_like(a, shape=(0, 0)), np.zeros_like(a, shape=(0, a.shape[1]))\n else:\n return np.linalg.qr(a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr_tsqr.layers.data___dask_graph___lay": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr_tsqr.layers.data___dask_graph___lay", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 51, "end_line": 129, "span_ids": ["tsqr"], "tokens": 756}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n \"\"\"Direct Tall-and-Skinny QR algorithm\n\n As presented in:\n\n A. Benson, D. Gleich, and J. Demmel.\n Direct QR factorizations for tall-and-skinny matrices in\n MapReduce architectures.\n IEEE International Conference on Big Data, 2013.\n https://arxiv.org/abs/1301.1071\n\n This algorithm is used to compute both the QR decomposition and the\n Singular Value Decomposition. 
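The cumulative-sum helpers above drive the slice bookkeeping used later in tsqr. A standalone sketch of the same logic, re-implemented here because the originals are private:

def cumsum_blocks(sizes):
    # Mirror of dask's private _cumsum_blocks: yield (start, end) offsets.
    total = 0
    for s in sizes:
        prev, total = total, total + s
        yield (prev, total)

# Row extents of three stacked blocks with 3, 4, and 2 rows.
assert list(cumsum_blocks([3, 4, 2])) == [(0, 3), (3, 7), (7, 9)]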
It requires that the input array have a\n single column of blocks, each of which fit in memory.\n\n Parameters\n ----------\n data: Array\n compute_svd: bool\n Whether to compute the SVD rather than the QR decomposition\n _max_vchunk_size: Integer\n Used internally in recursion to set the maximum row dimension\n of chunks in subsequent recursive calls.\n\n Notes\n -----\n With ``k`` blocks of size ``(m, n)``, this algorithm has memory use that\n scales as ``k * n * n``.\n\n The implementation here is the recursive variant due to the ultimate\n need for one \"single core\" QR decomposition. In the non-recursive version\n of the algorithm, given ``k`` blocks, after ``k`` ``m * n`` QR\n decompositions, there will be a \"single core\" QR decomposition that will\n have to work with a ``(k * n, n)`` matrix.\n\n Here, recursion is applied as necessary to ensure that ``k * n`` is not\n larger than ``m`` (if ``m / n >= 2``). In particular, this is done\n to ensure that single core computations do not have to work on blocks\n larger than ``(m, n)``.\n\n Where blocks are irregular, the above logic is applied with the \"height\" of\n the \"tallest\" block used in place of ``m``.\n\n Consider use of the ``rechunk`` method to control this behavior.\n Taller blocks will reduce overall memory use (assuming that many of them\n still fit in memory at once).\n\n See Also\n --------\n dask.array.linalg.qr\n Powered by this algorithm\n dask.array.linalg.svd\n Powered by this algorithm\n dask.array.linalg.sfqr\n Variant for short-and-fat arrays\n \"\"\"\n nr, nc = len(data.chunks[0]), len(data.chunks[1])\n cr_max, cc = max(data.chunks[0]), data.chunks[1][0]\n\n if not (data.ndim == 2 and nc == 1): # Is a matrix # Only one column block\n raise ValueError(\n \"Input must have the following properties:\\n\"\n \" 1. Have two dimensions\\n\"\n \" 2. Have only one column of blocks\\n\\n\"\n \"Note: This function (tsqr) supports QR decomposition in the case of\\n\"\n \"tall-and-skinny matrices (single column chunk/block; see qr)\\n\"\n \"Current shape: {},\\nCurrent chunksize: {}\".format(\n data.shape, data.chunksize\n )\n )\n\n token = \"-\" + tokenize(data, compute_svd)\n\n m, n = data.shape\n numblocks = (nr, 1)\n\n qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))\n\n layers = data.__dask_graph__().layers.copy()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.dependencies_tsqr.can_distribute.chunks_well_defined_and_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.dependencies_tsqr.can_distribute.chunks_well_defined_and_i", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 131, "end_line": 175, "span_ids": ["tsqr"], "tokens": 523}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n # ... 
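tsqr backs the public qr (and svd) for tall-and-skinny inputs, and the single-column-of-blocks requirement in the ValueError above is exactly what a rechunk can establish. Usage sketch:

import numpy as np
import dask.array as da

x = da.random.normal(size=(1000, 10), chunks=(100, 10))  # one column of blocks
q, r = da.linalg.qr(x)

assert q.shape == (1000, 10) and r.shape == (10, 10)
# Q R reconstructs the input and R is upper triangular.
np.testing.assert_allclose((q @ r).compute(), x.compute(), atol=1e-8)
np.testing.assert_allclose(np.tril(r.compute(), k=-1), 0, atol=1e-12)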
other code\n dependencies = data.__dask_graph__().dependencies.copy()\n\n # Block qr\n name_qr_st1 = \"qr\" + token\n dsk_qr_st1 = blockwise(\n _wrapped_qr,\n name_qr_st1,\n \"ij\",\n data.name,\n \"ij\",\n numblocks={data.name: numblocks},\n )\n layers[name_qr_st1] = dsk_qr_st1\n dependencies[name_qr_st1] = set(data.__dask_layers__())\n\n # Block qr[0]\n name_q_st1 = \"getitem\" + token + \"-q1\"\n dsk_q_st1 = {\n (name_q_st1, i, 0): (operator.getitem, (name_qr_st1, i, 0), 0)\n for i in range(numblocks[0])\n }\n layers[name_q_st1] = dsk_q_st1\n dependencies[name_q_st1] = {name_qr_st1}\n\n # Block qr[1]\n name_r_st1 = \"getitem\" + token + \"-r1\"\n dsk_r_st1 = {\n (name_r_st1, i, 0): (operator.getitem, (name_qr_st1, i, 0), 1)\n for i in range(numblocks[0])\n }\n layers[name_r_st1] = dsk_r_st1\n dependencies[name_r_st1] = {name_qr_st1}\n\n # Next step is to obtain a QR decomposition for the stacked R factors, so either:\n # - gather R factors into a single core and do a QR decomposition\n # - recurse with tsqr (if single core computation too large and a-priori \"meaningful\n # reduction\" possible, meaning that chunks have to be well defined)\n\n single_core_compute_m = nr * cc\n chunks_well_defined = not any(np.isnan(c) for cs in data.chunks for c in cs)\n prospective_blocks = np.ceil(single_core_compute_m / cr_max)\n meaningful_reduction_possible = (\n cr_max if _max_vchunk_size is None else _max_vchunk_size\n ) >= 2 * cc\n can_distribute = chunks_well_defined and int(prospective_blocks) > 1\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an_tsqr.if_chunks_well_defined_an.dependencies_name_q_st3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an_tsqr.if_chunks_well_defined_an.dependencies_name_q_st3_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 177, "end_line": 268, "span_ids": ["tsqr"], "tokens": 908}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n # ... 
other code\n\n if chunks_well_defined and meaningful_reduction_possible and can_distribute:\n # stack chunks into blocks and recurse using tsqr\n\n # Prepare to stack chunks into blocks (from block qr[1])\n all_blocks = []\n curr_block = []\n curr_block_sz = 0\n for idx, a_m in enumerate(data.chunks[0]):\n m_q = a_m\n n_q = min(m_q, cc)\n m_r = n_q\n # n_r = cc\n if curr_block_sz + m_r > cr_max:\n all_blocks.append(curr_block)\n curr_block = []\n curr_block_sz = 0\n curr_block.append((idx, m_r))\n curr_block_sz += m_r\n if len(curr_block) > 0:\n all_blocks.append(curr_block)\n\n # R_stacked\n name_r_stacked = \"stack\" + token + \"-r1\"\n dsk_r_stacked = {\n (name_r_stacked, i, 0): (\n np.vstack,\n (tuple, [(name_r_st1, idx, 0) for idx, _ in sub_block_info]),\n )\n for i, sub_block_info in enumerate(all_blocks)\n }\n layers[name_r_stacked] = dsk_r_stacked\n dependencies[name_r_stacked] = {name_r_st1}\n\n # retrieve R_stacked for recursion with tsqr\n vchunks_rstacked = tuple(\n sum(map(lambda x: x[1], sub_block_info)) for sub_block_info in all_blocks\n )\n graph = HighLevelGraph(layers, dependencies)\n # dsk.dependencies[name_r_stacked] = {data.name}\n r_stacked_meta = meta_from_array(\n data, len((sum(vchunks_rstacked), n)), dtype=rr.dtype\n )\n r_stacked = Array(\n graph,\n name_r_stacked,\n shape=(sum(vchunks_rstacked), n),\n chunks=(vchunks_rstacked, n),\n meta=r_stacked_meta,\n )\n\n # recurse\n q_inner, r_inner = tsqr(r_stacked, _max_vchunk_size=cr_max)\n layers = toolz.merge(q_inner.dask.layers, r_inner.dask.layers)\n dependencies = toolz.merge(q_inner.dask.dependencies, r_inner.dask.dependencies)\n\n # Q_inner: \"unstack\"\n name_q_st2 = \"getitem\" + token + \"-q2\"\n dsk_q_st2 = {\n (name_q_st2, j, 0): (\n operator.getitem,\n (q_inner.name, i, 0),\n ((slice(e[0], e[1])), (slice(0, n))),\n )\n for i, sub_block_info in enumerate(all_blocks)\n for j, e in zip(\n [x[0] for x in sub_block_info],\n _cumsum_blocks([x[1] for x in sub_block_info]),\n )\n }\n layers[name_q_st2] = dsk_q_st2\n dependencies[name_q_st2] = set(q_inner.__dask_layers__())\n\n # R: R_inner\n name_r_st2 = \"r-inner\" + token\n dsk_r_st2 = {(name_r_st2, 0, 0): (r_inner.name, 0, 0)}\n layers[name_r_st2] = dsk_r_st2\n dependencies[name_r_st2] = set(r_inner.__dask_layers__())\n\n # Q: Block qr[0] (*) Q_inner\n name_q_st3 = \"dot\" + token + \"-q3\"\n dsk_q_st3 = blockwise(\n np.dot,\n name_q_st3,\n \"ij\",\n name_q_st1,\n \"ij\",\n name_q_st2,\n \"ij\",\n numblocks={name_q_st1: numblocks, name_q_st2: numblocks},\n )\n layers[name_q_st3] = dsk_q_st3\n dependencies[name_q_st3] = {name_q_st1, name_q_st2}\n # ... other code\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an.else__tsqr.if_chunks_well_defined_an.else_.dependencies_name_r_st2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_chunks_well_defined_an.else__tsqr.if_chunks_well_defined_an.else_.dependencies_name_r_st2_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 269, "end_line": 386, "span_ids": ["tsqr"], "tokens": 1379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n\n if chunks_well_defined and meaningful_reduction_possible and can_distribute:\n # stack chunks into blocks and recurse using tsqr\n\n # Prepare to stack chunks into blocks (from block qr[1])\n # ... other code\n else:\n # Do single core computation\n\n # Stacking for in-core QR computation\n to_stack = [(name_r_st1, i, 0) for i in range(numblocks[0])]\n name_r_st1_stacked = \"stack\" + token + \"-r1\"\n dsk_r_st1_stacked = {(name_r_st1_stacked, 0, 0): (np.vstack, (tuple, to_stack))}\n layers[name_r_st1_stacked] = dsk_r_st1_stacked\n dependencies[name_r_st1_stacked] = {name_r_st1}\n\n # In-core QR computation\n name_qr_st2 = \"qr\" + token + \"-qr2\"\n dsk_qr_st2 = blockwise(\n np.linalg.qr,\n name_qr_st2,\n \"ij\",\n name_r_st1_stacked,\n \"ij\",\n numblocks={name_r_st1_stacked: (1, 1)},\n )\n layers[name_qr_st2] = dsk_qr_st2\n dependencies[name_qr_st2] = {name_r_st1_stacked}\n\n # In-core qr[0]\n name_q_st2_aux = \"getitem\" + token + \"-q2-aux\"\n dsk_q_st2_aux = {\n (name_q_st2_aux, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 0)\n }\n layers[name_q_st2_aux] = dsk_q_st2_aux\n dependencies[name_q_st2_aux] = {name_qr_st2}\n\n chucks_are_all_known = not any(np.isnan(c) for cs in data.chunks for c in cs)\n if chucks_are_all_known:\n # when chunks are all known...\n # obtain slices on q from in-core compute (e.g.: (slice(10, 20), slice(0, 5)))\n q2_block_sizes = [min(e, n) for e in data.chunks[0]]\n block_slices = [\n (slice(e[0], e[1]), slice(0, n)) for e in _cumsum_blocks(q2_block_sizes)\n ]\n dsk_q_blockslices = {}\n deps = set()\n else:\n # when chunks are not already known...\n\n # request shape information: vertical chunk sizes & column dimension (n)\n name_q2bs = \"shape\" + token + \"-q2\"\n dsk_q2_shapes = {\n (name_q2bs, i): (min, (getattr, (data.name, i, 0), \"shape\"))\n for i in range(numblocks[0])\n }\n name_n = \"getitem\" + token + \"-n\"\n dsk_n = {\n name_n: (operator.getitem, (getattr, (data.name, 0, 0), \"shape\"), 1)\n }\n\n # cumulative sums (start, end)\n name_q2cs = \"cumsum\" + token + \"-q2\"\n dsk_q2_cumsum = {(name_q2cs, 0): [0, (name_q2bs, 0)]}\n\n for i in range(1, numblocks[0]):\n dsk_q2_cumsum[(name_q2cs, i)] = (\n _cumsum_part,\n (name_q2cs, i - 1),\n (name_q2bs, i),\n )\n\n # obtain slices on q from in-core 
compute (e.g.: (slice(10, 20), slice(0, 5)))\n name_blockslice = \"slice\" + token + \"-q\"\n dsk_block_slices = {\n (name_blockslice, i): (\n tuple,\n [(apply, slice, (name_q2cs, i)), (slice, 0, name_n)],\n )\n for i in range(numblocks[0])\n }\n\n dsk_q_blockslices = toolz.merge(\n dsk_n, dsk_q2_shapes, dsk_q2_cumsum, dsk_block_slices\n )\n\n deps = {data.name}\n block_slices = [(name_blockslice, i) for i in range(numblocks[0])]\n\n layers[\"q-blocksizes\" + token] = dsk_q_blockslices\n dependencies[\"q-blocksizes\" + token] = deps\n\n # In-core qr[0] unstacking\n name_q_st2 = \"getitem\" + token + \"-q2\"\n dsk_q_st2 = {\n (name_q_st2, i, 0): (operator.getitem, (name_q_st2_aux, 0, 0), b)\n for i, b in enumerate(block_slices)\n }\n layers[name_q_st2] = dsk_q_st2\n if chucks_are_all_known:\n dependencies[name_q_st2] = {name_q_st2_aux}\n else:\n dependencies[name_q_st2] = {name_q_st2_aux, \"q-blocksizes\" + token}\n\n # Q: Block qr[0] (*) In-core qr[0]\n name_q_st3 = \"dot\" + token + \"-q3\"\n dsk_q_st3 = blockwise(\n np.dot,\n name_q_st3,\n \"ij\",\n name_q_st1,\n \"ij\",\n name_q_st2,\n \"ij\",\n numblocks={name_q_st1: numblocks, name_q_st2: numblocks},\n )\n layers[name_q_st3] = dsk_q_st3\n dependencies[name_q_st3] = {name_q_st1, name_q_st2}\n\n # R: In-core qr[1]\n name_r_st2 = \"getitem\" + token + \"-r2\"\n dsk_r_st2 = {(name_r_st2, 0, 0): (operator.getitem, (name_qr_st2, 0, 0), 1)}\n layers[name_r_st2] = dsk_r_st2\n dependencies[name_r_st2] = {name_qr_st2}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_not_compute_svd__tsqr.if_not_compute_svd_.else_.return.u_s_vh": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_tsqr.if_not_compute_svd__tsqr.if_not_compute_svd_.else_.return.u_s_vh", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 393, "end_line": 510, "span_ids": ["tsqr"], "tokens": 1327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tsqr(data, compute_svd=False, _max_vchunk_size=None):\n # ... 
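Both branches above turn per-block row counts into `(start, end)` offset pairs via the private `_cumsum_blocks` helper. A standalone sketch of its assumed semantics (the name `cumsum_blocks` and the example sizes are illustrative, not dask's actual implementation):

```python
def cumsum_blocks(sizes):
    # Assumed behaviour: yield consecutive (start, end) offsets so the
    # stacked Q factor can be sliced back into its original row blocks.
    total = 0
    for size in sizes:
        yield (total, total + size)
        total += size

print(list(cumsum_blocks([10, 10, 5])))  # [(0, 10), (10, 20), (20, 25)]
```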
other code\n\n if not compute_svd:\n is_unknown_m = np.isnan(data.shape[0]) or any(\n np.isnan(c) for c in data.chunks[0]\n )\n is_unknown_n = np.isnan(data.shape[1]) or any(\n np.isnan(c) for c in data.chunks[1]\n )\n\n if is_unknown_m and is_unknown_n:\n # assumption: m >= n\n q_shape = data.shape\n q_chunks = (data.chunks[0], (np.nan,))\n r_shape = (np.nan, np.nan)\n r_chunks = ((np.nan,), (np.nan,))\n elif is_unknown_m and not is_unknown_n:\n # assumption: m >= n\n q_shape = data.shape\n q_chunks = (data.chunks[0], (n,))\n r_shape = (n, n)\n r_chunks = (n, n)\n elif not is_unknown_m and is_unknown_n:\n # assumption: m >= n\n q_shape = data.shape\n q_chunks = (data.chunks[0], (np.nan,))\n r_shape = (np.nan, np.nan)\n r_chunks = ((np.nan,), (np.nan,))\n else:\n q_shape = (\n data.shape\n if data.shape[0] >= data.shape[1]\n else (data.shape[0], data.shape[0])\n )\n q_chunks = (\n data.chunks\n if data.shape[0] >= data.shape[1]\n else (data.chunks[0], data.chunks[0])\n )\n r_shape = (n, n) if data.shape[0] >= data.shape[1] else data.shape\n r_chunks = r_shape\n\n # dsk.dependencies[name_q_st3] = {data.name}\n # dsk.dependencies[name_r_st2] = {data.name}\n graph = HighLevelGraph(layers, dependencies)\n q_meta = meta_from_array(data, len(q_shape), qq.dtype)\n r_meta = meta_from_array(data, len(r_shape), rr.dtype)\n q = Array(graph, name_q_st3, shape=q_shape, chunks=q_chunks, meta=q_meta)\n r = Array(graph, name_r_st2, shape=r_shape, chunks=r_chunks, meta=r_meta)\n return q, r\n else:\n # In-core SVD computation\n name_svd_st2 = \"svd\" + token + \"-2\"\n dsk_svd_st2 = blockwise(\n np.linalg.svd,\n name_svd_st2,\n \"ij\",\n name_r_st2,\n \"ij\",\n numblocks={name_r_st2: (1, 1)},\n )\n # svd[0]\n name_u_st2 = \"getitem\" + token + \"-u2\"\n dsk_u_st2 = {(name_u_st2, 0, 0): (operator.getitem, (name_svd_st2, 0, 0), 0)}\n # svd[1]\n name_s_st2 = \"getitem\" + token + \"-s2\"\n dsk_s_st2 = {(name_s_st2, 0): (operator.getitem, (name_svd_st2, 0, 0), 1)}\n # svd[2]\n name_v_st2 = \"getitem\" + token + \"-v2\"\n dsk_v_st2 = {(name_v_st2, 0, 0): (operator.getitem, (name_svd_st2, 0, 0), 2)}\n # Q * U\n name_u_st4 = \"getitem\" + token + \"-u4\"\n dsk_u_st4 = blockwise(\n dotmany,\n name_u_st4,\n \"ij\",\n name_q_st3,\n \"ik\",\n name_u_st2,\n \"kj\",\n numblocks={name_q_st3: numblocks, name_u_st2: (1, 1)},\n )\n\n layers[name_svd_st2] = dsk_svd_st2\n dependencies[name_svd_st2] = {name_r_st2}\n layers[name_u_st2] = dsk_u_st2\n dependencies[name_u_st2] = {name_svd_st2}\n layers[name_u_st4] = dsk_u_st4\n dependencies[name_u_st4] = {name_q_st3, name_u_st2}\n layers[name_s_st2] = dsk_s_st2\n dependencies[name_s_st2] = {name_svd_st2}\n layers[name_v_st2] = dsk_v_st2\n dependencies[name_v_st2] = {name_svd_st2}\n\n uu, ss, vvh = np.linalg.svd(np.ones(shape=(1, 1), dtype=data.dtype))\n\n k = _nanmin(m, n) # avoid RuntimeWarning with np.nanmin([m, n])\n\n m_u = m\n n_u = int(k) if not np.isnan(k) else k\n n_s = n_u\n m_vh = n_u\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned: but basically n\n graph = HighLevelGraph(layers, dependencies)\n u_meta = meta_from_array(data, len((m_u, n_u)), uu.dtype)\n s_meta = meta_from_array(data, len((n_s,)), ss.dtype)\n vh_meta = meta_from_array(data, len((d_vh, d_vh)), vvh.dtype)\n u = Array(\n graph,\n name_u_st4,\n shape=(m_u, n_u),\n chunks=(data.chunks[0], (n_u,)),\n meta=u_meta,\n )\n s = Array(graph, name_s_st2, shape=(n_s,), chunks=((n_s,),), meta=s_meta)\n vh = Array(\n graph, name_v_st2, shape=(d_vh, d_vh), chunks=((n,), (n,)), meta=vh_meta\n )\n return u, s, 
vh", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr_sfqr.name_R_1.prefix_R_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr_sfqr.name_R_1.prefix_R_1_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 513, "end_line": 585, "span_ids": ["sfqr"], "tokens": 748}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sfqr(data, name=None):\n \"\"\"Direct Short-and-Fat QR\n\n Currently, this is a quick hack for non-tall-and-skinny matrices which\n are one chunk tall and (unless they are one chunk wide) have chunks\n that are wider than they are tall\n\n Q [R_1 R_2 ...] = [A_1 A_2 ...]\n\n it computes the factorization Q R_1 = A_1, then computes the other\n R_k's in parallel.\n\n Parameters\n ----------\n data: Array\n\n See Also\n --------\n dask.array.linalg.qr\n Main user API that uses this function\n dask.array.linalg.tsqr\n Variant for tall-and-skinny case\n \"\"\"\n nr, nc = len(data.chunks[0]), len(data.chunks[1])\n cr, cc = data.chunks[0][0], data.chunks[1][0]\n\n if not (\n (data.ndim == 2)\n and (nr == 1) # Is a matrix\n and ( # Has exactly one block row\n (cr <= cc)\n or (nc == 1) # Chunking dimension on rows is at least that on cols or...\n )\n ): # ... only one block col\n raise ValueError(\n \"Input must have the following properties:\\n\"\n \" 1. Have two dimensions\\n\"\n \" 2. Have only one row of blocks\\n\"\n \" 3. Either one column of blocks or (first) chunk size on cols\\n\"\n \" is at most that on rows (e.g.: for a 5x20 matrix,\\n\"\n \" chunks=((5), (8,4,8)) is fine, but chunks=((5), (4,8,8)) is not;\\n\"\n \" still, prefer something simple like chunks=(5,10) or chunks=5)\\n\\n\"\n \"Note: This function (sfqr) supports QR decomposition in the case\\n\"\n \"of short-and-fat matrices (single row chunk/block; see qr)\"\n )\n\n prefix = name or \"sfqr-\" + tokenize(data)\n prefix += \"_\"\n\n m, n = data.shape\n\n qq, rr = np.linalg.qr(np.ones(shape=(1, 1), dtype=data.dtype))\n\n layers = data.__dask_graph__().layers.copy()\n dependencies = data.__dask_graph__().dependencies.copy()\n\n # data = A = [A_1 A_rest]\n name_A_1 = prefix + \"A_1\"\n name_A_rest = prefix + \"A_rest\"\n layers[name_A_1] = {(name_A_1, 0, 0): (data.name, 0, 0)}\n dependencies[name_A_1] = set(data.__dask_layers__())\n layers[name_A_rest] = {\n (name_A_rest, 0, idx): (data.name, 0, 1 + idx) for idx in range(nc - 1)\n }\n if len(layers[name_A_rest]) > 0:\n dependencies[name_A_rest] = set(data.__dask_layers__())\n else:\n dependencies[name_A_rest] = set()\n\n # Q R_1 = A_1\n name_Q_R1 = prefix + \"Q_R_1\"\n name_Q = prefix + \"Q\"\n name_R_1 = prefix + \"R_1\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr.layers_name_Q_R1_nam_sfqr.return.Q_R": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_sfqr.layers_name_Q_R1_nam_sfqr.return.Q_R", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 586, "end_line": 618, "span_ids": ["sfqr"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sfqr(data, name=None):\n # ... other code\n layers[name_Q_R1] = {(name_Q_R1, 0, 0): (np.linalg.qr, (name_A_1, 0, 0))}\n dependencies[name_Q_R1] = {name_A_1}\n\n layers[name_Q] = {(name_Q, 0, 0): (operator.getitem, (name_Q_R1, 0, 0), 0)}\n dependencies[name_Q] = {name_Q_R1}\n\n layers[name_R_1] = {(name_R_1, 0, 0): (operator.getitem, (name_Q_R1, 0, 0), 1)}\n dependencies[name_R_1] = {name_Q_R1}\n\n graph = HighLevelGraph(layers, dependencies)\n\n Q_meta = meta_from_array(data, len((m, min(m, n))), dtype=qq.dtype)\n R_1_meta = meta_from_array(data, len((min(m, n), cc)), dtype=rr.dtype)\n Q = Array(graph, name_Q, shape=(m, min(m, n)), chunks=(m, min(m, n)), meta=Q_meta)\n R_1 = Array(graph, name_R_1, shape=(min(m, n), cc), chunks=(cr, cc), meta=R_1_meta)\n\n # R = [R_1 Q'A_rest]\n Rs = [R_1]\n\n if nc > 1:\n A_rest_meta = meta_from_array(data, len((min(m, n), n - cc)), dtype=rr.dtype)\n A_rest = Array(\n graph,\n name_A_rest,\n shape=(min(m, n), n - cc),\n chunks=(cr, data.chunks[1][1:]),\n meta=A_rest_meta,\n )\n Rs.append(Q.T.dot(A_rest))\n\n R = concatenate(Rs, axis=1)\n\n return Q, R", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_level_compression_level.return.min_max_min_subspace_size": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_level_compression_level.return.min_max_min_subspace_size", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 621, "end_line": 651, "span_ids": ["compression_level"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compression_level(n, q, n_oversamples=10, min_subspace_size=20):\n \"\"\"Compression level to use in svd_compressed\n\n Given the size ``n`` of a space, compress that that to one of size\n ``q`` plus 
n_oversamples.\n\n The oversampling allows for greater flexibility in finding an\n appropriate subspace, a low value is often enough (10 is already a\n very conservative choice, it can be further reduced).\n ``q + oversampling`` should not be larger than ``n``. In this\n specific implementation, ``q + n_oversamples`` is at least\n ``min_subspace_size``.\n\n Parameters\n ----------\n n: int\n Column/row dimension of original matrix\n q: int\n Size of the desired subspace (the actual size will be bigger,\n because of oversampling, see ``da.linalg.compression_level``)\n n_oversamples: int, default=10\n Number of oversamples used for generating the sampling matrix.\n min_subspace_size : int, default=20\n Minimum subspace size.\n\n Examples\n --------\n >>> compression_level(100, 10)\n 20\n \"\"\"\n return min(max(min_subspace_size, q + n_oversamples), n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_matrix_compression_matrix.return.q_T": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_compression_matrix_compression_matrix.return.q_T", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 654, "end_line": 742, "span_ids": ["compression_matrix"], "tokens": 765}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compression_matrix(\n data,\n q,\n iterator=\"power\",\n n_power_iter=0,\n n_oversamples=10,\n seed=None,\n compute=False,\n):\n \"\"\"Randomly sample matrix to find most active subspace\n\n This compression matrix returned by this algorithm can be used to\n compute both the QR decomposition and the Singular Value\n Decomposition.\n\n Parameters\n ----------\n data: Array\n q: int\n Size of the desired subspace (the actual size will be bigger,\n because of oversampling, see ``da.linalg.compression_level``)\n iterator: {'power', 'QR'}, default='power'\n Define the technique used for iterations to cope with flat\n singular spectra or when the input matrix is very large.\n n_power_iter: int\n Number of power iterations, useful when the singular values\n decay slowly. Error decreases exponentially as `n_power_iter`\n increases. In practice, set `n_power_iter` <= 4.\n n_oversamples: int, default=10\n Number of oversamples used for generating the sampling matrix.\n This value increases the size of the subspace computed, which is more\n accurate at the cost of efficiency. Results are rarely sensitive to this choice\n though and in practice a value of 10 is very commonly high enough.\n compute : bool\n Whether or not to compute data at each use.\n Recomputing the input while performing several passes reduces memory\n pressure, but means that we have to compute the input multiple times.\n This is a good choice if the data is larger than memory and cheap to\n recreate.\n\n References\n ----------\n N. Halko, P. G. Martinsson, and J. A. 
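The body of `compression_level` is a single clamp expression; restating it standalone with a few worked values (the first mirrors the doctest above):

```python
def compression_level(n, q, n_oversamples=10, min_subspace_size=20):
    # Clamp q + oversampling into the range [min_subspace_size, n].
    return min(max(min_subspace_size, q + n_oversamples), n)

print(compression_level(100, 10))  # 20 -- the min_subspace_size floor wins
print(compression_level(100, 40))  # 50 -- plain q + n_oversamples
print(compression_level(15, 40))   # 15 -- never exceeds n
```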
Tropp.\n Finding structure with randomness: Probabilistic algorithms for\n constructing approximate matrix decompositions.\n SIAM Rev., Survey and Review section, Vol. 53, num. 2,\n pp. 217-288, June 2011\n https://arxiv.org/abs/0909.4061\n \"\"\"\n if iterator not in [\"power\", \"QR\"]:\n raise ValueError(\n f\"Iterator '{iterator}' not valid, must be one of ['power', 'QR']\"\n )\n m, n = data.shape\n comp_level = compression_level(min(m, n), q, n_oversamples=n_oversamples)\n if isinstance(seed, RandomState):\n state = seed\n else:\n state = RandomState(seed)\n datatype = np.float64\n if (data.dtype).type in {np.float32, np.complex64}:\n datatype = np.float32\n omega = state.standard_normal(\n size=(n, comp_level), chunks=(data.chunks[1], (comp_level,))\n ).astype(datatype, copy=False)\n mat_h = data.dot(omega)\n if iterator == \"power\":\n for i in range(n_power_iter):\n if compute:\n mat_h = mat_h.persist()\n wait(mat_h)\n tmp = data.T.dot(mat_h)\n if compute:\n tmp = tmp.persist()\n wait(tmp)\n mat_h = data.dot(tmp)\n q, _ = tsqr(mat_h)\n else:\n q, _ = tsqr(mat_h)\n for i in range(n_power_iter):\n if compute:\n q = q.persist()\n wait(q)\n q, _ = tsqr(data.T.dot(q))\n if compute:\n q = q.persist()\n wait(q)\n q, _ = tsqr(data.dot(q))\n return q.T", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_compressed_svd_compressed.return.u_s_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_compressed_svd_compressed.return.u_s_v", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 740, "end_line": 826, "span_ids": ["svd_compressed"], "tokens": 719}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd_compressed(\n a,\n k,\n iterator=\"power\",\n n_power_iter=0,\n n_oversamples=10,\n seed=None,\n compute=False,\n coerce_signs=True,\n):\n \"\"\"Randomly compressed rank-k thin Singular Value Decomposition.\n\n This computes the approximate singular value decomposition of a large\n array. This algorithm is generally faster than the normal algorithm\n but does not provide exact results. One can balance between\n performance and accuracy with input parameters (see below).\n\n Parameters\n ----------\n a: Array\n Input array\n k: int\n Rank of the desired thin SVD decomposition.\n iterator: {'power', 'QR'}, default='power'\n Define the technique used for iterations to cope with flat\n singular spectra or when the input matrix is very large.\n n_power_iter: int, default=0\n Number of power iterations, useful when the singular values\n decay slowly. Error decreases exponentially as `n_power_iter`\n increases. In practice, set `n_power_iter` <= 4.\n n_oversamples: int, default=10\n Number of oversamples used for generating the sampling matrix.\n This value increases the size of the subspace computed, which is more\n accurate at the cost of efficiency. 
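`compression_matrix` above is a randomized range finder in the Halko/Martinsson/Tropp sense. A dense NumPy sketch of the `'power'` iterator path (sizes hypothetical; the `persist`/`wait` recompute handling is omitted):

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((500, 200))
comp_level = 20  # plays the role of compression_level(min(m, n), q, ...)

omega = rng.standard_normal((200, comp_level))  # Gaussian sampling matrix
mat_h = data @ omega                            # sample the column space
for _ in range(2):                              # n_power_iter iterations
    mat_h = data @ (data.T @ mat_h)             # sharpen a flat spectrum
q, _ = np.linalg.qr(mat_h)                      # orthonormal range basis
comp = q.T                                      # (comp_level, 500), as q.T above
```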
Results are rarely sensitive to this choice\n though and in practice a value of 10 is very commonly high enough.\n compute : bool\n Whether or not to compute data at each use.\n Recomputing the input while performing several passes reduces memory\n pressure, but means that we have to compute the input multiple times.\n This is a good choice if the data is larger than memory and cheap to\n recreate.\n coerce_signs : bool\n Whether or not to apply sign coercion to singular vectors in\n order to maintain deterministic results, by default True.\n\n\n Examples\n --------\n >>> u, s, v = svd_compressed(x, 20) # doctest: +SKIP\n\n Returns\n -------\n u: Array, unitary / orthogonal\n s: Array, singular values in decreasing order (largest first)\n v: Array, unitary / orthogonal\n\n References\n ----------\n N. Halko, P. G. Martinsson, and J. A. Tropp.\n Finding structure with randomness: Probabilistic algorithms for\n constructing approximate matrix decompositions.\n SIAM Rev., Survey and Review section, Vol. 53, num. 2,\n pp. 217-288, June 2011\n https://arxiv.org/abs/0909.4061\n \"\"\"\n comp = compression_matrix(\n a,\n k,\n iterator=iterator,\n n_power_iter=n_power_iter,\n n_oversamples=n_oversamples,\n seed=seed,\n compute=compute,\n )\n if compute:\n comp = comp.persist()\n wait(comp)\n a_compressed = comp.dot(a)\n v, s, u = tsqr(a_compressed.T, compute_svd=True)\n u = comp.T.dot(u.T)\n v = v.T\n u = u[:, :k]\n s = s[:k]\n v = v[:k, :]\n if coerce_signs:\n u, v = svd_flip(u, v)\n return u, s, v", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_qr_qr.if_len_a_chunks_1_1_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_qr_qr.if_len_a_chunks_1_1_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 761, "end_line": 797, "span_ids": ["qr"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def qr(a):\n \"\"\"\n Compute the qr factorization of a matrix.\n\n Parameters\n ----------\n a : Array\n\n Returns\n -------\n q: Array, orthonormal\n r: Array, upper-triangular\n\n Examples\n --------\n >>> q, r = da.linalg.qr(x) # doctest: +SKIP\n\n See Also\n --------\n numpy.linalg.qr: Equivalent NumPy Operation\n dask.array.linalg.tsqr: Implementation for tall-and-skinny arrays\n dask.array.linalg.sfqr: Implementation for short-and-fat arrays\n \"\"\"\n\n if len(a.chunks[1]) == 1 and len(a.chunks[0]) > 1:\n return tsqr(a)\n elif len(a.chunks[0]) == 1:\n return sfqr(a)\n else:\n raise NotImplementedError(\n \"qr currently supports only tall-and-skinny (single column chunk/block; see tsqr)\\n\"\n \"and short-and-fat (single row chunk/block; see sfqr) matrices\\n\\n\"\n \"Consider use of the rechunk method. 
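A usage sketch (shapes, chunking, and parameter values hypothetical); unlike `svd`, this works on arrays chunked in both dimensions:

```python
import dask.array as da

x = da.random.random((10_000, 10_000), chunks=(1_000, 1_000))
u, s, v = da.linalg.svd_compressed(x, k=20, n_power_iter=2)
print(u.shape, s.shape, v.shape)  # (10000, 20) (20,) (20, 10000)
```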
For example,\\n\\n\"\n \"x.rechunk({0: -1, 1: 'auto'}) or x.rechunk({0: 'auto', 1: -1})\\n\\n\"\n \"which rechunk one shorter axis to a single chunk, while allowing\\n\"\n \"the other axis to automatically grow/shrink appropriately.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_svd._Single_chunk_case": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd_svd._Single_chunk_case", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 800, "end_line": 860, "span_ids": ["svd"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd(a, coerce_signs=True):\n \"\"\"\n Compute the singular value decomposition of a matrix.\n\n Parameters\n ----------\n a : (M, N) Array\n coerce_signs : bool\n Whether or not to apply sign coercion to singular vectors in\n order to maintain deterministic results, by default True.\n\n Examples\n --------\n\n >>> u, s, v = da.linalg.svd(x) # doctest: +SKIP\n\n Returns\n -------\n\n u : (M, K) Array, unitary / orthogonal\n Left-singular vectors of `a` (in columns) with shape (M, K)\n where K = min(M, N).\n s : (K,) Array, singular values in decreasing order (largest first)\n Singular values of `a`.\n v : (K, N) Array, unitary / orthogonal\n Right-singular vectors of `a` (in rows) with shape (K, N)\n where K = min(M, N).\n\n Warnings\n --------\n\n SVD is only supported for arrays with chunking in one dimension.\n This requires that all inputs either contain a single column\n of chunks (tall-and-skinny) or a single row of chunks (short-and-fat).\n For arrays with chunking in both dimensions, see da.linalg.svd_compressed.\n\n See Also\n --------\n\n np.linalg.svd : Equivalent NumPy Operation\n da.linalg.svd_compressed : Randomized SVD for fully chunked arrays\n dask.array.linalg.tsqr : QR factorization for tall-and-skinny arrays\n dask.array.utils.svd_flip : Sign normalization for singular vectors\n \"\"\"\n nb = a.numblocks\n if a.ndim != 2:\n raise ValueError(\n \"Array must be 2D.\\n\"\n \"Input shape: {}\\n\"\n \"Input ndim: {}\\n\".format(a.shape, a.ndim)\n )\n if nb[0] > 1 and nb[1] > 1:\n raise NotImplementedError(\n \"Array must be chunked in one dimension only. \"\n \"This function (svd) only supports tall-and-skinny or short-and-fat \"\n \"matrices (see da.linalg.svd_compressed for SVD on fully chunked arrays).\\n\"\n \"Input shape: {}\\n\"\n \"Input numblocks: {}\\n\".format(a.shape, nb)\n )\n\n # Single-chunk case\n # ... 
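A sketch of the rechunking the error message recommends (input sizes hypothetical):

```python
import dask.array as da

x = da.random.random((100_000, 100), chunks=(10_000, 50))
# Chunked along both axes, so da.linalg.qr(x) raises NotImplementedError.
q, r = da.linalg.qr(x.rechunk({0: "auto", 1: -1}))  # one column of blocks -> tsqr
```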
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu_lu.dsk._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu_lu.dsk._", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 977, "end_line": 1025, "span_ids": ["lu"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lu(a):\n \"\"\"\n Compute the lu decomposition of a matrix.\n\n Examples\n --------\n\n >>> p, l, u = da.linalg.lu(x) # doctest: +SKIP\n\n Returns\n -------\n\n p: Array, permutation matrix\n l: Array, lower triangular matrix with unit diagonal.\n u: Array, upper triangular matrix\n \"\"\"\n import scipy.linalg\n\n if a.ndim != 2:\n raise ValueError(\"Dimension must be 2 to perform lu decomposition\")\n\n xdim, ydim = a.shape\n if xdim != ydim:\n raise ValueError(\"Input must be a square matrix to perform lu decomposition\")\n if not len(set(a.chunks[0] + a.chunks[1])) == 1:\n msg = (\n \"All chunks must be a square matrix to perform lu decomposition. \"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise ValueError(msg)\n\n vdim = len(a.chunks[0])\n hdim = len(a.chunks[1])\n\n token = tokenize(a)\n name_lu = \"lu-lu-\" + token\n\n name_p = \"lu-p-\" + token\n name_l = \"lu-l-\" + token\n name_u = \"lu-u-\" + token\n\n # for internal calculation\n name_p_inv = \"lu-p-inv-\" + token\n name_l_permuted = \"lu-l-permute-\" + token\n name_u_transposed = \"lu-u-transpose-\" + token\n name_plu_dot = \"lu-plu-dot-\" + token\n name_lu_dot = \"lu-lu-dot-\" + token\n\n dsk = {}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.for_i_in_range_min_vdim__lu.for_i_in_range_min_vdim_.for_k_in_range_i_1_vdi.dsk_name_lu_k_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.for_i_in_range_min_vdim__lu.for_i_in_range_min_vdim_.for_k_in_range_i_1_vdi.dsk_name_lu_k_i_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 947, "end_line": 989, "span_ids": ["lu"], "tokens": 487}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lu(a):\n # ... 
other code\n for i in range(min(vdim, hdim)):\n target = (a.name, i, i)\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_plu_dot, i, p, p, i\n dsk[prev] = (np.dot, (name_l_permuted, i, p), (name_u, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n # diagonal block\n dsk[name_lu, i, i] = (scipy.linalg.lu, target)\n\n # sweep to horizontal\n for j in range(i + 1, hdim):\n target = (np.dot, (name_p_inv, i, i), (a.name, i, j))\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_lu_dot, i, p, p, j\n dsk[prev] = (np.dot, (name_l, i, p), (name_u, p, j))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[name_lu, i, j] = (_solve_triangular_lower, (name_l, i, i), target)\n\n # sweep to vertical\n for k in range(i + 1, vdim):\n target = (a.name, k, i)\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_plu_dot, k, p, p, i\n dsk[prev] = (np.dot, (name_l_permuted, k, p), (name_u, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n # solving x.dot(u) = target is equal to u.T.dot(x.T) = target.T\n dsk[name_lu, k, i] = (\n np.transpose,\n (\n _solve_triangular_lower,\n (name_u_transposed, i, i),\n (np.transpose, target),\n ),\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.None_4_lu.return.p_l_u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_lu.None_4_lu.return.p_l_u", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 991, "end_line": 1031, "span_ids": ["lu"], "tokens": 692}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lu(a):\n # ... 
other code\n\n for i in range(min(vdim, hdim)):\n for j in range(min(vdim, hdim)):\n if i == j:\n dsk[name_p, i, j] = (operator.getitem, (name_lu, i, j), 0)\n dsk[name_l, i, j] = (operator.getitem, (name_lu, i, j), 1)\n dsk[name_u, i, j] = (operator.getitem, (name_lu, i, j), 2)\n\n # permuted l is required to be propagated to i > j blocks\n dsk[name_l_permuted, i, j] = (np.dot, (name_p, i, j), (name_l, i, j))\n dsk[name_u_transposed, i, j] = (np.transpose, (name_u, i, j))\n # transposed permutation matrix is equal to its inverse\n dsk[name_p_inv, i, j] = (np.transpose, (name_p, i, j))\n elif i > j:\n dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n # calculations are performed using permuted l,\n # thus the result should be reverted by inverted (=transposed) p\n # to have the same row order as diagonal blocks\n dsk[name_l, i, j] = (np.dot, (name_p_inv, i, i), (name_lu, i, j))\n dsk[name_u, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n dsk[name_l_permuted, i, j] = (name_lu, i, j)\n else:\n dsk[name_p, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n dsk[name_l, i, j] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))\n dsk[name_u, i, j] = (name_lu, i, j)\n # l_permuted is not referred in upper triangulars\n\n pp, ll, uu = scipy.linalg.lu(np.ones(shape=(1, 1), dtype=a.dtype))\n pp_meta = meta_from_array(a, dtype=pp.dtype)\n ll_meta = meta_from_array(a, dtype=ll.dtype)\n uu_meta = meta_from_array(a, dtype=uu.dtype)\n\n graph = HighLevelGraph.from_collections(name_p, dsk, dependencies=[a])\n p = Array(graph, name_p, shape=a.shape, chunks=a.chunks, meta=pp_meta)\n\n graph = HighLevelGraph.from_collections(name_l, dsk, dependencies=[a])\n l = Array(graph, name_l, shape=a.shape, chunks=a.chunks, meta=ll_meta)\n\n graph = HighLevelGraph.from_collections(name_u, dsk, dependencies=[a])\n u = Array(graph, name_u, shape=a.shape, chunks=a.chunks, meta=uu_meta)\n\n return p, l, u", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_triangular_solve_triangular.return.Array_graph_name_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_triangular_solve_triangular.return.Array_graph_name_shape_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1113, "end_line": 1207, "span_ids": ["solve_triangular"], "tokens": 862}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def solve_triangular(a, b, lower=False):\n \"\"\"\n Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.\n\n Parameters\n ----------\n a : (M, M) array_like\n A triangular matrix\n b : (M,) or (M, N) array_like\n Right-hand side matrix in `a x = b`\n lower : bool, optional\n Use only data contained in the lower triangle of `a`.\n Default is to use upper triangle.\n\n Returns\n -------\n x : (M,) or (M, N) array\n Solution to the system `a x = b`. 
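A usage sketch for `lu` (hypothetical input; requires SciPy, a square array, and uniformly square chunks):

```python
import numpy as np
import dask.array as da

a_np = np.random.default_rng(0).standard_normal((8, 8))
a = da.from_array(a_np, chunks=(4, 4))

p, l, u = da.linalg.lu(a)
print(np.allclose((p @ l @ u).compute(), a_np))  # True: a == p @ l @ u
```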
Shape of return matches `b`.\n \"\"\"\n\n if a.ndim != 2:\n raise ValueError(\"a must be 2 dimensional\")\n if b.ndim <= 2:\n if a.shape[1] != b.shape[0]:\n raise ValueError(\"a.shape[1] and b.shape[0] must be equal\")\n if a.chunks[1] != b.chunks[0]:\n msg = (\n \"a.chunks[1] and b.chunks[0] must be equal. \"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise ValueError(msg)\n else:\n raise ValueError(\"b must be 1 or 2 dimensional\")\n\n vchunks = len(a.chunks[1])\n hchunks = 1 if b.ndim == 1 else len(b.chunks[1])\n token = tokenize(a, b, lower)\n name = \"solve-triangular-\" + token\n\n # for internal calculation\n # (name, i, j, k, l) corresponds to a_ij.dot(b_kl)\n name_mdot = \"solve-tri-dot-\" + token\n\n def _b_init(i, j):\n if b.ndim == 1:\n return b.name, i\n else:\n return b.name, i, j\n\n def _key(i, j):\n if b.ndim == 1:\n return name, i\n else:\n return name, i, j\n\n dsk = {}\n if lower:\n for i in range(vchunks):\n for j in range(hchunks):\n target = _b_init(i, j)\n if i > 0:\n prevs = []\n for k in range(i):\n prev = name_mdot, i, k, k, j\n dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[_key(i, j)] = (_solve_triangular_lower, (a.name, i, i), target)\n else:\n for i in range(vchunks):\n for j in range(hchunks):\n target = _b_init(i, j)\n if i < vchunks - 1:\n prevs = []\n for k in range(i + 1, vchunks):\n prev = name_mdot, i, k, k, j\n dsk[prev] = (np.dot, (a.name, i, k), _key(k, j))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[_key(i, j)] = (\n solve_triangular_safe,\n (a.name, i, i),\n target,\n )\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[a, b])\n\n a_meta = meta_from_array(a)\n b_meta = meta_from_array(b)\n res = _solve_triangular_lower(\n array_safe([[1, 0], [1, 2]], dtype=a.dtype, like=a_meta),\n array_safe([0, 1], dtype=b.dtype, like=b_meta),\n )\n meta = meta_from_array(a, b.ndim, dtype=res.dtype)\n return Array(graph, name, shape=b.shape, chunks=b.chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_solve.return.solve_triangular_u_uy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_solve_solve.return.solve_triangular_u_uy_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1129, "end_line": 1157, "span_ids": ["solve"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def solve(a, b, sym_pos=False):\n \"\"\"\n Solve the equation ``a x = b`` for ``x``. By default, use LU\n decomposition and forward / backward substitutions. 
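A usage sketch for `solve_triangular` (hypothetical, well-conditioned input; note `a.chunks[1]` must match `b.chunks[0]`):

```python
import numpy as np
import dask.array as da

rng = np.random.default_rng(0)
a_np = np.tril(rng.standard_normal((6, 6))) + 6 * np.eye(6)  # lower triangular
b_np = rng.standard_normal(6)

a = da.from_array(a_np, chunks=3)
b = da.from_array(b_np, chunks=3)
x = da.linalg.solve_triangular(a, b, lower=True)
print(np.allclose(a_np @ x.compute(), b_np))  # True
```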
When ``sym_pos`` is\n ``True``, use Cholesky decomposition.\n\n Parameters\n ----------\n a : (M, M) array_like\n A square matrix.\n b : (M,) or (M, N) array_like\n Right-hand side matrix in ``a x = b``.\n sym_pos : bool\n Assume a is symmetric and positive definite. If ``True``, use Cholesky\n decomposition.\n\n Returns\n -------\n x : (M,) or (M, N) Array\n Solution to the system ``a x = b``. Shape of the return matches the\n shape of `b`.\n \"\"\"\n if sym_pos:\n l, u = _cholesky(a)\n else:\n p, l, u = lu(a)\n b = p.T.dot(b)\n uy = solve_triangular(l, b, lower=True)\n return solve_triangular(u, uy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__cholesky__cholesky.return.lower_upper": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__cholesky__cholesky.return.lower_upper", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1277, "end_line": 1355, "span_ids": ["_cholesky"], "tokens": 875}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cholesky(a):\n \"\"\"\n Private function to perform Cholesky decomposition, which returns both\n lower and upper triangulars.\n \"\"\"\n\n if a.ndim != 2:\n raise ValueError(\"Dimension must be 2 to perform cholesky decomposition\")\n\n xdim, ydim = a.shape\n if xdim != ydim:\n raise ValueError(\n \"Input must be a square matrix to perform cholesky decomposition\"\n )\n if not len(set(a.chunks[0] + a.chunks[1])) == 1:\n msg = (\n \"All chunks must be a square matrix to perform cholesky decomposition. 
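A sketch contrasting the two paths of `solve` on a symmetric positive definite system (hypothetical input; both routes should agree up to rounding):

```python
import numpy as np
import dask.array as da

w = np.random.default_rng(0).standard_normal((8, 8))
a = da.from_array(w @ w.T + 8 * np.eye(8), chunks=(4, 4))  # SPD, square chunks
b = da.from_array(np.arange(8.0), chunks=4)

x_lu = da.linalg.solve(a, b)                  # LU + two triangular solves
x_chol = da.linalg.solve(a, b, sym_pos=True)  # Cholesky + two triangular solves
print(np.allclose(x_lu.compute(), x_chol.compute()))  # True
```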
\"\n \"Use .rechunk method to change the size of chunks.\"\n )\n raise ValueError(msg)\n\n vdim = len(a.chunks[0])\n hdim = len(a.chunks[1])\n\n token = tokenize(a)\n name = \"cholesky-\" + token\n\n # (name_lt_dot, i, j, k, l) corresponds to l_ij.dot(l_kl.T)\n name_lt_dot = \"cholesky-lt-dot-\" + token\n # because transposed results are needed for calculation,\n # we can build graph for upper triangular simultaneously\n name_upper = \"cholesky-upper-\" + token\n\n # calculates lower triangulars because subscriptions get simpler\n dsk = {}\n for i in range(vdim):\n for j in range(hdim):\n if i < j:\n dsk[name, i, j] = (\n partial(np.zeros_like, shape=(a.chunks[0][i], a.chunks[1][j])),\n meta_from_array(a),\n )\n dsk[name_upper, j, i] = (name, i, j)\n elif i == j:\n target = (a.name, i, j)\n if i > 0:\n prevs = []\n for p in range(i):\n prev = name_lt_dot, i, p, i, p\n dsk[prev] = (np.dot, (name, i, p), (name_upper, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[name, i, i] = (_cholesky_lower, target)\n dsk[name_upper, i, i] = (np.transpose, (name, i, i))\n else:\n # solving x.dot(L11.T) = (A21 - L20.dot(L10.T)) is equal to\n # L11.dot(x.T) = A21.T - L10.dot(L20.T)\n # L11.dot(x.T) = A12 - L10.dot(L02)\n target = (a.name, j, i)\n if j > 0:\n prevs = []\n for p in range(j):\n prev = name_lt_dot, j, p, i, p\n dsk[prev] = (np.dot, (name, j, p), (name_upper, p, i))\n prevs.append(prev)\n target = (operator.sub, target, (sum, prevs))\n dsk[name_upper, j, i] = (_solve_triangular_lower, (name, j, j), target)\n dsk[name, i, j] = (np.transpose, (name_upper, j, i))\n\n graph_upper = HighLevelGraph.from_collections(name_upper, dsk, dependencies=[a])\n graph_lower = HighLevelGraph.from_collections(name, dsk, dependencies=[a])\n a_meta = meta_from_array(a)\n cho = np.linalg.cholesky(array_safe([[1, 2], [2, 5]], dtype=a.dtype, like=a_meta))\n meta = meta_from_array(a, dtype=cho.dtype)\n\n lower = Array(graph_lower, name, shape=a.shape, chunks=a.chunks, meta=meta)\n # do not use .T, because part of transposed blocks are already calculated\n upper = Array(graph_upper, name_upper, shape=a.shape, chunks=a.chunks, meta=meta)\n return lower, upper", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_norm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_norm_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1363, "end_line": 1440, "span_ids": ["norm"], "tokens": 763}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.linalg)\ndef norm(x, ord=None, axis=None, keepdims=False):\n if axis is None:\n axis = tuple(range(x.ndim))\n elif isinstance(axis, Number):\n axis = (int(axis),)\n else:\n axis = tuple(axis)\n\n if len(axis) > 2:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n if ord == \"fro\":\n ord = None\n if 
len(axis) == 1:\n raise ValueError(\"Invalid norm order for vectors.\")\n\n # Coerce to double precision.\n r = x.astype(np.promote_types(x.dtype, float))\n\n if ord is None:\n r = (abs(r) ** 2).sum(axis=axis, keepdims=keepdims) ** 0.5\n elif ord == \"nuc\":\n if len(axis) == 1:\n raise ValueError(\"Invalid norm order for vectors.\")\n if x.ndim > 2:\n raise NotImplementedError(\"SVD based norm not implemented for ndim > 2\")\n\n r = svd(x)[1][None].sum(keepdims=keepdims)\n elif ord == np.inf:\n r = abs(r)\n if len(axis) == 1:\n r = r.max(axis=axis, keepdims=keepdims)\n else:\n r = r.sum(axis=axis[1], keepdims=True).max(axis=axis[0], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif ord == -np.inf:\n r = abs(r)\n if len(axis) == 1:\n r = r.min(axis=axis, keepdims=keepdims)\n else:\n r = r.sum(axis=axis[1], keepdims=True).min(axis=axis[0], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif ord == 0:\n if len(axis) == 2:\n raise ValueError(\"Invalid norm order for matrices.\")\n\n r = (r != 0).astype(r.dtype).sum(axis=axis, keepdims=keepdims)\n elif ord == 1:\n r = abs(r)\n if len(axis) == 1:\n r = r.sum(axis=axis, keepdims=keepdims)\n else:\n r = r.sum(axis=axis[0], keepdims=True).max(axis=axis[1], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif len(axis) == 2 and ord == -1:\n r = abs(r).sum(axis=axis[0], keepdims=True).min(axis=axis[1], keepdims=True)\n if keepdims is False:\n r = r.squeeze(axis=axis)\n elif len(axis) == 2 and ord == 2:\n if x.ndim > 2:\n raise NotImplementedError(\"SVD based norm not implemented for ndim > 2\")\n r = svd(x)[1][None].max(keepdims=keepdims)\n elif len(axis) == 2 and ord == -2:\n if x.ndim > 2:\n raise NotImplementedError(\"SVD based norm not implemented for ndim > 2\")\n r = svd(x)[1][None].min(keepdims=keepdims)\n else:\n if len(axis) == 2:\n raise ValueError(\"Invalid norm order for matrices.\")\n\n r = (abs(r) ** ord).sum(axis=axis, keepdims=keepdims) ** (1.0 / ord)\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_where_masked_where.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_where_masked_where.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 161, "end_line": 175, "span_ids": ["masked_where"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.ma)\ndef masked_where(condition, a):\n cshape = getattr(condition, \"shape\", ())\n if cshape and cshape != a.shape:\n raise IndexError(\n \"Inconsistent shape between the condition and the \"\n \"input (got %s and %s)\" % (cshape, a.shape)\n )\n condition = asanyarray(condition)\n a = asanyarray(a)\n ainds = tuple(range(a.ndim))\n cinds = tuple(range(condition.ndim))\n return blockwise(\n np.ma.masked_where, ainds, condition, cinds, a, 
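The branches above follow `numpy.linalg.norm`'s order conventions; a small sketch of the common matrix cases (input values arbitrary):

```python
import numpy as np
import dask.array as da

x = da.from_array(np.arange(9.0).reshape(3, 3) - 4, chunks=3)
print(da.linalg.norm(x).compute())              # Frobenius norm (ord=None)
print(da.linalg.norm(x, ord=1).compute())       # max absolute column sum
print(da.linalg.norm(x, ord=np.inf).compute())  # max absolute row sum
```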
ainds, dtype=a.dtype\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_values__masked_array.return.np_ma_masked_array_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_values__masked_array.return.np_ma_masked_array_data_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 119, "span_ids": ["getmaskarray", "_masked_array", "masked_values", "getdata", "fix_invalid"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.ma)\ndef masked_values(x, value, rtol=1e-05, atol=1e-08, shrink=True):\n x = asanyarray(x)\n if getattr(value, \"shape\", ()):\n raise ValueError(\"da.ma.masked_values doesn't support array `value`s\")\n return map_blocks(\n np.ma.masked_values, x, value, rtol=rtol, atol=atol, shrink=shrink\n )\n\n\n@derived_from(np.ma)\ndef fix_invalid(a, fill_value=None):\n a = asanyarray(a)\n return a.map_blocks(np.ma.fix_invalid, fill_value=fill_value)\n\n\n@derived_from(np.ma)\ndef getdata(a):\n a = asanyarray(a)\n return a.map_blocks(np.ma.getdata)\n\n\n@derived_from(np.ma)\ndef getmaskarray(a):\n a = asanyarray(a)\n return a.map_blocks(np.ma.getmaskarray)\n\n\ndef _masked_array(data, mask=np.ma.nomask, masked_dtype=None, **kwargs):\n if \"chunks\" in kwargs:\n del kwargs[\"chunks\"] # A Dask kwarg, not NumPy.\n return np.ma.masked_array(data, mask=mask, dtype=masked_dtype, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_array_masked_array.return.blockwise__masked_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_masked_array_masked_array.return.blockwise__masked_array_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 211, "end_line": 238, "span_ids": ["masked_array"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np.ma)\ndef masked_array(data, mask=np.ma.nomask, fill_value=None, **kwargs):\n data = asanyarray(data)\n inds = tuple(range(data.ndim))\n arginds = [inds, data, inds]\n\n if getattr(fill_value, \"shape\", ()):\n raise ValueError(\"non-scalar fill_value not supported\")\n kwargs[\"fill_value\"] = fill_value\n\n if mask is not 
np.ma.nomask:\n mask = asanyarray(mask)\n if mask.size == 1:\n mask = mask.reshape((1,) * data.ndim)\n elif data.shape != mask.shape:\n raise np.ma.MaskError(\n \"Mask and data not compatible: data shape \"\n \"is %s, and mask shape is \"\n \"%s.\" % (repr(data.shape), repr(mask.shape))\n )\n arginds.extend([mask, inds])\n\n if \"dtype\" in kwargs:\n kwargs[\"masked_dtype\"] = kwargs[\"dtype\"]\n else:\n kwargs[\"dtype\"] = data.dtype\n\n return blockwise(_masked_array, *arginds, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__set_fill_value_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py__set_fill_value_", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 242, "end_line": 263, "span_ids": ["_set_fill_value", "average", "set_fill_value"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _set_fill_value(x, fill_value):\n if isinstance(x, np.ma.masked_array):\n x = x.copy()\n np.ma.set_fill_value(x, fill_value=fill_value)\n return x\n\n\n@derived_from(np.ma)\ndef set_fill_value(a, fill_value):\n a = asanyarray(a)\n if getattr(fill_value, \"shape\", ()):\n raise ValueError(\"da.ma.set_fill_value doesn't support array `value`s\")\n fill_value = np.ma.core._check_fill_value(fill_value, a.dtype)\n res = a.map_blocks(_set_fill_value, fill_value)\n a.dask = res.dask\n a._name = res.name\n\n\n@derived_from(np.ma)\ndef average(a, axis=None, weights=None, returned=False):\n return _average(a, axis, weights, returned, is_masked=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser__Recurser.map_reduce.return.f_x_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser__Recurser.map_reduce.return.f_x_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 49, "end_line": 109, "span_ids": ["_Recurser.__init__", "_Recurser", "_Recurser.map_reduce"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Recurser:\n \"\"\"\n Utility class for recursing over nested iterables\n \"\"\"\n\n # This was copied almost verbatim from numpy.core.shape_base._Recurser\n\n def 
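A usage sketch for `da.ma.masked_array` (values hypothetical); reductions skip masked entries, and `da.ma.filled` substitutes the stored fill value:

```python
import numpy as np
import dask.array as da

data = da.from_array(np.array([1.0, 2.0, 3.0, 4.0]), chunks=2)
mask = da.from_array(np.array([False, True, False, True]), chunks=2)

m = da.ma.masked_array(data, mask=mask, fill_value=-1.0)
print(m.sum().compute())          # 4.0 -- masked entries are ignored
print(da.ma.filled(m).compute())  # [ 1. -1.  3. -1.]
```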
__init__(self, recurse_if):\n self.recurse_if = recurse_if\n\n def map_reduce(\n self,\n x,\n f_map=lambda x, **kwargs: x,\n f_reduce=lambda x, **kwargs: x,\n f_kwargs=lambda **kwargs: kwargs,\n **kwargs,\n ):\n \"\"\"\n Iterate over the nested list, applying:\n * ``f_map`` (T -> U) to items\n * ``f_reduce`` (Iterable[U] -> U) to mapped items\n\n For instance, ``map_reduce([[1, 2], 3, 4])`` is::\n\n f_reduce([\n f_reduce([\n f_map(1),\n f_map(2)\n ]),\n f_map(3),\n f_map(4)\n ]])\n\n\n State can be passed down through the calls with `f_kwargs`,\n to iterables of mapped items. When kwargs are passed, as in\n ``map_reduce([[1, 2], 3, 4], **kw)``, this becomes::\n\n kw1 = f_kwargs(**kw)\n kw2 = f_kwargs(**kw1)\n f_reduce([\n f_reduce([\n f_map(1), **kw2)\n f_map(2, **kw2)\n ], **kw1),\n f_map(3, **kw1),\n f_map(4, **kw1)\n ]], **kw)\n \"\"\"\n\n def f(x, **kwargs):\n if not self.recurse_if(x):\n return f_map(x, **kwargs)\n else:\n next_kwargs = f_kwargs(**kwargs)\n return f_reduce((f(xi, **next_kwargs) for xi in x), **kwargs)\n\n return f(x, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_taken_di_moveaxis.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_taken_di_moveaxis.return.result", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 131, "end_line": 151, "span_ids": ["moveaxis", "_Recurser.walk"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Implementation taken directly from numpy:\n# https://github.com/numpy/numpy/blob/d9b1e32cb8ef90d6b4a47853241db2a28146a57d/numpy/core/numeric.py#L1336-L1405\n@derived_from(np)\ndef moveaxis(a, source, destination):\n source = np.core.numeric.normalize_axis_tuple(source, a.ndim, \"source\")\n destination = np.core.numeric.normalize_axis_tuple(\n destination, a.ndim, \"destination\"\n )\n if len(source) != len(destination):\n raise ValueError(\n \"`source` and `destination` arguments must have \"\n \"the same number of elements\"\n )\n\n order = [n for n in range(a.ndim) if n not in source]\n\n for dest, src in sorted(zip(destination, source)):\n order.insert(dest, src)\n\n result = a.transpose(order)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_from_itertools_import_zip_GETNOREMOVE._getter_getter_nofancy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_from_itertools_import_zip_GETNOREMOVE._getter_getter_nofancy_", "embedding": null, "metadata": {"file_path": 
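`moveaxis` only computes an axis permutation and transposes; a quick sketch through the public `da.moveaxis`:

```python
import dask.array as da

x = da.zeros((3, 4, 5), chunks=2)
print(da.moveaxis(x, 0, -1).shape)             # (4, 5, 3)
print(da.moveaxis(x, [0, 1], [-1, -2]).shape)  # (5, 4, 3)
```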
"dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import zip_longest\nfrom numbers import Integral\n\nimport numpy as np\n\nfrom .. import config\nfrom ..blockwise import fuse_roots, optimize_blockwise\nfrom ..core import flatten, reverse_dict\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..optimization import fuse, inline_functions\nfrom ..utils import ensure_dict\nfrom .chunk import getitem\nfrom .core import getter, getter_inline, getter_nofancy\n\n# All get* functions the optimizations know about\nGETTERS = (getter, getter_nofancy, getter_inline, getitem)\n# These get* functions aren't ever completely removed from the graph,\n# even if the index should be a no-op by numpy semantics. Some array-like's\n# don't completely follow semantics, making indexing always necessary.\nGETNOREMOVE = (getter, getter_nofancy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_hold_keys_hold_keys.return.hold_keys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_hold_keys_hold_keys.return.hold_keys", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 73, "end_line": 108, "span_ids": ["hold_keys"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def hold_keys(dsk, dependencies):\n \"\"\"Find keys to avoid fusion\n\n We don't want to fuse data present in the graph because it is easier to\n serialize as a raw value.\n\n We don't want to fuse chains after getitem/GETTERS because we want to\n move around only small pieces of data, rather than the underlying arrays.\n \"\"\"\n dependents = reverse_dict(dependencies)\n data = {k for k, v in dsk.items() if type(v) not in (tuple, str)}\n\n hold_keys = list(data)\n for dat in data:\n deps = dependents[dat]\n for dep in deps:\n task = dsk[dep]\n # If the task is a get* function, we walk up the chain, and stop\n # when there's either more than one dependent, or the dependent is\n # no longer a get* function or an alias. 
We then add the final\n # key to the list of keys not to fuse.\n if type(task) is tuple and task and task[0] in GETTERS:\n try:\n while len(dependents[dep]) == 1:\n new_dep = next(iter(dependents[dep]))\n new_task = dsk[new_dep]\n # If the task is a get* or an alias, continue up the\n # linear chain\n if new_task[0] in GETTERS or new_task in dsk:\n dep = new_dep\n else:\n break\n except (IndexError, TypeError):\n pass\n hold_keys.append(dep)\n return hold_keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_slices_optimize_slices.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_slices_optimize_slices.return.dsk", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 111, "end_line": 195, "span_ids": ["optimize_slices"], "tokens": 758}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize_slices(dsk):\n \"\"\"Optimize slices\n\n 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]\n 2. Remove full slices, like x[:] -> x\n\n See also:\n fuse_slice_dict\n \"\"\"\n fancy_ind_types = (list, np.ndarray)\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple and v[0] in GETTERS and len(v) in (3, 5):\n if len(v) == 3:\n get, a, a_index = v\n # getter defaults to asarray=True, getitem is semantically False\n a_asarray = get is not getitem\n a_lock = None\n else:\n get, a, a_index, a_asarray, a_lock = v\n while type(a) is tuple and a[0] in GETTERS and len(a) in (3, 5):\n if len(a) == 3:\n f2, b, b_index = a\n b_asarray = f2 is not getitem\n b_lock = None\n else:\n f2, b, b_index, b_asarray, b_lock = a\n\n if a_lock and a_lock is not b_lock:\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if type(a_index) is tuple:\n indices = b_index + a_index\n if len(a_index) != len(b_index) and any(i is None for i in indices):\n break\n if f2 is getter_nofancy and any(\n isinstance(i, fancy_ind_types) for i in indices\n ):\n break\n elif f2 is getter_nofancy and (\n type(a_index) in fancy_ind_types or type(b_index) in fancy_ind_types\n ):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n # rely on fact that nested gets never decrease in\n # strictness e.g. 
`(getter_nofancy, (getter, ...))` never\n # happens\n get = getter if f2 is getter_inline else f2\n except NotImplementedError:\n break\n a, a_index, a_lock = b, c_index, b_lock\n a_asarray |= b_asarray\n\n # Skip the get call if not from from_array and nothing to do\n if get not in GETNOREMOVE and (\n (\n type(a_index) is slice\n and not a_index.start\n and a_index.stop is None\n and a_index.step is None\n )\n or (\n type(a_index) is tuple\n and all(\n type(s) is slice\n and not s.start\n and s.stop is None\n and s.step is None\n for s in a_index\n )\n )\n ):\n dsk[k] = a\n elif get is getitem or (a_asarray and not a_lock):\n # default settings are fine, drop the extra parameters. Since we\n # always fall back to inner `get` functions, `get is getitem`\n # can only occur if all gets are getitem, meaning all\n # parameters must be getitem defaults.\n dsk[k] = (get, a, a_index)\n else:\n dsk[k] = (get, a, a_index, a_asarray, a_lock)\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_normalize_slice_check_for_nonfusible_fancy_indexing.for_f_n_in_zip_longest_f.if_type_f_is_not_list_an.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_normalize_slice_check_for_nonfusible_fancy_indexing.for_f_n_in_zip_longest_f.if_type_f_is_not_list_an.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 198, "end_line": 227, "span_ids": ["check_for_nonfusible_fancy_indexing", "normalize_slice"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_slice(s):\n \"\"\"Replace Nones in slices with integers\n\n >>> normalize_slice(slice(None, None, None))\n slice(0, None, 1)\n \"\"\"\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)\n\n\ndef check_for_nonfusible_fancy_indexing(fancy, normal):\n # Check for fancy indexing and normal indexing, where the fancy\n # indexed dimensions != normal indexed dimensions with integers. 
E.g.:\n # disallow things like:\n # x[:, [1, 2], :][0, :, :] -> x[0, [1, 2], :] or\n # x[0, :, :][:, [1, 2], :] -> x[0, [1, 2], :]\n for f, n in zip_longest(fancy, normal, fillvalue=slice(None)):\n if type(f) is not list and isinstance(n, Integral):\n raise NotImplementedError(\n \"Can't handle normal indexing with \"\n \"integers and fancy indexing if the \"\n \"integers and fancy indices don't \"\n \"align with the same dimensions.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice_fuse_slice._and_newaxes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice_fuse_slice._and_newaxes", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 230, "end_line": 298, "span_ids": ["fuse_slice"], "tokens": 581}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_slice(a, b):\n \"\"\"Fuse stacked slices together\n\n Fuse a pair of repeated slices into a single slice:\n\n >>> fuse_slice(slice(1000, 2000), slice(10, 15))\n slice(1010, 1015, None)\n\n This also works for tuples of slices\n\n >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),\n ... (slice(10, 15), [5, 2]))\n (slice(110, 115, None), [150, 120])\n\n And a variety of other interesting cases\n\n >>> fuse_slice(slice(1000, 2000), 10) # integers\n 1010\n\n >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))\n slice(1050, 1100, 10)\n\n >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists\n [1005, 1010, 1015]\n\n >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP\n None\n \"\"\"\n # None only works if the second side is a full slice\n if a is None and isinstance(b, slice) and b == slice(None, None):\n return None\n\n # Replace None with 0 and one in start and step\n if isinstance(a, slice):\n a = normalize_slice(a)\n if isinstance(b, slice):\n b = normalize_slice(b)\n\n if isinstance(a, slice) and isinstance(b, Integral):\n if b < 0:\n raise NotImplementedError()\n return a.start + b * a.step\n\n if isinstance(a, slice) and isinstance(b, slice):\n start = a.start + a.step * b.start\n if b.stop is not None:\n stop = a.start + a.step * b.stop\n else:\n stop = None\n if a.stop is not None:\n if stop is not None:\n stop = min(a.stop, stop)\n else:\n stop = a.stop\n step = a.step * b.step\n if step == 1:\n step = None\n return slice(start, stop, step)\n\n if isinstance(b, list):\n return [fuse_slice(a, bb) for bb in b]\n if isinstance(a, list) and isinstance(b, (Integral, slice)):\n return a[b]\n\n if isinstance(a, tuple) and not isinstance(b, tuple):\n b = (b,)\n\n # If given two tuples walk through both, being mindful of uneven sizes\n # and newaxes\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice.None_8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_fuse_slice.None_8_", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 328, "span_ids": ["fuse_slice"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_slice(a, b):\n # ... other code\n if isinstance(a, tuple) and isinstance(b, tuple):\n\n # Check for non-fusible cases with fancy-indexing\n a_has_lists = any(isinstance(item, list) for item in a)\n b_has_lists = any(isinstance(item, list) for item in b)\n if a_has_lists and b_has_lists:\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n elif a_has_lists:\n check_for_nonfusible_fancy_indexing(a, b)\n elif b_has_lists:\n check_for_nonfusible_fancy_indexing(b, a)\n\n j = 0\n result = list()\n for i in range(len(a)):\n # axis ceased to exist or we're out of b\n if isinstance(a[i], Integral) or j == len(b):\n result.append(a[i])\n continue\n while b[j] is None: # insert any Nones on the rhs\n result.append(None)\n j += 1\n result.append(fuse_slice(a[i], b[j])) # Common case\n j += 1\n while j < len(b): # anything leftover on the right?\n result.append(b[j])\n j += 1\n return tuple(result)\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py__trim__trim.return.x_ind_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py__trim__trim.return.x_ind_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 245, "end_line": 278, "span_ids": ["_trim"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _trim(x, axes, boundary, block_info):\n \"\"\"Similar to dask.array.chunk.trim but requires one to specificy the\n boundary condition.\n\n ``axes``, and ``boundary`` are assumed to have been coerced.\n\n \"\"\"\n axes = [axes.get(i, 0) for i in range(x.ndim)]\n axes_front = (ax[0] if isinstance(ax, tuple) else ax for ax in axes)\n axes_back = (\n -ax[1]\n if isinstance(ax, tuple) and ax[1]\n else -ax\n if isinstance(ax, Integral) and ax\n else None\n for ax 
in axes\n )\n\n trim_front = (\n 0 if (chunk_location == 0 and boundary.get(i, \"none\") == \"none\") else ax\n for i, (chunk_location, ax) in enumerate(\n zip(block_info[0][\"chunk-location\"], axes_front)\n )\n )\n trim_back = (\n None\n if (chunk_location == chunks - 1 and boundary.get(i, \"none\") == \"none\")\n else ax\n for i, (chunks, chunk_location, ax) in enumerate(\n zip(block_info[0][\"num-chunks\"], block_info[0][\"chunk-location\"], axes_back)\n )\n )\n ind = tuple(slice(front, back) for front, back in zip(trim_front, trim_back))\n return x[ind]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_periodic_periodic.return.concatenate_r_x_l_ax": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_periodic_periodic.return.concatenate_r_x_l_ax", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 281, "end_line": 302, "span_ids": ["periodic"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def periodic(x, axis, depth):\n \"\"\"Copy a slice of an array around to its other side\n\n Useful to create periodic boundary conditions for overlap\n \"\"\"\n\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, depth),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-depth, None),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n l = x[left]\n r = x[right]\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([r, x, l], axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_reflect_reflect.return.concatenate_l_x_r_ax": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_reflect_reflect.return.concatenate_l_x_r_ax", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 305, "end_line": 332, "span_ids": ["reflect"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reflect(x, axis, depth):\n \"\"\"Reflect boundaries of array on the same side\n\n This is the converse of ``periodic``\n \"\"\"\n if depth == 1:\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, 1),)\n + (slice(None, None, None),) * 
(x.ndim - axis - 1)\n )\n else:\n left = (\n (slice(None, None, None),) * axis\n + (slice(depth - 1, None, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-1, -depth - 1, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n l = x[left]\n r = x[right]\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([l, x, r], axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_nearest_nearest.return.concatenate_l_x_r_ax": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_nearest_nearest.return.concatenate_l_x_r_ax", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 335, "end_line": 357, "span_ids": ["nearest"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def nearest(x, axis, depth):\n \"\"\"Reflect each boundary value outwards\n\n This mimics what the skimage.filters.gaussian_filter(... mode=\"nearest\")\n does.\n \"\"\"\n left = (\n (slice(None, None, None),) * axis\n + (slice(0, 1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n right = (\n (slice(None, None, None),) * axis\n + (slice(-1, -2, -1),)\n + (slice(None, None, None),) * (x.ndim - axis - 1)\n )\n\n l = concatenate([x[left]] * depth, axis=axis)\n r = concatenate([x[right]] * depth, axis=axis)\n\n l, r = _remove_overlap_boundaries(l, r, axis, depth)\n\n return concatenate([l, x, r], axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_constant__remove_overlap_boundaries.return.l_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_constant__remove_overlap_boundaries.return.l_r", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 250, "end_line": 274, "span_ids": ["constant", "_remove_overlap_boundaries"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def constant(x, axis, depth, value):\n \"\"\"Add constant slice to either side of array\"\"\"\n chunks = list(x.chunks)\n chunks[axis] = (depth,)\n\n c = full_like(\n x,\n value,\n shape=tuple(map(sum, chunks)),\n chunks=tuple(chunks),\n dtype=x.dtype,\n )\n\n 
return concatenate([c, x, c], axis=axis)\n\n\ndef _remove_overlap_boundaries(l, r, axis, depth):\n lchunks = list(l.chunks)\n lchunks[axis] = (depth,)\n rchunks = list(r.chunks)\n rchunks[axis] = (depth,)\n\n l = l.rechunk(tuple(lchunks))\n r = r.rechunk(tuple(rchunks))\n return l, r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_boundaries_boundaries.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_boundaries_boundaries.return.x", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 277, "end_line": 307, "span_ids": ["boundaries"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def boundaries(x, depth=None, kind=None):\n \"\"\"Add boundary conditions to an array before overlapping\n\n See Also\n --------\n periodic\n constant\n \"\"\"\n if not isinstance(kind, dict):\n kind = {i: kind for i in range(x.ndim)}\n if not isinstance(depth, dict):\n depth = {i: depth for i in range(x.ndim)}\n\n for i in range(x.ndim):\n d = depth.get(i, 0)\n if d == 0:\n continue\n\n this_kind = kind.get(i, \"none\")\n if this_kind == \"none\":\n continue\n elif this_kind == \"periodic\":\n x = periodic(x, i, d)\n elif this_kind == \"reflect\":\n x = reflect(x, i, d)\n elif this_kind == \"nearest\":\n x = nearest(x, i, d)\n elif i in kind:\n x = constant(x, i, d, kind[i])\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_overlap._Share_boundaries_betwe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_overlap._Share_boundaries_betwe", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 472, "end_line": 526, "span_ids": ["overlap"], "tokens": 1056}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def overlap(x, depth, boundary):\n \"\"\"Share boundaries between neighboring blocks\n\n Parameters\n ----------\n\n x: da.Array\n A dask array\n depth: dict\n The size of the shared boundary per axis\n boundary: dict\n The boundary condition on each axis. Options are 'reflect', 'periodic',\n 'nearest', 'none', or an array value. 
Such a value will fill the\n boundary with that value.\n\n The depth input informs how many cells to overlap between neighboring\n blocks ``{0: 2, 2: 5}`` means share two cells in 0 axis, 5 cells in 2 axis.\n Axes missing from this input will not be overlapped.\n\n Any axis containing chunks smaller than depth will be rechunked if\n possible.\n\n Examples\n --------\n >>> import numpy as np\n >>> import dask.array as da\n\n >>> x = np.arange(64).reshape((8, 8))\n >>> d = da.from_array(x, chunks=(4, 4))\n >>> d.chunks\n ((4, 4), (4, 4))\n\n >>> g = da.overlap.overlap(d, depth={0: 2, 1: 1},\n ... boundary={0: 100, 1: 'reflect'})\n >>> g.chunks\n ((8, 8), (6, 6))\n\n >>> np.array(g)\n array([[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [ 0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],\n [ 8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],\n [ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [ 16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [ 24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [ 32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [ 40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [ 48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],\n [ 56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]])\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap.depth2_overlap.return.x4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap.depth2_overlap.return.x4", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 416, "end_line": 432, "span_ids": ["overlap"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def overlap(x, depth, boundary):\n depth2 = coerce_depth(x.ndim, depth)\n boundary2 = coerce_boundary(x.ndim, boundary)\n\n # rechunk if new chunks are needed to fit depth in every chunk\n depths = [max(d) if isinstance(d, tuple) else d for d in depth2.values()]\n new_chunks = tuple(\n ensure_minimum_chunksize(size, c) for size, c in zip(depths, x.chunks)\n )\n x1 = x.rechunk(new_chunks) # this is a no-op if x.chunks == new_chunks\n\n x2 = boundaries(x1, depth2, boundary2)\n x3 = overlap_internal(x2, depth2)\n trim = {\n k: v * 2 if boundary2.get(k, \"none\") != \"none\" else 0 for k, v in depth2.items()\n }\n x4 = chunk.trim(x3, trim)\n return x4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile_percentile._Allow_using_t_digest_if": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile_percentile._Allow_using_t_digest_if", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 67, "end_line": 151, "span_ids": ["percentile"], "tokens": 739}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentile(a, q, method=\"linear\", internal_method=\"default\", **kwargs):\n \"\"\"Approximate percentile of 1-D array\n\n Parameters\n ----------\n a : Array\n q : array_like of float\n Percentile or sequence of percentiles to compute, which must be between\n 0 and 100 inclusive.\n method : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional\n The interpolation method to use when the desired percentile lies\n between two data points ``i < j``. Only valid for ``method='dask'``.\n\n - 'linear': ``i + (j - i) * fraction``, where ``fraction``\n is the fractional part of the index surrounded by ``i``\n and ``j``.\n - 'lower': ``i``.\n - 'higher': ``j``.\n - 'nearest': ``i`` or ``j``, whichever is nearest.\n - 'midpoint': ``(i + j) / 2``.\n\n .. versionchanged:: 2022.1.0\n This argument was previously called \"interpolation\"\n\n internal_method : {'default', 'dask', 'tdigest'}, optional\n What internal method to use. By default will use dask's internal custom\n algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for\n floats and ints and fallback to the ``'dask'`` otherwise.\n\n .. versionchanged:: 2022.1.0\n This argument was previously called \u201cmethod\u201d.\n\n interpolation : str, optional\n Deprecated name for the method keyword argument.\n\n .. 
deprecated:: 2022.1.0\n\n See Also\n --------\n numpy.percentile : Numpy's equivalent Percentile function\n \"\"\"\n from .dispatch import percentile_lookup as _percentile\n from .utils import array_safe, meta_from_array\n\n allowed_internal_methods = [\"default\", \"dask\", \"tdigest\"]\n\n if method in allowed_internal_methods:\n warnings.warn(\n \"In Dask 2022.1.0, the `method=` argument was renamed to `internal_method=`\",\n FutureWarning,\n )\n internal_method = method\n\n if \"interpolation\" in kwargs:\n if _numpy_122:\n warnings.warn(\n \"In Dask 2022.1.0, the `interpolation=` argument to percentile was renamed to \"\n \"`method= ` \",\n FutureWarning,\n )\n method = kwargs.pop(\"interpolation\")\n\n if kwargs:\n raise TypeError(\n f\"percentile() got an unexpected keyword argument {kwargs.keys()}\"\n )\n\n if not a.ndim == 1:\n raise NotImplementedError(\"Percentiles only implemented for 1-d arrays\")\n if isinstance(q, Number):\n q = [q]\n q = array_safe(q, like=meta_from_array(a))\n token = tokenize(a, q, method)\n\n dtype = a.dtype\n if np.issubdtype(dtype, np.integer):\n dtype = (array_safe([], dtype=dtype, like=meta_from_array(a)) / 0.5).dtype\n meta = meta_from_array(a, dtype=dtype)\n\n if internal_method not in allowed_internal_methods:\n raise ValueError(\n f\"`internal_method=` must be one of {allowed_internal_methods}\"\n )\n\n # Allow using t-digest if method is allowed and dtype is of floating or integer type\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile.if__percentile.return.Array_graph_name2_chunk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_percentile.if__percentile.return.Array_graph_name2_chunk", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 197, "span_ids": ["percentile"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentile(a, q, method=\"linear\", internal_method=\"default\", **kwargs):\n # ... 
other code\n if (\n internal_method == \"tdigest\"\n and method == \"linear\"\n and (np.issubdtype(dtype, np.floating) or np.issubdtype(dtype, np.integer))\n ):\n\n from dask.utils import import_required\n\n import_required(\n \"crick\", \"crick is a required dependency for using the t-digest method.\"\n )\n\n name = \"percentile_tdigest_chunk-\" + token\n dsk = {\n (name, i): (_tdigest_chunk, key) for i, key in enumerate(a.__dask_keys__())\n }\n\n name2 = \"percentile_tdigest-\" + token\n\n dsk2 = {(name2, 0): (_percentiles_from_tdigest, q, sorted(dsk))}\n\n # Otherwise use the custom percentile algorithm\n else:\n # Add 0 and 100 during calculation for more robust behavior (hopefully)\n calc_q = np.pad(q, 1, mode=\"constant\")\n calc_q[-1] = 100\n name = \"percentile_chunk-\" + token\n dsk = {\n (name, i): (_percentile, key, calc_q, method)\n for i, key in enumerate(a.__dask_keys__())\n }\n\n name2 = \"percentile-\" + token\n dsk2 = {\n (name2, 0): (\n merge_percentiles,\n q,\n [calc_q] * len(a.chunks[0]),\n sorted(dsk),\n method,\n )\n }\n\n dsk = merge(dsk, dsk2)\n graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[a])\n return Array(graph, name2, chunks=((len(q),),), meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._derived_from_np_random_RandomState.multinomial.return.self__wrap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._derived_from_np_random_RandomState.multinomial.return.self__wrap_", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 286, "end_line": 340, "span_ids": ["RandomState.f", "RandomState.multinomial", "RandomState.lognormal", "RandomState.hypergeometric", "RandomState.logistic", "RandomState:3", "RandomState.gamma", "RandomState.gumbel", "RandomState.exponential", "RandomState.logseries", "RandomState.laplace", "RandomState.geometric"], "tokens": 725}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState:\n\n # @derived_from(np.random.RandomState, skipblocks=1)\n # def dirichlet(self, alpha, size=None, chunks=\"auto\"):\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def exponential(self, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"exponential\", scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def f(self, dfnum, dfden, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"f\", dfnum, dfden, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def gamma(self, shape, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"gamma\", shape, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def geometric(self, p, size=None, chunks=\"auto\", **kwargs):\n return 
self._wrap(\"geometric\", p, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def gumbel(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"gumbel\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def hypergeometric(self, ngood, nbad, nsample, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"hypergeometric\", ngood, nbad, nsample, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def laplace(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"laplace\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def logistic(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"logistic\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"lognormal\", mean, sigma, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def logseries(self, p, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"logseries\", p, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def multinomial(self, n, pvals, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"multinomial\",\n n,\n pvals,\n size=size,\n chunks=chunks,\n extra_chunks=((len(pvals),),),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.negative_binomial_RandomState.rayleigh.return.self__wrap_rayleigh_sc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.negative_binomial_RandomState.rayleigh.return.self__wrap_rayleigh_sc", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 342, "end_line": 405, "span_ids": ["RandomState.pareto", "RandomState.noncentral_chisquare", "RandomState.noncentral_f", "RandomState:4", "RandomState.normal", "RandomState.permutation", "RandomState.power", "RandomState.randint", "RandomState.rayleigh", "RandomState.random_sample", "RandomState.random_integers", "RandomState.poisson", "RandomState.negative_binomial"], "tokens": 747}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState:\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def negative_binomial(self, n, p, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"negative_binomial\", n, p, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def noncentral_chisquare(self, df, nonc, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"noncentral_chisquare\", df, nonc, size=size, 
chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"noncentral_f\", dfnum, dfden, nonc, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def normal(self, loc=0.0, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"normal\", loc, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def pareto(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"pareto\", a, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def permutation(self, x):\n from .slicing import shuffle_slice\n\n if isinstance(x, numbers.Number):\n x = arange(x, chunks=\"auto\")\n\n index = np.arange(len(x))\n self._numpy_state.shuffle(index)\n return shuffle_slice(x, index)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def poisson(self, lam=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"poisson\", lam, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def power(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"power\", a, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def randint(self, low, high=None, size=None, chunks=\"auto\", dtype=\"l\", **kwargs):\n return self._wrap(\n \"randint\", low, high, size=size, chunks=chunks, dtype=dtype, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def random_integers(self, low, high=None, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"random_integers\", low, high, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def random_sample(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"random_sample\", size=size, chunks=chunks, **kwargs)\n\n random = random_sample\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def rayleigh(self, scale=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"rayleigh\", scale, size=size, chunks=chunks, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.standard_cauchy_RandomState.zipf.return.self__wrap_zipf_a_siz": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.standard_cauchy_RandomState.zipf.return.self__wrap_zipf_a_siz", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 407, "end_line": 455, "span_ids": ["RandomState.tomaxint", "RandomState.standard_t", "RandomState.triangular", "RandomState.weibull", "RandomState.standard_gamma", "RandomState.standard_cauchy", "RandomState.standard_normal", "RandomState.vonmises", "RandomState.zipf", "RandomState.uniform", "RandomState.standard_exponential", "RandomState.wald"], "tokens": 664}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState:\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_cauchy(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_cauchy\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_exponential(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_exponential\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_gamma(self, shape, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_gamma\", shape, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_normal(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_normal\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def standard_t(self, df, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"standard_t\", df, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def tomaxint(self, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"tomaxint\", size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def triangular(self, left, mode, right, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\n \"triangular\", left, mode, right, size=size, chunks=chunks, **kwargs\n )\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def uniform(self, low=0.0, high=1.0, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"uniform\", low, high, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def vonmises(self, mu, kappa, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"vonmises\", mu, kappa, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def wald(self, mean, scale, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"wald\", mean, scale, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def weibull(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"weibull\", a, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def zipf(self, a, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"zipf\", a, size=size, chunks=chunks, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py__choice_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py__choice_", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 457, "end_line": 522, "span_ids": ["_apply_random", "impl", "_choice"], "tokens": 468}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _choice(state_data, a, size, replace, p):\n state = np.random.RandomState(state_data)\n return state.choice(a, size=size, replace=replace, p=p)\n\n\ndef _apply_random(RandomState, funcname, state_data, size, args, kwargs):\n \"\"\"Apply RandomState method with seed\"\"\"\n if RandomState is None:\n RandomState = np.random.RandomState\n state = RandomState(state_data)\n func = getattr(state, funcname)\n return func(*args, size=size, **kwargs)\n\n\n_state = RandomState()\n\n\nseed = _state.seed\n\n\nbeta = _state.beta\nbinomial = _state.binomial\nchisquare = _state.chisquare\nif hasattr(_state, \"choice\"):\n choice = _state.choice\nexponential = _state.exponential\nf = _state.f\ngamma = _state.gamma\ngeometric = _state.geometric\ngumbel = _state.gumbel\nhypergeometric = _state.hypergeometric\nlaplace = _state.laplace\nlogistic = _state.logistic\nlognormal = _state.lognormal\nlogseries = _state.logseries\nmultinomial = _state.multinomial\nnegative_binomial = _state.negative_binomial\nnoncentral_chisquare = _state.noncentral_chisquare\nnoncentral_f = _state.noncentral_f\nnormal = _state.normal\npareto = _state.pareto\npermutation = _state.permutation\npoisson = _state.poisson\npower = _state.power\nrayleigh = _state.rayleigh\nrandom_sample = _state.random_sample\nrandom = random_sample\nrandint = _state.randint\nrandom_integers = _state.random_integers\ntriangular = _state.triangular\nuniform = _state.uniform\nvonmises = _state.vonmises\nwald = _state.wald\nweibull = _state.weibull\nzipf = _state.zipf\n\n\"\"\"\nStandard distributions\n\"\"\"\n\nstandard_cauchy = _state.standard_cauchy\nstandard_exponential = _state.standard_exponential\nstandard_gamma = _state.standard_gamma\nstandard_normal = _state.standard_normal\nstandard_t = _state.standard_t", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_cumdims_label_cumdims_label.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_cumdims_label_cumdims_label.return._", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 38, "span_ids": ["cumdims_label"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cumdims_label(chunks, const):\n \"\"\"Internal utility for cumulative sum with label.\n\n >>> cumdims_label(((5, 3, 3), (2, 2, 1)), 'n') # doctest: +NORMALIZE_WHITESPACE\n [(('n', 0), ('n', 5), ('n', 8), ('n', 11)),\n (('n', 0), ('n', 2), ('n', 4), ('n', 5))]\n \"\"\"\n return [\n tuple(zip((const,) * (1 + len(bds)), accumulate(add, (0,) + bds)))\n for bds in chunks\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__breakpoints__breakpoints.return.tuple_sorted_cumold_cum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__breakpoints__breakpoints.return.tuple_sorted_cumold_cum", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 52, "span_ids": ["_breakpoints"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _breakpoints(cumold, cumnew):\n \"\"\"\n\n >>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')\n >>> old = cumdims_label(((2, 2, 1), (5,)), 'o')\n\n >>> _breakpoints(new[0], old[0])\n (('n', 0), ('o', 0), ('n', 2), ('o', 2), ('o', 4), ('n', 5), ('o', 5))\n >>> _breakpoints(new[1], old[1])\n (('n', 0), ('o', 0), ('n', 2), ('n', 4), ('n', 5), ('o', 5))\n \"\"\"\n return tuple(sorted(cumold + cumnew, key=itemgetter(1)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__old_to_new__old_to_new.return.old_to_new": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__old_to_new__old_to_new.return.old_to_new", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 116, "end_line": 157, "span_ids": ["_old_to_new"], "tokens": 491}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _old_to_new(old_chunks, new_chunks):\n \"\"\"Helper to build old_chunks to new_chunks.\n\n Handles missing values, as long as the missing dimension\n is unchanged.\n\n Examples\n --------\n >>> old = ((10, 10, 10, 10, 10), )\n >>> new = ((25, 5, 20), )\n >>> _old_to_new(old, new) # doctest: +NORMALIZE_WHITESPACE\n [[[(0, slice(0, 10, None)), (1, slice(0, 10, None)), (2, slice(0, 5, None))],\n [(2, slice(5, 10, None))],\n [(3, slice(0, 10, None)), (4, slice(0, 10, None))]]]\n \"\"\"\n old_known = [x for x in old_chunks if not any(math.isnan(y) for y in x)]\n new_known = [x for x in new_chunks if not any(math.isnan(y) for y in x)]\n\n n_missing = [sum(math.isnan(y) for y in x) for x in old_chunks]\n n_missing2 = [sum(math.isnan(y) for y in x) for x in new_chunks]\n\n cmo = cumdims_label(old_known, \"o\")\n cmn = cumdims_label(new_known, \"n\")\n\n sums = [sum(o) for o in old_known]\n sums2 = [sum(n) for n in new_known]\n\n if not sums == sums2:\n raise ValueError(f\"Cannot change dimensions from {sums!r} to {sums2!r}\")\n if not n_missing == n_missing2:\n raise ValueError(\n \"Chunks must be unchanging 
along unknown dimensions.\\n\\n\"\n \"A possible solution:\\n x.compute_chunk_sizes()\"\n )\n\n old_to_new = [_intersect_1d(_breakpoints(cm[0], cm[1])) for cm in zip(cmo, cmn)]\n for idx, missing in enumerate(n_missing):\n if missing:\n # Missing dimensions are always unchanged, so old -> new is everything\n extra = [[(i, slice(0, None))] for i in range(missing)]\n old_to_new.insert(idx, extra)\n return old_to_new", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_intersect_chunks_intersect_chunks.return.cross": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_intersect_chunks_intersect_chunks.return.cross", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 158, "end_line": 182, "span_ids": ["intersect_chunks"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def intersect_chunks(old_chunks, new_chunks):\n \"\"\"\n Make dask.array slices as intersection of old and new chunks.\n\n >>> intersections = intersect_chunks(((4, 4), (2,)),\n ... ((8,), (1, 1)))\n >>> list(intersections) # doctest: +NORMALIZE_WHITESPACE\n [(((0, slice(0, 4, None)), (0, slice(0, 1, None))),\n ((1, slice(0, 4, None)), (0, slice(0, 1, None)))),\n (((0, slice(0, 4, None)), (0, slice(1, 2, None))),\n ((1, slice(0, 4, None)), (0, slice(1, 2, None))))]\n\n Parameters\n ----------\n\n old_chunks : iterable of tuples\n block sizes along each dimension (convert from old_chunks)\n new_chunks: iterable of tuples\n block sizes along each dimension (converts to new_chunks)\n \"\"\"\n old_to_new = _old_to_new(old_chunks, new_chunks)\n\n cross1 = product(*old_to_new)\n cross = chain(tuple(product(*cr)) for cr in cross1)\n return cross", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_rechunk_rechunk.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_rechunk_rechunk.return.x", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 187, "end_line": 280, "span_ids": ["rechunk"], "tokens": 839}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rechunk(x, chunks=\"auto\", threshold=None, block_size_limit=None, balance=False):\n \"\"\"\n Convert blocks in dask 
array x to new chunks.\n\n    Parameters\n    ----------\n    x: dask array\n        Array to be rechunked.\n    chunks: int, tuple, dict or str, optional\n        The new block dimensions to create. -1 indicates the full size of the\n        corresponding dimension. Default is \"auto\" which automatically\n        determines chunk sizes.\n    threshold: int, optional\n        The graph growth factor under which we don't bother introducing an\n        intermediate step.\n    block_size_limit: int, optional\n        The maximum block size (in bytes) we want to produce.\n        Defaults to the configuration value ``array.chunk-size``.\n    balance : bool, default False\n        If True, try to make each chunk the same size.\n\n        This means ``balance=True`` will remove any small leftover chunks, so\n        using ``x.rechunk(chunks=len(x) // N, balance=True)``\n        will almost certainly result in ``N`` chunks.\n\n    Examples\n    --------\n    >>> import dask.array as da\n    >>> x = da.ones((1000, 1000), chunks=(100, 100))\n\n    Specify uniform chunk sizes with a tuple\n\n    >>> y = x.rechunk((1000, 10))\n\n    Or chunk only specific dimensions with a dictionary\n\n    >>> y = x.rechunk({0: 1000})\n\n    Use the value ``-1`` to specify that you want a single chunk along a\n    dimension or the value ``\"auto\"`` to specify that dask can freely rechunk a\n    dimension to attain blocks of a uniform block size\n\n    >>> y = x.rechunk({0: -1, 1: 'auto'}, block_size_limit=1e8)\n\n    If a chunk size does not divide the dimension then rechunk will leave any\n    unevenness to the last chunk.\n\n    >>> x.rechunk(chunks=(400, -1)).chunks\n    ((400, 400, 200), (1000,))\n\n    However, if you want more balanced chunks and don't mind Dask choosing a\n    different chunksize for you, then you can use the ``balance=True`` option.\n\n    >>> x.rechunk(chunks=(400, -1), balance=True).chunks\n    ((500, 500), (1000,))\n    \"\"\"\n    # don't rechunk if array is empty\n    if x.ndim > 0 and all(s == 0 for s in x.shape):\n        return x\n\n    if isinstance(chunks, dict):\n        chunks = {validate_axis(c, x.ndim): v for c, v in chunks.items()}\n        for i in range(x.ndim):\n            if i not in chunks:\n                chunks[i] = x.chunks[i]\n    if isinstance(chunks, (tuple, list)):\n        chunks = tuple(lc if lc is not None else rc for lc, rc in zip(chunks, x.chunks))\n    chunks = normalize_chunks(\n        chunks, x.shape, limit=block_size_limit, dtype=x.dtype, previous_chunks=x.chunks\n    )\n\n    # Now chunks are tuple of tuples\n    if not balance and (chunks == x.chunks):\n        return x\n    ndim = x.ndim\n    if not len(chunks) == ndim:\n        raise ValueError(\"Provided chunks are not consistent with shape\")\n\n    if balance:\n        chunks = tuple(_balance_chunksizes(chunk) for chunk in chunks)\n\n    new_shapes = tuple(map(sum, chunks))\n\n    for new, old in zip(new_shapes, x.shape):\n        if new != old and not math.isnan(old) and not math.isnan(new):\n            raise ValueError(\"Provided chunks are not consistent with shape\")\n\n    steps = plan_rechunk(\n        x.chunks, chunks, x.dtype.itemsize, threshold, block_size_limit\n    )\n    for c in steps:\n        x = _compute_rechunk(x, c)\n\n    return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__number_of_blocks_divide_to_width.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__number_of_blocks_divide_to_width.return.tuple_chunks_", "embedding": null,
"metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 257, "end_line": 291, "span_ids": ["estimate_graph_size", "divide_to_width", "_number_of_blocks", "_largest_block_size"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _number_of_blocks(chunks):\n return reduce(mul, map(len, chunks))\n\n\ndef _largest_block_size(chunks):\n return reduce(mul, map(max, chunks))\n\n\ndef estimate_graph_size(old_chunks, new_chunks):\n \"\"\"Estimate the graph size during a rechunk computation.\"\"\"\n # Estimate the number of intermediate blocks that will be produced\n # (we don't use intersect_chunks() which is much more expensive)\n crossed_size = reduce(\n mul,\n (\n (len(oc) + len(nc) - 1 if oc != nc else len(oc))\n for oc, nc in zip(old_chunks, new_chunks)\n ),\n )\n return crossed_size\n\n\ndef divide_to_width(desired_chunks, max_width):\n \"\"\"Minimally divide the given chunks so as to make the largest chunk\n width less or equal than *max_width*.\n \"\"\"\n chunks = []\n for c in desired_chunks:\n nb_divides = int(np.ceil(c / max_width))\n for i in range(nb_divides):\n n = c // (nb_divides - i)\n chunks.append(n)\n c -= n\n assert c == 0\n return tuple(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_merge_to_number_merge_to_number.return.tuple_filter_None_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_merge_to_number_merge_to_number.return.tuple_filter_None_chunks", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 294, "end_line": 345, "span_ids": ["merge_to_number"], "tokens": 436}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_to_number(desired_chunks, max_number):\n \"\"\"Minimally merge the given chunks so as to drop the number of\n chunks below *max_number*, while minimizing the largest width.\n \"\"\"\n if len(desired_chunks) <= max_number:\n return desired_chunks\n\n distinct = set(desired_chunks)\n if len(distinct) == 1:\n # Fast path for homogeneous target, also ensuring a regular result\n w = distinct.pop()\n n = len(desired_chunks)\n total = n * w\n\n desired_width = total // max_number\n width = w * (desired_width // w)\n adjust = (total - max_number * width) // w\n\n return (width + w,) * adjust + (width,) * (max_number - adjust)\n\n desired_width = sum(desired_chunks) // max_number\n nmerges = len(desired_chunks) - max_number\n\n heap = [\n (desired_chunks[i] + desired_chunks[i + 1], i, i + 1)\n for i in 
range(len(desired_chunks) - 1)\n    ]\n    heapq.heapify(heap)\n\n    chunks = list(desired_chunks)\n\n    while nmerges > 0:\n        # Find smallest interval to merge\n        width, i, j = heapq.heappop(heap)\n        # If interval was made invalid by another merge, recompute\n        # it, re-insert it and retry.\n        if chunks[j] == 0:\n            j += 1\n            while chunks[j] == 0:\n                j += 1\n            heapq.heappush(heap, (chunks[i] + chunks[j], i, j))\n            continue\n        elif chunks[i] + chunks[j] != width:\n            heapq.heappush(heap, (chunks[i] + chunks[j], i, j))\n            continue\n        # Merge\n        assert chunks[i] != 0\n        chunks[i] = 0  # mark deleted\n        chunks[j] = width\n        nmerges -= 1\n\n    return tuple(filter(None, chunks))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_merge_rechunk_find_merge_rechunk.return.tuple_chunks_memory_lim": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_merge_rechunk_find_merge_rechunk.return.tuple_chunks_memory_lim", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 348, "end_line": 418, "span_ids": ["find_merge_rechunk"], "tokens": 684}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def find_merge_rechunk(old_chunks, new_chunks, block_size_limit):\n    \"\"\"\n    Find an intermediate rechunk that would merge some adjacent blocks\n    together in order to get us nearer the *new_chunks* target, without\n    violating the *block_size_limit* (in number of elements).\n    \"\"\"\n    ndim = len(old_chunks)\n\n    old_largest_width = [max(c) for c in old_chunks]\n    new_largest_width = [max(c) for c in new_chunks]\n\n    graph_size_effect = {\n        dim: len(nc) / len(oc)\n        for dim, (oc, nc) in enumerate(zip(old_chunks, new_chunks))\n    }\n\n    block_size_effect = {\n        dim: new_largest_width[dim] / (old_largest_width[dim] or 1)\n        for dim in range(ndim)\n    }\n\n    # Our goal is to reduce the number of nodes in the rechunk graph\n    # by merging some adjacent chunks, so consider dimensions where we can\n    # reduce the # of chunks\n    merge_candidates = [dim for dim in range(ndim) if graph_size_effect[dim] <= 1.0]\n\n    # Merging along each dimension reduces the graph size by a certain factor\n    # and increases the largest block size in memory by a certain factor.\n    # We want to optimize the graph size while staying below the given\n    # block_size_limit. This is in effect a knapsack problem, except with\n    # multiplicative values and weights. 
Just use a greedy algorithm\n # by trying dimensions in decreasing value / weight order.\n def key(k):\n gse = graph_size_effect[k]\n bse = block_size_effect[k]\n if bse == 1:\n bse = 1 + 1e-9\n return (np.log(gse) / np.log(bse)) if bse > 0 else 0\n\n sorted_candidates = sorted(merge_candidates, key=key)\n\n largest_block_size = reduce(mul, old_largest_width)\n\n chunks = list(old_chunks)\n memory_limit_hit = False\n\n for dim in sorted_candidates:\n # Examine this dimension for possible graph reduction\n new_largest_block_size = (\n largest_block_size * new_largest_width[dim] // (old_largest_width[dim] or 1)\n )\n if new_largest_block_size <= block_size_limit:\n # Full replacement by new chunks is possible\n chunks[dim] = new_chunks[dim]\n largest_block_size = new_largest_block_size\n else:\n # Try a partial rechunk, dividing the new chunks into\n # smaller pieces\n largest_width = old_largest_width[dim]\n chunk_limit = int(block_size_limit * largest_width / largest_block_size)\n c = divide_to_width(new_chunks[dim], chunk_limit)\n if len(c) <= len(old_chunks[dim]):\n # We manage to reduce the number of blocks, so do it\n chunks[dim] = c\n largest_block_size = largest_block_size * max(c) // largest_width\n\n memory_limit_hit = True\n\n assert largest_block_size == _largest_block_size(chunks)\n assert largest_block_size <= block_size_limit\n return tuple(chunks), memory_limit_hit", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_split_rechunk_find_split_rechunk.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_find_split_rechunk_find_split_rechunk.return.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 446, "span_ids": ["find_split_rechunk"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def find_split_rechunk(old_chunks, new_chunks, graph_size_limit):\n \"\"\"\n Find an intermediate rechunk that would split some chunks to\n get us nearer *new_chunks*, without violating the *graph_size_limit*.\n \"\"\"\n ndim = len(old_chunks)\n\n chunks = list(old_chunks)\n\n for dim in range(ndim):\n graph_size = estimate_graph_size(chunks, new_chunks)\n if graph_size > graph_size_limit:\n break\n if len(old_chunks[dim]) > len(new_chunks[dim]):\n # It's not interesting to split\n continue\n # Merge the new chunks so as to stay within the graph size budget\n max_number = int(len(old_chunks[dim]) * graph_size_limit / graph_size)\n c = merge_to_number(new_chunks[dim], max_number)\n assert len(c) <= max_number\n # Consider the merge successful if its result has a greater length\n # and smaller max width than the old chunks\n if len(c) >= len(old_chunks[dim]) and max(c) <= max(old_chunks[dim]):\n chunks[dim] = c\n\n return tuple(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_plan_rechunk_plan_rechunk.return.steps_new_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_plan_rechunk_plan_rechunk.return.steps_new_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 449, "end_line": 528, "span_ids": ["plan_rechunk"], "tokens": 631}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plan_rechunk(\n old_chunks, new_chunks, itemsize, threshold=None, block_size_limit=None\n):\n \"\"\"Plan an iterative rechunking from *old_chunks* to *new_chunks*.\n The plan aims to minimize the rechunk graph size.\n\n Parameters\n ----------\n itemsize: int\n The item size of the array\n threshold: int\n The graph growth factor under which we don't bother\n introducing an intermediate step\n block_size_limit: int\n The maximum block size (in bytes) we want to produce during an\n intermediate step\n\n Notes\n -----\n No intermediate steps will be planned if any dimension of ``old_chunks``\n is unknown.\n \"\"\"\n threshold = threshold or config.get(\"array.rechunk-threshold\")\n block_size_limit = block_size_limit or config.get(\"array.chunk-size\")\n if isinstance(block_size_limit, str):\n block_size_limit = parse_bytes(block_size_limit)\n\n ndim = len(new_chunks)\n steps = []\n has_nans = [any(math.isnan(y) for y in x) for x in old_chunks]\n\n if ndim <= 1 or not all(new_chunks) or any(has_nans):\n # Trivial array / unknown dim => no need / ability for an intermediate\n return steps + [new_chunks]\n\n # Make it a number ef elements\n block_size_limit /= itemsize\n\n # Fix block_size_limit if too small for either old_chunks or new_chunks\n largest_old_block = _largest_block_size(old_chunks)\n largest_new_block = _largest_block_size(new_chunks)\n block_size_limit = max([block_size_limit, largest_old_block, largest_new_block])\n\n # The graph size above which to optimize\n graph_size_threshold = threshold * (\n _number_of_blocks(old_chunks) + _number_of_blocks(new_chunks)\n )\n\n current_chunks = old_chunks\n first_pass = True\n\n while True:\n graph_size = estimate_graph_size(current_chunks, new_chunks)\n if graph_size < graph_size_threshold:\n break\n\n if first_pass:\n chunks = current_chunks\n else:\n # We hit the block_size_limit in a previous merge pass =>\n # accept a significant increase in graph size in exchange for\n # 1) getting nearer the goal 2) reducing the largest block size\n # to make place for the following merge.\n # To see this pass in action, make the block_size_limit very small.\n chunks = find_split_rechunk(\n current_chunks, new_chunks, graph_size * threshold\n )\n chunks, memory_limit_hit = find_merge_rechunk(\n chunks, new_chunks, block_size_limit\n )\n if (chunks == current_chunks and not first_pass) or chunks == new_chunks:\n break\n if chunks != current_chunks:\n steps.append(chunks)\n current_chunks = chunks\n if not 
memory_limit_hit:\n break\n first_pass = False\n\n return steps + [new_chunks]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__compute_rechunk__compute_rechunk.return.Array_graph_merge_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__compute_rechunk__compute_rechunk.return.Array_graph_merge_name_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 531, "end_line": 589, "span_ids": ["_compute_rechunk"], "tokens": 613}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compute_rechunk(x, chunks):\n \"\"\"Compute the rechunk of *x* to the given *chunks*.\"\"\"\n if x.size == 0:\n # Special case for empty array, as the algorithm below does not behave correctly\n return empty(x.shape, chunks=chunks, dtype=x.dtype)\n\n ndim = x.ndim\n crossed = intersect_chunks(x.chunks, chunks)\n x2 = dict()\n intermediates = dict()\n token = tokenize(x, chunks)\n merge_name = \"rechunk-merge-\" + token\n split_name = \"rechunk-split-\" + token\n split_name_suffixes = count()\n\n # Pre-allocate old block references, to allow re-use and reduce the\n # graph's memory footprint a bit.\n old_blocks = np.empty([len(c) for c in x.chunks], dtype=\"O\")\n for index in np.ndindex(old_blocks.shape):\n old_blocks[index] = (x.name,) + index\n\n # Iterate over all new blocks\n new_index = product(*(range(len(c)) for c in chunks))\n\n for new_idx, cross1 in zip(new_index, crossed):\n key = (merge_name,) + new_idx\n old_block_indices = [[cr[i][0] for cr in cross1] for i in range(ndim)]\n subdims1 = [len(set(old_block_indices[i])) for i in range(ndim)]\n\n rec_cat_arg = np.empty(subdims1, dtype=\"O\")\n rec_cat_arg_flat = rec_cat_arg.flat\n\n # Iterate over the old blocks required to build the new block\n for rec_cat_index, ind_slices in enumerate(cross1):\n old_block_index, slices = zip(*ind_slices)\n name = (split_name, next(split_name_suffixes))\n old_index = old_blocks[old_block_index][1:]\n if all(\n slc.start == 0 and slc.stop == x.chunks[i][ind]\n for i, (slc, ind) in enumerate(zip(slices, old_index))\n ):\n rec_cat_arg_flat[rec_cat_index] = old_blocks[old_block_index]\n else:\n intermediates[name] = (getitem, old_blocks[old_block_index], slices)\n rec_cat_arg_flat[rec_cat_index] = name\n\n assert rec_cat_index == rec_cat_arg.size - 1\n\n # New block is formed by concatenation of sliced old blocks\n if all(d == 1 for d in rec_cat_arg.shape):\n x2[key] = rec_cat_arg.flat[0]\n else:\n x2[key] = (concatenate3, rec_cat_arg.tolist())\n\n del old_blocks, new_index\n\n layer = toolz.merge(x2, intermediates)\n graph = HighLevelGraph.from_collections(merge_name, layer, dependencies=[x])\n return Array(graph, merge_name, chunks, meta=x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__PrettyBlocks__PrettyBlocks.__repr__.__str__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__PrettyBlocks__PrettyBlocks.__repr__.__str__", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 618, "end_line": 654, "span_ids": ["_PrettyBlocks.__init__", "_PrettyBlocks.__str__", "_PrettyBlocks", "_PrettyBlocks:2"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _PrettyBlocks:\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __str__(self):\n runs = []\n run = []\n repeats = 0\n for c in self.blocks:\n if run and run[-1] == c:\n if repeats == 0 and len(run) > 1:\n runs.append((None, run[:-1]))\n run = run[-1:]\n repeats += 1\n else:\n if repeats > 0:\n assert len(run) == 1\n runs.append((repeats + 1, run[-1]))\n run = []\n repeats = 0\n run.append(c)\n if run:\n if repeats == 0:\n runs.append((None, run))\n else:\n assert len(run) == 1\n runs.append((repeats + 1, run[-1]))\n\n parts = []\n for repeats, run in runs:\n if repeats is None:\n parts.append(str(run))\n else:\n parts.append(\"%d*[%s]\" % (repeats, run))\n return \" | \".join(parts)\n\n __repr__ = __str__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_blocks_format_blocks.return._PrettyBlocks_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_blocks_format_blocks.return._PrettyBlocks_blocks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 631, "end_line": 645, "span_ids": ["format_blocks"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_blocks(blocks):\n \"\"\"\n Pretty-format *blocks*.\n\n >>> format_blocks((10, 10, 10))\n 3*[10]\n >>> format_blocks((2, 3, 4))\n [2, 3, 4]\n >>> format_blocks((10, 10, 5, 6, 2, 2, 2, 7))\n 2*[10] | [5, 6] | 3*[2] | [7]\n \"\"\"\n assert isinstance(blocks, tuple) and all(\n isinstance(x, int) or math.isnan(x) for x in blocks\n )\n return _PrettyBlocks(blocks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_builtins_divide.return.f_a_b_dtype_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_builtins_divide.return.f_a_b_dtype_dtype_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 38, "span_ids": ["imports", "divide"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import builtins\nimport contextlib\nimport operator\nfrom collections.abc import Iterable\nfrom functools import partial\nfrom itertools import product, repeat\nfrom math import ceil, factorial, log, log2\nfrom numbers import Integral, Number\n\nimport numpy as np\nfrom tlz import accumulate, compose, drop, get, partition_all, pluck\n\nfrom .. import config\nfrom ..base import tokenize\nfrom ..blockwise import lol_tuples\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import deepmap, derived_from, funcname, getargspec, is_series_like\nfrom . import chunk\nfrom .blockwise import blockwise\nfrom .core import Array, _concatenate2, handle_out, implements, unknown_chunk_message\nfrom .creation import arange, diagonal\n\n# Keep empty_lookup here for backwards compatibility\nfrom .dispatch import divide_lookup, empty_lookup # noqa: F401\nfrom .utils import (\n asarray_safe,\n compute_meta,\n is_arraylike,\n meta_from_array,\n validate_axis,\n)\nfrom .wrap import ones, zeros\n\n\ndef divide(a, b, dtype=None):\n key = lambda x: getattr(x, \"__array_priority__\", float(\"-inf\"))\n f = divide_lookup.dispatch(type(builtins.max(a, b, key=key)))\n return f(a, b, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction_reduction._General_version_of_red": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction_reduction._General_version_of_red", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 139, "span_ids": ["reduction"], "tokens": 999}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction(\n x,\n chunk,\n aggregate,\n axis=None,\n keepdims=False,\n dtype=None,\n split_every=None,\n combine=None,\n name=None,\n out=None,\n concatenate=True,\n output_size=1,\n meta=None,\n):\n \"\"\"General version of reductions\n\n Parameters\n ----------\n x: Array\n Data being reduced along one or more axes\n chunk: callable(x_chunk, axis, 
keepdims)\n        First function to be executed when resolving the dask graph.\n        This function is applied in parallel to all original chunks of x.\n        See below for function parameters.\n    combine: callable(x_chunk, axis, keepdims), optional\n        Function used for intermediate recursive aggregation (see\n        split_every below). If omitted, it defaults to aggregate.\n        If the reduction can be performed in less than 3 steps, it will not\n        be invoked at all.\n    aggregate: callable(x_chunk, axis, keepdims)\n        Last function to be executed when resolving the dask graph,\n        producing the final output. It is always invoked, even when the reduced\n        Array has only a single chunk along the reduced axes.\n    axis: int or sequence of ints, optional\n        Axis or axes to aggregate upon. If omitted, aggregate along all axes.\n    keepdims: boolean, optional\n        Whether the reduction function should preserve the reduced axes,\n        leaving them at size ``output_size``, or remove them.\n    dtype: np.dtype\n        Data type of the output. This argument was previously optional, but\n        leaving as ``None`` will now raise an exception.\n    split_every: int >= 2 or dict(axis: int), optional\n        Determines the depth of the recursive aggregation. If set to the\n        number of input chunks or more, the aggregation will be performed in\n        two steps, one ``chunk`` function per input chunk and a single\n        ``aggregate`` function at the end. If set to less than that, an\n        intermediate ``combine`` function will be used, so that any one\n        ``combine`` or ``aggregate`` function has no more than ``split_every``\n        inputs. The depth of the aggregation graph will be\n        :math:`log_{split_every}(input chunks along reduced axes)`. Setting to\n        a low value can reduce cache size and network transfers, at the cost of\n        more CPU and a larger dask graph.\n\n        Omit to let dask heuristically decide a good default. A default can\n        also be set globally with the ``split_every`` key in\n        :mod:`dask.config`.\n    name: str, optional\n        Prefix of the keys of the intermediate and output nodes. If omitted, it\n        defaults to the function names.\n    out: Array, optional\n        Another dask array whose contents will be replaced. Omit to create a\n        new one. Note that, unlike in numpy, this setting gives no performance\n        benefits whatsoever, but can still be useful if one needs to preserve\n        the references to a previously existing Array.\n    concatenate: bool, optional\n        If True (the default), the outputs of the ``chunk``/``combine``\n        functions are concatenated into a single np.array before being passed\n        to the ``combine``/``aggregate`` functions. If False, the input of\n        ``combine`` and ``aggregate`` will be either a list of the raw outputs\n        of the previous step or a single output, and the function will have to\n        concatenate it itself. It can be useful to set this to False if the\n        chunk and/or combine steps do not produce np.arrays.\n    output_size: int >= 1, optional\n        Size of the output of the ``aggregate`` function along the reduced\n        axes. Ignored if keepdims is False.\n\n    Returns\n    -------\n    dask array\n\n    **Function Parameters**\n\n    x_chunk: numpy.ndarray\n        Individual input chunk. For ``chunk`` functions, it is one of the\n        original chunks of x. For ``combine`` and ``aggregate`` functions, it's\n        the concatenation of the outputs produced by the previous ``chunk`` or\n        ``combine`` functions. If concatenate=False, it's a list of the raw\n        outputs from the previous functions.\n    axis: tuple\n        Normalized list of axes to reduce upon, e.g. 
``(0, )``\n Scalar, negative, and None axes have been normalized away.\n Note that some numpy reduction functions cannot reduce along multiple\n axes at once and strictly require an int in input. Such functions have\n to be wrapped to cope.\n keepdims: bool\n Whether the reduction function should preserve the reduced axes or\n remove them.\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction.if_axis_is_None__reduction.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_reduction.if_axis_is_None__reduction.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 148, "end_line": 205, "span_ids": ["reduction"], "tokens": 476}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction(\n x,\n chunk,\n aggregate,\n axis=None,\n keepdims=False,\n dtype=None,\n split_every=None,\n combine=None,\n name=None,\n out=None,\n concatenate=True,\n output_size=1,\n meta=None,\n):\n if axis is None:\n axis = tuple(range(x.ndim))\n if isinstance(axis, Integral):\n axis = (axis,)\n axis = validate_axis(axis, x.ndim)\n\n if dtype is None:\n raise ValueError(\"Must specify dtype\")\n if \"dtype\" in getargspec(chunk).args:\n chunk = partial(chunk, dtype=dtype)\n if \"dtype\" in getargspec(aggregate).args:\n aggregate = partial(aggregate, dtype=dtype)\n if is_series_like(x):\n x = x.values\n\n # Map chunk across all blocks\n inds = tuple(range(x.ndim))\n # The dtype of `tmp` doesn't actually matter, and may be incorrect.\n tmp = blockwise(\n chunk, inds, x, inds, axis=axis, keepdims=True, token=name, dtype=dtype or float\n )\n tmp._chunks = tuple(\n (output_size,) * len(c) if i in axis else c for i, c in enumerate(tmp.chunks)\n )\n\n if meta is None and hasattr(x, \"_meta\"):\n try:\n reduced_meta = compute_meta(\n chunk, x.dtype, x._meta, axis=axis, keepdims=True, computing_meta=True\n )\n except TypeError:\n reduced_meta = compute_meta(\n chunk, x.dtype, x._meta, axis=axis, keepdims=True\n )\n except ValueError:\n pass\n else:\n reduced_meta = None\n\n result = _tree_reduce(\n tmp,\n aggregate,\n axis,\n keepdims,\n dtype,\n split_every,\n combine,\n name=name,\n concatenate=concatenate,\n reduced_meta=reduced_meta,\n )\n if keepdims and output_size != 1:\n result._chunks = tuple(\n (output_size,) if i in axis else c for i, c in enumerate(tmp.chunks)\n )\n if meta is not None:\n result._meta = meta\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__tree_reduce__tree_reduce.return.partial_reduce_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__tree_reduce__tree_reduce.return.partial_reduce_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 200, "end_line": 255, "span_ids": ["_tree_reduce"], "tokens": 429}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _tree_reduce(\n x,\n aggregate,\n axis,\n keepdims,\n dtype,\n split_every=None,\n combine=None,\n name=None,\n concatenate=True,\n reduced_meta=None,\n):\n \"\"\"Perform the tree reduction step of a reduction.\n\n Lower level, users should use ``reduction`` or ``arg_reduction`` directly.\n \"\"\"\n # Normalize split_every\n split_every = split_every or config.get(\"split_every\", 4)\n if isinstance(split_every, dict):\n split_every = {k: split_every.get(k, 2) for k in axis}\n elif isinstance(split_every, Integral):\n n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)\n split_every = dict.fromkeys(axis, n)\n else:\n raise ValueError(\"split_every must be a int or a dict\")\n\n # Reduce across intermediates\n depth = 1\n for i, n in enumerate(x.numblocks):\n if i in split_every and split_every[i] != 1:\n depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))\n func = partial(combine or aggregate, axis=axis, keepdims=True)\n if concatenate:\n func = compose(func, partial(_concatenate2, axes=sorted(axis)))\n for i in range(depth - 1):\n x = partial_reduce(\n func,\n x,\n split_every,\n True,\n dtype=dtype,\n name=(name or funcname(combine or aggregate)) + \"-partial\",\n reduced_meta=reduced_meta,\n )\n func = partial(aggregate, axis=axis, keepdims=keepdims)\n if concatenate:\n func = compose(func, partial(_concatenate2, axes=sorted(axis)))\n return partial_reduce(\n func,\n x,\n split_every,\n keepdims=keepdims,\n dtype=dtype,\n name=(name or funcname(aggregate)) + \"-aggregate\",\n reduced_meta=reduced_meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_partial_reduce_partial_reduce.if_np_isscalar_meta_.else_.return.Array_graph_name_out_ch": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_partial_reduce_partial_reduce.if_np_isscalar_meta_.else_.return.Array_graph_name_out_ch", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 258, "end_line": 334, "span_ids": ["partial_reduce"], "tokens": 718}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partial_reduce(\n func, x, split_every, keepdims=False, dtype=None, name=None, reduced_meta=None\n):\n \"\"\"Partial reduction across multiple axes.\n\n Parameters\n ----------\n func : function\n x : Array\n split_every : dict\n Maximum reduction block sizes in each dimension.\n\n Examples\n --------\n Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th\n dimension, and 3 blocks in the 2nd dimension:\n\n >>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP\n \"\"\"\n name = (\n (name or funcname(func)) + \"-\" + tokenize(func, x, split_every, keepdims, dtype)\n )\n parts = [\n list(partition_all(split_every.get(i, 1), range(n)))\n for (i, n) in enumerate(x.numblocks)\n ]\n keys = product(*map(range, map(len, parts)))\n out_chunks = [\n tuple(1 for p in partition_all(split_every[i], c)) if i in split_every else c\n for (i, c) in enumerate(x.chunks)\n ]\n if not keepdims:\n out_axis = [i for i in range(x.ndim) if i not in split_every]\n getter = lambda k: get(out_axis, k)\n keys = map(getter, keys)\n out_chunks = list(getter(out_chunks))\n dsk = {}\n for k, p in zip(keys, product(*parts)):\n free = {\n i: j[0] for (i, j) in enumerate(p) if len(j) == 1 and i not in split_every\n }\n dummy = dict(i for i in enumerate(p) if i[0] in split_every)\n g = lol_tuples((x.name,), range(x.ndim), free, dummy)\n dsk[(name,) + k] = (func, g)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n\n meta = x._meta\n if reduced_meta is not None:\n try:\n meta = func(reduced_meta, computing_meta=True)\n # no meta keyword argument exists for func, and it isn't required\n except TypeError:\n try:\n meta = func(reduced_meta)\n except ValueError as e:\n # min/max functions have no identity, don't apply function to meta\n if \"zero-size array to reduction operation\" in str(e):\n meta = reduced_meta\n # when no work can be computed on the empty array (e.g., func is a ufunc)\n except ValueError:\n pass\n\n # some functions can't compute empty arrays (those for which reduced_meta\n # fall into the ValueError exception) and we have to rely on reshaping\n # the array according to len(out_chunks)\n if is_arraylike(meta) and meta.ndim != len(out_chunks):\n if len(out_chunks) == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * len(out_chunks))\n\n if np.isscalar(meta):\n return Array(graph, name, out_chunks, dtype=dtype)\n else:\n with contextlib.suppress(AttributeError):\n meta = meta.astype(dtype)\n return Array(graph, name, out_chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_sum_prod.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_sum_prod.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 360, "span_ids": ["sum", "prod"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef sum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is None:\n dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), \"dtype\", object)\n result = reduction(\n a,\n chunk.sum,\n chunk.sum,\n axis=axis,\n keepdims=keepdims,\n dtype=dtype,\n split_every=split_every,\n out=out,\n )\n return result\n\n\n@derived_from(np)\ndef prod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.empty((1,), dtype=a.dtype).prod(), \"dtype\", object)\n return reduction(\n a,\n chunk.prod,\n chunk.prod,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_min_all.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_min_all.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 363, "end_line": 418, "span_ids": ["max", "all", "any", "min"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@implements(np.min, np.amin)\n@derived_from(np)\ndef min(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.min,\n chunk.min,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )\n\n\n@implements(np.max, np.amax)\n@derived_from(np)\ndef max(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.max,\n chunk.max,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )\n\n\n@derived_from(np)\ndef any(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.any,\n chunk.any,\n axis=axis,\n keepdims=keepdims,\n dtype=\"bool\",\n split_every=split_every,\n out=out,\n )\n\n\n@derived_from(np)\ndef all(a, axis=None, keepdims=False, split_every=None, out=None):\n return reduction(\n a,\n chunk.all,\n chunk.all,\n axis=axis,\n keepdims=keepdims,\n dtype=\"bool\",\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nansum_nansum.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nansum_nansum.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", 
"file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 436, "span_ids": ["nansum"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nansum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n chunk.nansum,\n chunk.sum,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nannumel_mean_chunk.return._n_n_total_total_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nannumel_mean_chunk.return._n_n_total_total_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 570, "end_line": 584, "span_ids": ["mean_chunk", "nannumel"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def nannumel(x, **kwargs):\n \"\"\"A reduction to count the number of elements\"\"\"\n return chunk.sum(~(np.isnan(x)), **kwargs)\n\n\ndef mean_chunk(\n x, sum=chunk.sum, numel=numel, dtype=\"f8\", computing_meta=False, **kwargs\n):\n if computing_meta:\n return x\n n = numel(x, dtype=dtype, **kwargs)\n\n total = sum(x, dtype=dtype, **kwargs)\n\n return {\"n\": n, \"total\": total}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_combine_mean_combine.return._n_n_total_total_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_combine_mean_combine.return._n_n_total_total_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 601, "end_line": 622, "span_ids": ["mean_combine"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mean_combine(\n pairs,\n sum=chunk.sum,\n 
numel=numel,\n dtype=\"f8\",\n axis=None,\n computing_meta=False,\n **kwargs,\n):\n if not isinstance(pairs, list):\n pairs = [pairs]\n\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n n = _concatenate2(ns, axes=axis).sum(axis=axis, **kwargs)\n\n if computing_meta:\n return n\n\n totals = deepmap(lambda pair: pair[\"total\"], pairs)\n total = _concatenate2(totals, axes=axis).sum(axis=axis, **kwargs)\n\n return {\"n\": n, \"total\": total}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_mean.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_mean.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 639, "end_line": 658, "span_ids": ["mean"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef mean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n elif a.dtype == object:\n dt = object\n else:\n dt = getattr(np.mean(np.zeros(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n mean_chunk,\n mean_agg,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n combine=mean_combine,\n out=out,\n concatenate=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_chunk_moment_chunk.return._total_total_n_n_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_chunk_moment_chunk.return._total_total_n_n_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 626, "end_line": 639, "span_ids": ["moment_chunk"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def moment_chunk(\n A, order=2, sum=chunk.sum, numel=numel, dtype=\"f8\", computing_meta=False, **kwargs\n):\n if computing_meta:\n return A\n n = numel(A, **kwargs)\n\n n = n.astype(np.int64)\n total = sum(A, dtype=dtype, **kwargs)\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n u = total / n\n xs = [sum((A - u) ** i, dtype=dtype, **kwargs) for i in range(2, order + 1)]\n M = np.stack(xs, axis=-1)\n return {\"total\": 
total, \"n\": n, \"M\": M}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__moment_helper__moment_helper.return.M": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__moment_helper__moment_helper.return.M", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 722, "end_line": 729, "span_ids": ["_moment_helper"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs):\n M = Ms[..., order - 2].sum(axis=axis, **kwargs) + sum(\n ns * inner_term**order, axis=axis, **kwargs\n )\n for k in range(1, order - 1):\n coeff = factorial(order) / (factorial(k) * factorial(order - k))\n M += coeff * sum(Ms[..., order - k - 2] * inner_term**k, axis=axis, **kwargs)\n return M", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_combine_moment_combine.return._total_total_n_n_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_combine_moment_combine.return._total_total_n_n_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 711, "end_line": 748, "span_ids": ["moment_combine"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def moment_combine(\n pairs,\n order=2,\n ddof=0,\n dtype=\"f8\",\n sum=np.sum,\n axis=None,\n computing_meta=False,\n **kwargs,\n):\n if not isinstance(pairs, list):\n pairs = [pairs]\n\n kwargs[\"dtype\"] = dtype\n kwargs[\"keepdims\"] = True\n\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n ns = _concatenate2(ns, axes=axis)\n n = ns.sum(axis=axis, **kwargs)\n\n if computing_meta:\n return n\n\n totals = _concatenate2(deepmap(lambda pair: pair[\"total\"], pairs), axes=axis)\n Ms = _concatenate2(deepmap(lambda pair: pair[\"M\"], pairs), axes=axis)\n\n total = totals.sum(axis=axis, **kwargs)\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n mu = divide(total, n, dtype=dtype)\n inner_term = divide(totals, ns, dtype=dtype) - mu\n\n xs = [\n _moment_helper(Ms, ns, inner_term, o, sum, axis, kwargs)\n for o in range(2, order + 1)\n ]\n M = np.stack(xs, axis=-1)\n return 
{\"total\": total, \"n\": n, \"M\": M}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_agg_moment_agg.return.divide_M_denominator_dt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_agg_moment_agg.return.divide_M_denominator_dt", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 751, "end_line": 796, "span_ids": ["moment_agg"], "tokens": 383}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def moment_agg(\n pairs,\n order=2,\n ddof=0,\n dtype=\"f8\",\n sum=np.sum,\n axis=None,\n computing_meta=False,\n **kwargs,\n):\n if not isinstance(pairs, list):\n pairs = [pairs]\n\n kwargs[\"dtype\"] = dtype\n # To properly handle ndarrays, the original dimensions need to be kept for\n # part of the calculation.\n keepdim_kw = kwargs.copy()\n keepdim_kw[\"keepdims\"] = True\n\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n ns = _concatenate2(ns, axes=axis)\n n = ns.sum(axis=axis, **keepdim_kw)\n\n if computing_meta:\n return n\n\n totals = _concatenate2(deepmap(lambda pair: pair[\"total\"], pairs), axes=axis)\n Ms = _concatenate2(deepmap(lambda pair: pair[\"M\"], pairs), axes=axis)\n\n mu = divide(totals.sum(axis=axis, **keepdim_kw), n, dtype=dtype)\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n inner_term = divide(totals, ns, dtype=dtype) - mu\n\n M = _moment_helper(Ms, ns, inner_term, order, sum, axis, kwargs)\n\n denominator = n.sum(axis=axis, **kwargs) - ddof\n\n # taking care of the edge case with empty or all-nans array with ddof > 0\n if isinstance(denominator, Number):\n if denominator < 0:\n denominator = np.nan\n elif denominator is not np.ma.masked:\n denominator[denominator < 0] = np.nan\n\n return divide(M, denominator, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_moment.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_moment_moment.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 799, "end_line": 832, "span_ids": ["moment"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def moment(\n a, order, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n):\n if not isinstance(order, Integral) or order < 0:\n raise ValueError(\"Order must be an integer >= 0\")\n\n if order < 2:\n reduced = a.sum(axis=axis) # get reduced shape and chunks\n if order == 0:\n # When order equals 0, the result is 1, by definition.\n return ones(\n reduced.shape, chunks=reduced.chunks, dtype=\"f8\", meta=reduced._meta\n )\n # By definition the first order about the mean is 0.\n return zeros(\n reduced.shape, chunks=reduced.chunks, dtype=\"f8\", meta=reduced._meta\n )\n\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n partial(moment_chunk, order=order),\n partial(moment_agg, order=order, ddof=ddof),\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n concatenate=False,\n combine=partial(moment_combine, order=order),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_var_var.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_var_var.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 768, "end_line": 786, "span_ids": ["var"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef var(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n moment_chunk,\n partial(moment_agg, ddof=ddof),\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n combine=moment_combine,\n name=\"var\",\n out=out,\n concatenate=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanvar_nanvar.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanvar_nanvar.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 789, "end_line": 808, "span_ids": ["nanvar"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanvar(\n a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n partial(moment_chunk, sum=chunk.nansum, numel=nannumel),\n partial(moment_agg, sum=np.nansum, ddof=ddof),\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n combine=partial(moment_combine, sum=np.nansum),\n out=out,\n concatenate=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_std_nanstd.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_std_nanstd.return.result", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 836, "end_line": 871, "span_ids": ["std", "nanstd"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None):\n result = safe_sqrt(\n var(\n a,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n )\n if dtype and dtype != result.dtype:\n result = result.astype(dtype)\n return result\n\n\n@derived_from(np)\ndef nanstd(\n a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n):\n result = safe_sqrt(\n nanvar(\n a,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n )\n if dtype and dtype != result.dtype:\n result = result.astype(dtype)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_chunk_arg_chunk.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_chunk_arg_chunk.return.result", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 989, "end_line": 1018, "span_ids": ["arg_chunk"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arg_chunk(func, argfunc, x, axis, offset_info):\n arg_axis = None if len(axis) == x.ndim or x.ndim == 1 else axis[0]\n vals 
= func(x, axis=arg_axis, keepdims=True)\n arg = argfunc(x, axis=arg_axis, keepdims=True)\n if arg_axis is None:\n offset, total_shape = offset_info\n ind = np.unravel_index(arg.ravel()[0], x.shape)\n total_ind = tuple(o + i for (o, i) in zip(offset, ind))\n arg[:] = np.ravel_multi_index(total_ind, total_shape)\n else:\n arg += offset_info\n\n if isinstance(vals, np.ma.masked_array):\n if \"min\" in argfunc.__name__:\n fill_value = np.ma.minimum_fill_value(vals)\n else:\n fill_value = np.ma.maximum_fill_value(vals)\n vals = np.ma.filled(vals, fill_value)\n\n try:\n result = np.empty_like(\n vals, shape=vals.shape, dtype=[(\"vals\", vals.dtype), (\"arg\", arg.dtype)]\n )\n except TypeError:\n # Array type doesn't support structured arrays (e.g., CuPy)\n result = dict()\n\n result[\"vals\"] = vals\n result[\"arg\"] = arg\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_combine_nanarg_agg.return.arg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_combine_nanarg_agg.return.arg", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1021, "end_line": 1045, "span_ids": ["arg_agg", "nanarg_agg", "arg_combine"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arg_combine(func, argfunc, data, axis=None, **kwargs):\n arg, vals = _arg_combine(data, axis, argfunc, keepdims=True)\n\n try:\n result = np.empty_like(\n vals, shape=vals.shape, dtype=[(\"vals\", vals.dtype), (\"arg\", arg.dtype)]\n )\n except TypeError:\n # Array type doesn't support structured arrays (e.g., CuPy).\n result = dict()\n\n result[\"vals\"] = vals\n result[\"arg\"] = arg\n return result\n\n\ndef arg_agg(func, argfunc, data, axis=None, **kwargs):\n return _arg_combine(data, axis, argfunc, keepdims=False)[0]\n\n\ndef nanarg_agg(func, argfunc, data, axis=None, **kwargs):\n arg, vals = _arg_combine(data, axis, argfunc, keepdims=False)\n if np.any(np.isnan(vals)):\n raise ValueError(\"All NaN slice encountered\")\n return arg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_reduction_arg_reduction.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_arg_reduction_arg_reduction.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1048, "end_line": 1112, "span_ids": ["arg_reduction"], "tokens": 587}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):\n \"\"\"Generic function for argreduction.\n\n Parameters\n ----------\n x : Array\n chunk : callable\n Partialed ``arg_chunk``.\n combine : callable\n Partialed ``arg_combine``.\n agg : callable\n Partialed ``arg_agg``.\n axis : int, optional\n split_every : int or dict, optional\n \"\"\"\n if axis is None:\n axis = tuple(range(x.ndim))\n ravel = True\n elif isinstance(axis, Integral):\n axis = validate_axis(axis, x.ndim)\n axis = (axis,)\n ravel = x.ndim == 1\n else:\n raise TypeError(f\"axis must be either `None` or int, got '{axis}'\")\n\n for ax in axis:\n chunks = x.chunks[ax]\n if len(chunks) > 1 and np.isnan(chunks).any():\n raise ValueError(\n \"Arg-reductions do not work with arrays that have \"\n \"unknown chunksizes. At some point in your computation \"\n \"this array lost chunking information.\\n\\n\"\n \"A possible solution is with \\n\"\n \" x.compute_chunk_sizes()\"\n )\n\n # Map chunk across all blocks\n name = f\"arg-reduce-{tokenize(axis, x, chunk, combine, split_every)}\"\n old = x.name\n keys = list(product(*map(range, x.numblocks)))\n offsets = list(product(*(accumulate(operator.add, bd[:-1], 0) for bd in x.chunks)))\n if ravel:\n offset_info = zip(offsets, repeat(x.shape))\n else:\n offset_info = pluck(axis[0], offsets)\n\n chunks = tuple((1,) * len(c) if i in axis else c for (i, c) in enumerate(x.chunks))\n dsk = {\n (name,) + k: (chunk, (old,) + k, axis, off)\n for (k, off) in zip(keys, offset_info)\n }\n\n dtype = np.argmin(asarray_safe([1], like=meta_from_array(x)))\n meta = None\n if is_arraylike(dtype):\n # This case occurs on non-NumPy types (e.g., CuPy), where the returned\n # value is an ndarray rather than a scalar.\n meta = dtype\n dtype = meta.dtype\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n tmp = Array(graph, name, chunks, dtype=dtype, meta=meta)\n\n result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_make_arg_reduction_make_arg_reduction.return.derived_from_np_wrapped_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_make_arg_reduction_make_arg_reduction.return.derived_from_np_wrapped_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1007, "end_line": 1031, "span_ids": ["make_arg_reduction"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_arg_reduction(func, argfunc, 
is_nan_func=False):\n    \"\"\"Create an argreduction callable\n\n    Parameters\n    ----------\n    func : callable\n        The reduction (e.g. ``min``)\n    argfunc : callable\n        The argreduction (e.g. ``argmin``)\n    is_nan_func : bool, optional\n        Whether the functions are NaN-skipping variants (e.g. ``nanmin`` and\n        ``nanargmin``); if so, the aggregate step raises ``ValueError`` on\n        all-NaN slices instead of returning an index\n    \"\"\"\n    chunk = partial(arg_chunk, func, argfunc)\n    combine = partial(arg_combine, func, argfunc)\n    if is_nan_func:\n        agg = partial(nanarg_agg, func, argfunc)\n    else:\n        agg = partial(arg_agg, func, argfunc)\n\n    def wrapped(x, axis=None, split_every=None, out=None):\n        return arg_reduction(\n            x, chunk, combine, agg, axis, split_every=split_every, out=out\n        )\n\n    wrapped.__name__ = func.__name__\n\n    return derived_from(np)(wrapped)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanargmin_nanargmax.make_arg_reduction_chunk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanargmin_nanargmax.make_arg_reduction_chunk_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1075, "end_line": 1092, "span_ids": ["_nanargmax", "_nanargmin", "impl"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _nanargmin(x, axis, **kwargs):\n    try:\n        return chunk.nanargmin(x, axis, **kwargs)\n    except ValueError:\n        return chunk.nanargmin(np.where(np.isnan(x), np.inf, x), axis, **kwargs)\n\n\ndef _nanargmax(x, axis, **kwargs):\n    try:\n        return chunk.nanargmax(x, axis, **kwargs)\n    except ValueError:\n        return chunk.nanargmax(np.where(np.isnan(x), -np.inf, x), axis, **kwargs)\n\n\nargmin = make_arg_reduction(chunk.min, chunk.argmin)\nargmax = make_arg_reduction(chunk.max, chunk.argmax)\nnanargmin = make_arg_reduction(chunk.nanmin, _nanargmin, True)\nnanargmax = make_arg_reduction(chunk.nanmax, _nanargmax, True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_topk_topk.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_topk_topk.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1147, "end_line": 1203, "span_ids": ["topk"], "tokens": 459}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def topk(a, k, axis=-1, split_every=None):\n    \"\"\"Extract the k largest elements from a on the given 
axis,\n    and return them sorted from largest to smallest.\n    If k is negative, extract the -k smallest elements instead,\n    and return them sorted from smallest to largest.\n\n    This performs best when ``k`` is much smaller than the chunk size. All\n    results will be returned in a single chunk along the given axis.\n\n    Parameters\n    ----------\n    a: Array\n        Data being sorted\n    k: int\n    axis: int, optional\n    split_every: int >=2, optional\n        See :func:`reduce`. This parameter becomes very important when k is\n        on the same order of magnitude as the chunk size or larger, as it\n        prevents getting the whole or a significant portion of the input array\n        in memory all at once, with a negative impact on network transfer\n        too when running on the distributed scheduler.\n\n    Returns\n    -------\n    Selection of a with size abs(k) along the given axis.\n\n    Examples\n    --------\n    >>> import dask.array as da\n    >>> x = np.array([5, 1, 3, 6])\n    >>> d = da.from_array(x, chunks=2)\n    >>> d.topk(2).compute()\n    array([6, 5])\n    >>> d.topk(-2).compute()\n    array([1, 3])\n    \"\"\"\n    axis = validate_axis(axis, a.ndim)\n\n    # chunk and combine steps of the reduction, which recursively invoke\n    # np.partition to pick the top/bottom k elements from the previous step.\n    # The selection is not sorted internally.\n    chunk_combine = partial(chunk.topk, k=k)\n    # aggregate step of the reduction. Internally invokes the chunk/combine\n    # function, then sorts the results internally.\n    aggregate = partial(chunk.topk_aggregate, k=k)\n\n    return reduction(\n        a,\n        chunk=chunk_combine,\n        combine=chunk_combine,\n        aggregate=aggregate,\n        axis=axis,\n        keepdims=True,\n        dtype=a.dtype,\n        split_every=split_every,\n        output_size=abs(k),\n    )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_argtopk_argtopk.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_argtopk_argtopk.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1206, "end_line": 1275, "span_ids": ["argtopk"], "tokens": 610}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def argtopk(a, k, axis=-1, split_every=None):\n    \"\"\"Extract the indices of the k largest elements from a on the given axis,\n    and return them sorted from largest to smallest. If k is negative, extract\n    the indices of the -k smallest elements instead, and return them sorted\n    from smallest to largest.\n\n    This performs best when ``k`` is much smaller than the chunk size. All\n    results will be returned in a single chunk along the given axis.\n\n    Parameters\n    ----------\n    a: Array\n        Data being sorted\n    k: int\n    axis: int, optional\n    split_every: int >=2, optional\n        See :func:`topk`. 
The performance considerations for topk also apply\n    here.\n\n    Returns\n    -------\n    Selection of np.intp indices of a with size abs(k) along the given axis.\n\n    Examples\n    --------\n    >>> import dask.array as da\n    >>> x = np.array([5, 1, 3, 6])\n    >>> d = da.from_array(x, chunks=2)\n    >>> d.argtopk(2).compute()\n    array([3, 0])\n    >>> d.argtopk(-2).compute()\n    array([1, 2])\n    \"\"\"\n    axis = validate_axis(axis, a.ndim)\n\n    # Generate nodes where every chunk is a tuple of (a, original index of a)\n    idx = arange(a.shape[axis], chunks=(a.chunks[axis],), dtype=np.intp)\n    idx = idx[tuple(slice(None) if i == axis else np.newaxis for i in range(a.ndim))]\n    a_plus_idx = a.map_blocks(chunk.argtopk_preprocess, idx, dtype=object)\n\n    # chunk and combine steps of the reduction. They take as input a tuple\n    # of (a, original indices of a) and return another tuple containing the top\n    # k elements of a and the matching original indices. The selection is not\n    # sorted internally, as in np.argpartition.\n    chunk_combine = partial(chunk.argtopk, k=k)\n    # aggregate step of the reduction. Internally invokes the chunk/combine\n    # function, then sorts the results internally, drops a and returns the\n    # index only.\n    aggregate = partial(chunk.argtopk_aggregate, k=k)\n\n    if isinstance(axis, Number):\n        naxis = 1\n    else:\n        naxis = len(axis)\n\n    meta = a._meta.astype(np.intp).reshape((0,) * (a.ndim - naxis + 1))\n\n    return reduction(\n        a_plus_idx,\n        chunk=chunk_combine,\n        combine=chunk_combine,\n        aggregate=aggregate,\n        axis=axis,\n        keepdims=True,\n        dtype=np.intp,\n        split_every=split_every,\n        concatenate=False,\n        output_size=abs(k),\n        meta=meta,\n    )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_trace_median.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_trace_median.return.result", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1558, "end_line": 1595, "span_ids": ["median", "trace"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None):\n    return diagonal(a, offset=offset, axis1=axis1, axis2=axis2).sum(-1, dtype=dtype)\n\n\n@derived_from(np)\ndef median(a, axis=None, keepdims=False, out=None):\n    \"\"\"\n    This works by automatically chunking the reduced axes to a single chunk if necessary\n    and then calling ``numpy.median`` function across the remaining dimensions\n    \"\"\"\n    if axis is None:\n        raise NotImplementedError(\n            \"The da.median function only works along an axis. 
\"\n \"The full algorithm is difficult to do in parallel\"\n )\n\n if not isinstance(axis, Iterable):\n axis = (axis,)\n\n axis = [ax + a.ndim if ax < 0 else ax for ax in axis]\n\n # rechunk if reduced axes are not contained in a single chunk\n if builtins.any(a.numblocks[ax] > 1 for ax in axis):\n a = a.rechunk({ax: -1 if ax in axis else \"auto\" for ax in range(a.ndim)})\n\n result = a.map_blocks(\n np.median,\n axis=axis,\n keepdims=keepdims,\n drop_axis=axis if not keepdims else None,\n chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]\n if keepdims\n else None,\n )\n\n result = handle_out(out, result)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmedian_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmedian_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1598, "end_line": 1631, "span_ids": ["nanmedian"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanmedian(a, axis=None, keepdims=False, out=None):\n \"\"\"\n This works by automatically chunking the reduced axes to a single chunk\n and then calling ``numpy.nanmedian`` function across the remaining dimensions\n \"\"\"\n if axis is None:\n raise NotImplementedError(\n \"The da.nanmedian function only works along an axis or a subset of axes. 
\"\n \"The full algorithm is difficult to do in parallel\"\n )\n\n if not isinstance(axis, Iterable):\n axis = (axis,)\n\n axis = [ax + a.ndim if ax < 0 else ax for ax in axis]\n\n # rechunk if reduced axes are not contained in a single chunk\n if builtins.any(a.numblocks[ax] > 1 for ax in axis):\n a = a.rechunk({ax: -1 if ax in axis else \"auto\" for ax in range(a.ndim)})\n\n result = a.map_blocks(\n np.nanmedian,\n axis=axis,\n keepdims=keepdims,\n drop_axis=axis if not keepdims else None,\n chunks=[1 if ax in axis else c for ax, c in enumerate(a.chunks)]\n if keepdims\n else None,\n )\n\n result = handle_out(out, result)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_expand_tuple_expand_tuple.return.tuple_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_expand_tuple_expand_tuple.return.tuple_out_", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 110, "span_ids": ["expand_tuple"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def expand_tuple(chunks, factor):\n \"\"\"\n\n >>> expand_tuple((2, 4), 2)\n (1, 1, 2, 2)\n\n >>> expand_tuple((2, 4), 3)\n (1, 1, 1, 1, 2)\n\n >>> expand_tuple((3, 4), 2)\n (1, 2, 2, 2)\n\n >>> expand_tuple((7, 4), 3)\n (2, 2, 3, 1, 1, 2)\n \"\"\"\n if factor == 1:\n return chunks\n\n out = []\n for c in chunks:\n x = c\n part = max(x / factor, 1)\n while x >= 2 * part:\n out.append(int(part))\n x -= int(part)\n if x:\n out.append(x)\n assert sum(chunks) == sum(out)\n return tuple(out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_contract_tuple_contract_tuple.return.tuple_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_contract_tuple_contract_tuple.return.tuple_out_", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 113, "end_line": 133, "span_ids": ["contract_tuple"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def contract_tuple(chunks, factor):\n \"\"\"Return simple chunks tuple such that factor divides all elements\n\n Examples\n --------\n\n >>> contract_tuple((2, 2, 8, 4), 4)\n (4, 8, 4)\n \"\"\"\n assert 
sum(chunks) % factor == 0\n\n out = []\n residual = 0\n for chunk in chunks:\n chunk += residual\n div = chunk // factor\n residual = chunk % factor\n good = factor * div\n if good:\n out.append(good)\n return tuple(out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_3d_atleast_3d.if_len_new_arys_1_.else_.return.new_arys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_3d_atleast_3d.if_len_new_arys_1_.else_.return.new_arys", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 61, "end_line": 78, "span_ids": ["atleast_3d"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef atleast_3d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None, None]\n elif x.ndim == 1:\n x = x[None, :, None]\n elif x.ndim == 2:\n x = x[:, :, None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_swapaxes_transpose.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_swapaxes_transpose.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 145, "end_line": 170, "span_ids": ["transpose", "swapaxes"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef swapaxes(a, axis1, axis2):\n if axis1 == axis2:\n return a\n if axis1 < 0:\n axis1 = axis1 + a.ndim\n if axis2 < 0:\n axis2 = axis2 + a.ndim\n ind = list(range(a.ndim))\n out = list(ind)\n out[axis1], out[axis2] = axis2, axis1\n\n return blockwise(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2, dtype=a.dtype)\n\n\n@derived_from(np)\ndef transpose(a, axes=None):\n if axes:\n if len(axes) != a.ndim:\n raise ValueError(\"axes don't match array\")\n axes = tuple(d + a.ndim if d < 0 else d for d in axes)\n else:\n axes = tuple(range(a.ndim))[::-1]\n return blockwise(\n np.transpose, axes, a, tuple(range(a.ndim)), dtype=a.dtype, axes=axes\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_matmul_matmul.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_matmul_matmul.return.out", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 390, "end_line": 450, "span_ids": ["matmul"], "tokens": 519}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef matmul(a, b):\n a = asanyarray(a)\n b = asanyarray(b)\n\n if a.ndim == 0 or b.ndim == 0:\n raise ValueError(\"`matmul` does not support scalars.\")\n\n a_is_1d = False\n if a.ndim == 1:\n a_is_1d = True\n a = a[np.newaxis, :]\n\n b_is_1d = False\n if b.ndim == 1:\n b_is_1d = True\n b = b[:, np.newaxis]\n\n if a.ndim < b.ndim:\n a = a[(b.ndim - a.ndim) * (np.newaxis,)]\n elif a.ndim > b.ndim:\n b = b[(a.ndim - b.ndim) * (np.newaxis,)]\n\n # out_ind includes all dimensions to prevent contraction\n # in the blockwise below. We set the last two dimensions\n # of the output to the contraction axis and the 2nd\n # (last) dimension of b in that order\n out_ind = tuple(range(a.ndim + 1))\n # lhs_ind includes `a`/LHS dimensions\n lhs_ind = tuple(range(a.ndim))\n # on `b`/RHS everything above 2nd dimension, is the same\n # as `a`, -2 dimension is \"contracted\" with the last dimension\n # of `a`, last dimension of `b` is `b` specific\n rhs_ind = tuple(range(a.ndim - 2)) + (lhs_ind[-1], a.ndim)\n\n out = blockwise(\n _matmul,\n out_ind,\n a,\n lhs_ind,\n b,\n rhs_ind,\n adjust_chunks={lhs_ind[-1]: 1},\n dtype=result_type(a, b),\n concatenate=False,\n )\n\n # Because contraction + concatenate in blockwise leads to high\n # memory footprints, we want to avoid them. Instead we will perform\n # blockwise (without contraction) followed by reduction. 
More about\n # this issue: https://github.com/dask/dask/issues/6874\n\n # We will also perform the reduction without concatenation\n out = _sum_wo_cat(out, axis=-2)\n\n if a_is_1d:\n out = out.squeeze(-2)\n if b_is_1d:\n out = out.squeeze(-1)\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_outer__inner_apply_along_axis.return.np_apply_along_axis_func1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_outer__inner_apply_along_axis.return.np_apply_along_axis_func1", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 332, "end_line": 343, "span_ids": ["outer", "_inner_apply_along_axis"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef outer(a, b):\n a = a.flatten()\n b = b.flatten()\n\n dtype = np.outer(a.dtype.type(), b.dtype.type()).dtype\n\n return blockwise(np.outer, \"ij\", a, \"i\", b, \"j\", dtype=dtype)\n\n\ndef _inner_apply_along_axis(arr, func1d, func1d_axis, func1d_args, func1d_kwargs):\n return np.apply_along_axis(func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_along_axis_apply_along_axis.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_along_axis_apply_along_axis.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 432, "end_line": 478, "span_ids": ["apply_along_axis"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef apply_along_axis(func1d, axis, arr, *args, dtype=None, shape=None, **kwargs):\n \"\"\"\n This is a blocked variant of :func:`numpy.apply_along_axis` implemented via\n :func:`dask.array.map_blocks`\n\n Notes\n -----\n If either of `dtype` or `shape` are not provided, Dask attempts to\n determine them by calling `func1d` on a dummy array. 
This may produce\n incorrect values for `dtype` or `shape`, so we recommend providing them.\n \"\"\"\n arr = asarray(arr)\n\n # Verify that axis is valid and throw an error otherwise\n axis = len(arr.shape[:axis])\n\n # If necessary, infer dtype and shape of the output of func1d by calling it on test data.\n if shape is None or dtype is None:\n test_data = np.ones((1,), dtype=arr.dtype)\n test_result = np.array(func1d(test_data, *args, **kwargs))\n if shape is None:\n shape = test_result.shape\n if dtype is None:\n dtype = test_result.dtype\n\n # Rechunk so that func1d is applied over the full axis.\n arr = arr.rechunk(\n arr.chunks[:axis] + (arr.shape[axis : axis + 1],) + arr.chunks[axis + 1 :]\n )\n\n # Map func1d over the data to get the result\n # Adds other axes as needed.\n result = arr.map_blocks(\n _inner_apply_along_axis,\n name=funcname(func1d) + \"-along-axis\",\n dtype=dtype,\n chunks=(arr.chunks[:axis] + shape + arr.chunks[axis + 1 :]),\n drop_axis=axis,\n new_axis=list(range(axis, axis + len(shape), 1)),\n func1d=func1d,\n func1d_axis=axis,\n func1d_args=args,\n func1d_kwargs=kwargs,\n )\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ediff1d__gradient_kernel.return.grad": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ediff1d__gradient_kernel.return.grad", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 502, "span_ids": ["_gradient_kernel", "ediff1d"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef ediff1d(ary, to_end=None, to_begin=None):\n ary = asarray(ary)\n\n aryf = ary.flatten()\n r = aryf[1:] - aryf[:-1]\n\n r = [r]\n if to_begin is not None:\n r = [asarray(to_begin).flatten()] + r\n if to_end is not None:\n r = r + [asarray(to_end).flatten()]\n r = concatenate(r)\n\n return r\n\n\ndef _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):\n \"\"\"\n x: nd-array\n array of one block\n coord: 1d-array or scalar\n coordinate along which the gradient is computed.\n axis: int\n axis along which the gradient is computed\n array_locs:\n actual location along axis. 
None if coordinate is scalar\n grad_kwargs:\n keyword to be passed to np.gradient\n \"\"\"\n block_loc = block_id[axis]\n if array_locs is not None:\n coord = coord[array_locs[0][block_loc] : array_locs[1][block_loc]]\n grad = np.gradient(x, coord, axis=axis, **grad_kwargs)\n return grad", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_gradient_gradient.return.results": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_gradient_gradient.return.results", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 635, "end_line": 710, "span_ids": ["gradient"], "tokens": 574}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef gradient(f, *varargs, axis=None, **kwargs):\n f = asarray(f)\n\n kwargs[\"edge_order\"] = math.ceil(kwargs.get(\"edge_order\", 1))\n if kwargs[\"edge_order\"] > 2:\n raise ValueError(\"edge_order must be less than or equal to 2.\")\n\n drop_result_list = False\n if axis is None:\n axis = tuple(range(f.ndim))\n elif isinstance(axis, Integral):\n drop_result_list = True\n axis = (axis,)\n\n axis = validate_axis(axis, f.ndim)\n\n if len(axis) != len(set(axis)):\n raise ValueError(\"duplicate axes not allowed\")\n\n axis = tuple(ax % f.ndim for ax in axis)\n\n if varargs == ():\n varargs = (1,)\n if len(varargs) == 1:\n varargs = len(axis) * varargs\n if len(varargs) != len(axis):\n raise TypeError(\n \"Spacing must either be a single scalar, or a scalar / 1d-array per axis\"\n )\n\n if issubclass(f.dtype.type, (np.bool8, Integral)):\n f = f.astype(float)\n elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:\n f = f.astype(float)\n\n results = []\n for i, ax in enumerate(axis):\n for c in f.chunks[ax]:\n if np.min(c) < kwargs[\"edge_order\"] + 1:\n raise ValueError(\n \"Chunk size must be larger than edge_order + 1. \"\n \"Minimum chunk for axis {} is {}. 
Rechunk to \"\n                    \"proceed.\".format(ax, np.min(c))\n                )\n\n        if np.isscalar(varargs[i]):\n            array_locs = None\n        else:\n            if isinstance(varargs[i], Array):\n                raise NotImplementedError(\"dask array coordinates are not supported.\")\n            # coordinate position for each block taking overlap into account\n            chunk = np.array(f.chunks[ax])\n            array_loc_stop = np.cumsum(chunk) + 1\n            array_loc_start = array_loc_stop - chunk - 2\n            array_loc_stop[-1] -= 1\n            array_loc_start[0] = 0\n            array_locs = (array_loc_start, array_loc_stop)\n\n        results.append(\n            f.map_overlap(\n                _gradient_kernel,\n                dtype=f.dtype,\n                depth={j: 1 if j == ax else 0 for j in range(f.ndim)},\n                boundary=\"none\",\n                coord=varargs[i],\n                axis=ax,\n                array_locs=array_locs,\n                grad_kwargs=kwargs,\n            )\n        )\n\n    if drop_result_list:\n        results = results[0]\n\n    return results", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram_histogram._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram_histogram._", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 735, "end_line": 815, "span_ids": ["histogram"], "tokens": 956}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n    \"\"\"\n    Blocked variant of :func:`numpy.histogram`.\n\n    Parameters\n    ----------\n    a : dask.array.Array\n        Input data; the histogram is computed over the flattened\n        array. If the ``weights`` argument is used, the chunks of\n        ``a`` are accessed to check chunking compatibility between\n        ``a`` and ``weights``. If ``weights`` is ``None``, a\n        :py:class:`dask.dataframe.Series` object can be passed as\n        input data.\n    bins : int or sequence of scalars, optional\n        Either an iterable specifying the ``bins``, or the number of ``bins``\n        together with a ``range`` argument, is required, since computing\n        ``min`` and ``max`` over blocked arrays is an expensive operation\n        that must be performed explicitly.\n        If `bins` is an int, it defines the number of equal-width\n        bins in the given range (10, by default). If `bins` is a\n        sequence, it defines a monotonically increasing array of bin edges,\n        including the rightmost edge, allowing for non-uniform bin widths.\n    range : (float, float), optional\n        The lower and upper range of the bins. If not provided, range\n        is simply ``(a.min(), a.max())``. Values outside the range are\n        ignored. The first element of the range must be less than or\n        equal to the second. `range` affects the automatic bin\n        computation as well. While bin width is computed to be optimal\n        based on the actual data within `range`, the bin count will fill\n        the entire range including portions containing no data.\n    normed : bool, optional\n        This is equivalent to the ``density`` argument, but produces incorrect\n        results for unequal bin widths. 
It should not be used.\n weights : dask.array.Array, optional\n A dask.array.Array of weights, of the same block structure as ``a``. Each value in\n ``a`` only contributes its associated weight towards the bin count\n (instead of 1). If ``density`` is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n density : bool, optional\n If ``False``, the result will contain the number of samples in\n each bin. If ``True``, the result is the value of the\n probability *density* function at the bin, normalized such that\n the *integral* over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability *mass* function.\n Overrides the ``normed`` keyword if given.\n If ``density`` is True, ``bins`` cannot be a single-number delayed\n value. It must be a concrete number, or a (possibly-delayed)\n array/sequence of the bin edges.\n\n Returns\n -------\n hist : dask Array\n The values of the histogram. See `density` and `weights` for a\n description of the possible semantics.\n bin_edges : dask Array of dtype float\n Return the bin edges ``(length(hist)+1)``.\n\n Examples\n --------\n Using number of bins and range:\n\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array(np.arange(10000), chunks=10)\n >>> h, bins = da.histogram(x, bins=10, range=[0, 10000])\n >>> bins\n array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,\n 8000., 9000., 10000.])\n >>> h.compute()\n array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])\n\n Explicitly specifying the bins:\n\n >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))\n >>> bins\n array([ 0, 5000, 10000])\n >>> h.compute()\n array([5000, 5000])\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_isinstance_bins_Array_histogram._Map_the_histogram_to_al": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_isinstance_bins_Array_histogram._Map_the_histogram_to_al", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 735, "end_line": 805, "span_ids": ["histogram"], "tokens": 692}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n if isinstance(bins, Array):\n scalar_bins = bins.ndim == 0\n # ^ `np.ndim` is not implemented by Dask array.\n elif isinstance(bins, Delayed):\n scalar_bins = bins._length is None or bins._length == 1\n else:\n scalar_bins = np.ndim(bins) == 0\n\n if bins is None or (scalar_bins and range is None):\n raise ValueError(\n \"dask.array.histogram requires either specifying \"\n \"bins as an iterable or specifying both a range and \"\n \"the number of bins\"\n )\n\n if weights is not None and weights.chunks != a.chunks:\n raise ValueError(\"Input array and weights must have the same chunked structure\")\n\n if normed is not False:\n raise ValueError(\n \"The normed= keyword argument has been deprecated. \"\n \"Please use density instead. \"\n \"See the numpy.histogram docstring for more information.\"\n )\n\n if density and scalar_bins and isinstance(bins, (Array, Delayed)):\n raise NotImplementedError(\n \"When `density` is True, `bins` cannot be a scalar Dask object. \"\n \"It must be a concrete number or a (possibly-delayed) array/sequence of bin edges.\"\n )\n\n for argname, val in [(\"bins\", bins), (\"range\", range), (\"weights\", weights)]:\n if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins):\n raise TypeError(\n \"Dask types besides Array and Delayed are not supported \"\n \"for `histogram`. 
For argument `{}`, got: {!r}\".format(argname, val)\n )\n\n if range is not None:\n try:\n if len(range) != 2:\n raise ValueError(\n f\"range must be a sequence or array of length 2, but got {len(range)} items\"\n )\n if isinstance(range, (Array, np.ndarray)) and range.shape != (2,):\n raise ValueError(\n f\"range must be a 1-dimensional array of two items, but got an array of shape {range.shape}\"\n )\n except TypeError:\n raise TypeError(\n f\"Expected a sequence or array for range, not {range}\"\n ) from None\n\n token = tokenize(a, bins, range, weights, density)\n name = \"histogram-sum-\" + token\n\n if scalar_bins:\n bins = _linspace_from_delayed(range[0], range[1], bins + 1)\n # ^ NOTE `range[1]` is safe because of the above check, and the initial check\n # that range must not be None if `scalar_bins`\n else:\n if not isinstance(bins, (Array, np.ndarray)):\n bins = asarray(bins)\n if bins.ndim != 1:\n raise ValueError(\n f\"bins must be a 1-dimensional array or sequence, got shape {bins.shape}\"\n )\n\n (bins_ref, range_ref), deps = unpack_collections([bins, range])\n\n # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_weights_is_None__histogram.if_density_is_not_None_.else_.return.n_bins": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram.if_weights_is_None__histogram.if_density_is_not_None_.else_.return.n_bins", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 806, "end_line": 843, "span_ids": ["histogram"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):\n # ... 
other code\n if weights is None:\n dsk = {\n (name, i, 0): (_block_hist, k, bins_ref, range_ref)\n for i, k in enumerate(flatten(a.__dask_keys__()))\n }\n dtype = np.histogram([])[0].dtype\n else:\n a_keys = flatten(a.__dask_keys__())\n w_keys = flatten(weights.__dask_keys__())\n dsk = {\n (name, i, 0): (_block_hist, k, bins_ref, range_ref, w)\n for i, (k, w) in enumerate(zip(a_keys, w_keys))\n }\n dtype = weights.dtype\n\n deps = (a,) + deps\n if weights is not None:\n deps += (weights,)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)\n\n # Turn graph into a 2D Array of shape (nchunks, nbins)\n nchunks = len(list(flatten(a.__dask_keys__())))\n nbins = bins.size - 1 # since `bins` is 1D\n chunks = ((1,) * nchunks, (nbins,))\n mapped = Array(graph, name, chunks, dtype=dtype)\n\n # Sum over chunks to get the final histogram\n n = mapped.sum(axis=0)\n\n # We need to replicate normed and density options from numpy\n if density is not None:\n if density:\n db = asarray(np.diff(bins).astype(float), chunks=n.chunks)\n return n / db / n.sum(), bins\n else:\n return n, bins\n else:\n return n, bins", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_cov_cov.if_not_rowvar_.else_.return._dot_X_X_T_conj_fac": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_cov_cov.if_not_rowvar_.else_.return._dot_X_X_T_conj_fac", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 846, "end_line": 891, "span_ids": ["cov"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef cov(m, y=None, rowvar=1, bias=0, ddof=None):\n # This was copied almost verbatim from np.cov\n if ddof is not None and ddof != int(ddof):\n raise ValueError(\"ddof must be integer\")\n\n # Handles complex arrays too\n m = asarray(m)\n if y is None:\n dtype = np.result_type(m, np.float64)\n else:\n y = asarray(y)\n dtype = np.result_type(m, y, np.float64)\n X = array(m, ndmin=2, dtype=dtype)\n\n if X.shape[0] == 1:\n rowvar = 1\n if rowvar:\n N = X.shape[1]\n axis = 0\n else:\n N = X.shape[0]\n axis = 1\n\n # check ddof\n if ddof is None:\n if bias == 0:\n ddof = 1\n else:\n ddof = 0\n fact = float(N - ddof)\n if fact <= 0:\n warnings.warn(\"Degrees of freedom <= 0 for slice\", RuntimeWarning)\n fact = 0.0\n\n if y is not None:\n y = array(y, ndmin=2, dtype=dtype)\n X = concatenate((X, y), axis)\n\n X = X - X.mean(axis=1 - axis, keepdims=True)\n if not rowvar:\n return (dot(X.T, X.conj()) / fact).squeeze()\n else:\n return (dot(X, X.T.conj()) / fact).squeeze()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_corrcoef_iscomplexobj.return.issubclass_x_dtype_type_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_corrcoef_iscomplexobj.return.issubclass_x_dtype_type_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1540, "end_line": 1566, "span_ids": ["iscomplexobj", "round", "corrcoef", "ndim"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef corrcoef(x, y=None, rowvar=1):\n c = cov(x, y, rowvar)\n if c.shape == ():\n return c / c\n d = diag(c)\n d = d.reshape((d.shape[0], 1))\n sqr_d = sqrt(d)\n return (c / sqr_d) / sqr_d.T\n\n\n@implements(np.round, np.round_)\n@derived_from(np)\ndef round(a, decimals=0):\n return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)\n\n\n@implements(np.ndim)\n@derived_from(np)\ndef ndim(a):\n return a.ndim\n\n\n@implements(np.iscomplexobj)\n@derived_from(np)\ndef iscomplexobj(x):\n return issubclass(x.dtype.type, np.complexfloating)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__unique_internal__unique_internal.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__unique_internal__unique_internal.return.r", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 917, "end_line": 975, "span_ids": ["_unique_internal"], "tokens": 596}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _unique_internal(ar, indices, counts, return_inverse=False):\n \"\"\"\n Helper/wrapper function for :func:`numpy.unique`.\n\n Uses :func:`numpy.unique` to find the unique values for the array chunk.\n Given this chunk may not represent the whole array, also take the\n ``indices`` and ``counts`` that are in 1-to-1 correspondence to ``ar``\n and reduce them in the same fashion as ``ar`` is reduced. Namely sum\n any counts that correspond to the same value and take the smallest\n index that corresponds to the same value.\n\n To handle the inverse mapping from the unique values to the original\n array, simply return a NumPy array created with ``arange`` with enough\n values to correspond 1-to-1 to the unique values. 
While there is more\n work needed to be done to create the full inverse mapping for the\n original array, this provides enough information to generate the\n inverse mapping in Dask.\n\n Given Dask likes to have one array returned from functions like\n ``blockwise``, some formatting is done to stuff all of the resulting arrays\n into one big NumPy structured array. Dask is then able to handle this\n object and can split it apart into the separate results on the Dask side,\n which then can be passed back to this function in concatenated chunks for\n further reduction or can be return to the user to perform other forms of\n analysis.\n\n By handling the problem in this way, it does not matter where a chunk\n is in a larger array or how big it is. The chunk can still be computed\n on the same way. Also it does not matter if the chunk is the result of\n other chunks being run through this function multiple times. The end\n result will still be just as accurate using this strategy.\n \"\"\"\n\n return_index = indices is not None\n return_counts = counts is not None\n\n u = np.unique(ar)\n\n dt = [(\"values\", u.dtype)]\n if return_index:\n dt.append((\"indices\", np.intp))\n if return_inverse:\n dt.append((\"inverse\", np.intp))\n if return_counts:\n dt.append((\"counts\", np.intp))\n\n r = np.empty(u.shape, dtype=dt)\n r[\"values\"] = u\n if return_inverse:\n r[\"inverse\"] = np.arange(len(r), dtype=np.intp)\n if return_index or return_counts:\n for i, v in enumerate(r[\"values\"]):\n m = ar == v\n if return_index:\n indices[m].min(keepdims=True, out=r[\"indices\"][i : i + 1])\n if return_counts:\n counts[m].sum(keepdims=True, out=r[\"counts\"][i : i + 1])\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__isin_kernel_isin.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__isin_kernel_isin.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1071, "end_line": 1097, "span_ids": ["_isin_kernel", "isin"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _isin_kernel(element, test_elements, assume_unique=False):\n values = np.in1d(element.ravel(), test_elements, assume_unique=assume_unique)\n return values.reshape(element.shape + (1,) * test_elements.ndim)\n\n\n@safe_wraps(getattr(np, \"isin\", None))\ndef isin(element, test_elements, assume_unique=False, invert=False):\n element = asarray(element)\n test_elements = asarray(test_elements)\n element_axes = tuple(range(element.ndim))\n test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))\n mapped = blockwise(\n _isin_kernel,\n element_axes + test_axes,\n element,\n element_axes,\n test_elements,\n test_axes,\n adjust_chunks={axis: lambda _: 1 for axis in test_axes},\n dtype=bool,\n assume_unique=assume_unique,\n )\n\n result = 
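A concrete reading of `_unique_internal` above: for one chunk it packs the unique values, the smallest index per value, and the summed counts into a single structured array. A small direct call, assuming the helper stays importable from `dask.array.routines` as shown in the chunk metadata:

import numpy as np
from dask.array.routines import _unique_internal

ar = np.array([3, 1, 3, 2, 1, 3])
r = _unique_internal(
    ar,
    indices=np.arange(ar.size, dtype=np.intp),   # positions within this chunk
    counts=np.ones(ar.size, dtype=np.intp),      # one occurrence per element
    return_inverse=False,
)
# r["values"] -> [1 2 3]; r["indices"] -> [1 3 0]; r["counts"] -> [2 1 3]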
mapped.any(axis=test_axes)\n if invert:\n result = ~result\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_roll_roll.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_roll_roll.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1825, "end_line": 1871, "span_ids": ["roll"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef roll(array, shift, axis=None):\n result = array\n\n if axis is None:\n result = ravel(result)\n\n if not isinstance(shift, Integral):\n raise TypeError(\n \"Expect `shift` to be an instance of Integral when `axis` is None.\"\n )\n\n shift = (shift,)\n axis = (0,)\n else:\n try:\n len(shift)\n except TypeError:\n shift = (shift,)\n try:\n len(axis)\n except TypeError:\n axis = (axis,)\n\n if len(shift) != len(axis):\n raise ValueError(\"Must have the same number of shifts as axes.\")\n\n for i, s in zip(axis, shift):\n s = -s\n s %= result.shape[i]\n\n sl1 = result.ndim * [slice(None)]\n sl2 = result.ndim * [slice(None)]\n\n sl1[i] = slice(s, None)\n sl2[i] = slice(None, s)\n\n sl1 = tuple(sl1)\n sl2 = tuple(sl2)\n\n result = concatenate([result[sl1], result[sl2]], axis=i)\n\n result = result.reshape(array.shape)\n # Ensure that the output is always a new array object\n result = result.copy() if result is array else result\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_compress_compress.return.a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_compress_compress.return.a", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1181, "end_line": 1210, "span_ids": ["compress"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef compress(condition, a, axis=None):\n\n if not is_arraylike(condition):\n # Allow `condition` to be anything array-like, otherwise ensure `condition`\n # is a numpy array.\n condition = np.asarray(condition)\n condition = condition.astype(bool)\n a = asarray(a)\n\n if condition.ndim != 1:\n raise ValueError(\"Condition must be one dimensional\")\n\n if 
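`roll` above normalizes `shift` and `axis` to matching tuples, implements each roll as a two-slice `concatenate`, and reshapes back, so it should agree with `np.roll`; a quick check:

import numpy as np
import dask.array as da

x = np.arange(10)
d = da.from_array(x, chunks=3)
assert (da.roll(d, 3).compute() == np.roll(x, 3)).all()
assert (da.roll(d, -4).compute() == np.roll(x, -4)).all()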
axis is None:\n a = a.ravel()\n axis = 0\n axis = validate_axis(axis, a.ndim)\n\n # Treat `condition` as filled with `False` (if it is too short)\n a = a[\n tuple(\n slice(None, len(condition)) if i == axis else slice(None)\n for i in range(a.ndim)\n )\n ]\n\n # Use `condition` to select along 1 dimension\n a = a[tuple(condition if i == axis else slice(None) for i in range(a.ndim))]\n\n return a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_extract__isnonzero_vec.np_vectorize__isnonzero_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_extract__isnonzero_vec.np_vectorize__isnonzero_v", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1719, "end_line": 1793, "span_ids": ["impl:3", "isclose", "isnull", "notnull", "allclose", "variadic_choose", "choose", "_take_dask_array_from_numpy", "_isnonzero_vec", "around", "extract", "_asarray_isnull", "take"], "tokens": 516}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef extract(condition, arr):\n condition = asarray(condition).astype(bool)\n arr = asarray(arr)\n return compress(condition.ravel(), arr.ravel())\n\n\n@derived_from(np)\ndef take(a, indices, axis=0):\n axis = validate_axis(axis, a.ndim)\n\n if isinstance(a, np.ndarray) and isinstance(indices, Array):\n return _take_dask_array_from_numpy(a, indices, axis)\n else:\n return a[(slice(None),) * axis + (indices,)]\n\n\ndef _take_dask_array_from_numpy(a, indices, axis):\n assert isinstance(a, np.ndarray)\n assert isinstance(indices, Array)\n\n return indices.map_blocks(\n lambda block: np.take(a, block, axis), chunks=indices.chunks, dtype=a.dtype\n )\n\n\n@derived_from(np)\ndef around(x, decimals=0):\n return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)\n\n\ndef _asarray_isnull(values):\n import pandas as pd\n\n return np.asarray(pd.isnull(values))\n\n\ndef isnull(values):\n \"\"\"pandas.isnull for dask arrays\"\"\"\n # eagerly raise ImportError, if pandas isn't available\n import pandas as pd # noqa\n\n return elemwise(_asarray_isnull, values, dtype=\"bool\")\n\n\ndef notnull(values):\n \"\"\"pandas.notnull for dask arrays\"\"\"\n return ~isnull(values)\n\n\n@derived_from(np)\ndef isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n func = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)\n return elemwise(func, arr1, arr2, dtype=\"bool\")\n\n\n@derived_from(np)\ndef allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):\n return isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan).all()\n\n\ndef variadic_choose(a, *choices):\n return np.choose(a, choices)\n\n\n@derived_from(np)\ndef choose(a, choices):\n return elemwise(variadic_choose, a, *choices)\n\n\ndef _isnonzero_vec(v):\n return bool(np.count_nonzero(v))\n\n\n_isnonzero_vec = np.vectorize(_isnonzero_vec, 
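Note the two-step indexing in `compress` above: the array is first trimmed to `len(condition)` along `axis`, so a condition that is too short behaves as if padded with `False`. A small illustrative input:

import numpy as np
import dask.array as da

x = da.from_array(np.arange(12).reshape(4, 3), chunks=2)
out = da.compress([True, False], x, axis=0)  # rows 2 and 3 are implicitly dropped
print(out.compute())  # [[0 1 2]]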
otypes=[bool])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_isnonzero_isnonzero.try_.else_.return.a_astype_bool_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_isnonzero_isnonzero.try_.else_.return.a_astype_bool_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1290, "end_line": 1309, "span_ids": ["isnonzero"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def isnonzero(a):\n if a.dtype.kind in {\"U\", \"S\"}:\n # NumPy treats all-whitespace strings as falsy (like in `np.nonzero`).\n # but not in `.astype(bool)`. To match the behavior of numpy at least until\n # 1.19, we use `_isnonzero_vec`. When NumPy changes behavior, we should just\n # use the try block below.\n # https://github.com/numpy/numpy/issues/9875\n return a.map_blocks(_isnonzero_vec, dtype=bool)\n try:\n np.zeros(tuple(), dtype=a.dtype).astype(bool)\n except ValueError:\n ######################################################\n # Handle special cases where conversion to bool does #\n # not work correctly. #\n # #\n # xref: https://github.com/numpy/numpy/issues/9479 #\n ######################################################\n return a.map_blocks(_isnonzero_vec, dtype=bool)\n else:\n return a.astype(bool)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_argwhere_where.if_np_isscalar_condition_.else_.return.elemwise_np_where_condit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_argwhere_where.if_np_isscalar_condition_.else_.return.elemwise_np_where_condit", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1312, "end_line": 1343, "span_ids": ["argwhere", "where"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef argwhere(a):\n a = asarray(a)\n\n nz = isnonzero(a).flatten()\n\n ind = indices(a.shape, dtype=np.intp, chunks=a.chunks)\n if ind.ndim > 1:\n ind = stack([ind[i].ravel() for i in range(len(ind))], axis=1)\n ind = compress(nz, ind, axis=0)\n\n return ind\n\n\n@derived_from(np)\ndef where(condition, x=None, y=None):\n if (x is None) != (y is None):\n raise ValueError(\"either both or 
neither of x and y should be given\")\n if (x is None) and (y is None):\n return nonzero(condition)\n\n if np.isscalar(condition):\n dtype = result_type(x, y)\n x = asarray(x)\n y = asarray(y)\n\n shape = broadcast_shapes(x.shape, y.shape)\n out = x if condition else y\n\n return broadcast_to(out, shape).astype(dtype)\n else:\n return elemwise(np.where, condition, x, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_count_nonzero__unravel_index_kernel.return.np_stack_np_unravel_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_count_nonzero__unravel_index_kernel.return.np_stack_np_unravel_index", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1385, "end_line": 1405, "span_ids": ["count_nonzero", "_unravel_index_kernel", "nonzero", "flatnonzero"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef count_nonzero(a, axis=None):\n return isnonzero(asarray(a)).astype(np.intp).sum(axis=axis)\n\n\n@derived_from(np)\ndef flatnonzero(a):\n return argwhere(asarray(a).ravel())[:, 0]\n\n\n@derived_from(np)\ndef nonzero(a):\n ind = argwhere(a)\n if ind.ndim > 1:\n return tuple(ind[:, i] for i in range(ind.shape[1]))\n else:\n return (ind,)\n\n\ndef _unravel_index_kernel(indices, func_kwargs):\n return np.stack(np.unravel_index(indices, **func_kwargs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_coarsen_coarsen.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_coarsen_coarsen.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2042, "end_line": 2071, "span_ids": ["coarsen"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(chunk.coarsen)\ndef coarsen(reduction, x, axes, trim_excess=False, **kwargs):\n if not trim_excess and not all(x.shape[i] % div == 0 for i, div in axes.items()):\n msg = f\"Coarsening factors {axes} do not align with array shape {x.shape}.\"\n raise ValueError(msg)\n\n if reduction.__module__.startswith(\"dask.\"):\n reduction = getattr(np, reduction.__name__)\n\n new_chunks = {}\n for 
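When `condition` is a scalar, `where` above bypasses `elemwise` and simply broadcasts the selected branch to the common shape with the common dtype; for example:

import dask.array as da

x = da.ones((3, 1), chunks=1)
y = da.zeros((1, 4), chunks=2)
out = da.where(True, x, y)  # scalar condition: x broadcast against y's shape
assert out.shape == (3, 4) and bool(out.compute().all())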
i, div in axes.items():\n aligned = aligned_coarsen_chunks(x.chunks[i], div)\n if aligned != x.chunks[i]:\n new_chunks[i] = aligned\n if new_chunks:\n x = x.rechunk(new_chunks)\n\n name = \"coarsen-\" + tokenize(reduction, x, axes, trim_excess)\n dsk = {\n (name,)\n + key[1:]: (apply, chunk.coarsen, [reduction, key, axes, trim_excess], kwargs)\n for key in flatten(x.__dask_keys__())\n }\n chunks = tuple(\n tuple(int(bd // axes.get(i, 1)) for bd in bds) for i, bds in enumerate(x.chunks)\n )\n\n meta = reduction(np.empty((1,) * x.ndim, dtype=x.dtype), **kwargs)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_split_at_breaks_split_at_breaks.return.split_array": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_split_at_breaks_split_at_breaks.return.split_array", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1492, "end_line": 1502, "span_ids": ["split_at_breaks"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def split_at_breaks(array, breaks, axis=0):\n \"\"\"Split an array into a list of arrays (using slices) at the given breaks\n\n >>> split_at_breaks(np.arange(6), [3, 5])\n [array([0, 1, 2]), array([3, 4]), array([5])]\n \"\"\"\n padded_breaks = concat([[None], breaks, [None]])\n slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]\n preslice = (slice(None),) * axis\n split_array = [array[preslice + (s,)] for s in slices]\n return split_array", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_insert_insert.return.concatenate_interleaved_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_insert_insert.return.concatenate_interleaved_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1505, "end_line": 1551, "span_ids": ["insert"], "tokens": 439}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef insert(arr, obj, values, axis):\n # axis is a required argument here to avoid needing to deal with the numpy\n # default case (which reshapes the array to make it 
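`coarsen` above first realigns chunks so each is divisible by the coarsening factor, then applies the reduction blockwise; with `np.sum` and a factor of 2 it sums adjacent pairs:

import numpy as np
import dask.array as da

x = da.from_array(np.arange(8), chunks=3)  # chunks get realigned internally
out = da.coarsen(np.sum, x, {0: 2})
print(out.compute())  # [ 1  5  9 13]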
flat)\n axis = validate_axis(axis, arr.ndim)\n\n if isinstance(obj, slice):\n obj = np.arange(*obj.indices(arr.shape[axis]))\n obj = np.asarray(obj)\n scalar_obj = obj.ndim == 0\n if scalar_obj:\n obj = np.atleast_1d(obj)\n\n obj = np.where(obj < 0, obj + arr.shape[axis], obj)\n if (np.diff(obj) < 0).any():\n raise NotImplementedError(\n "da.insert only implemented for monotonic ``obj`` argument"\n )\n\n split_arr = split_at_breaks(arr, np.unique(obj), axis)\n\n if getattr(values, "ndim", 0) == 0:\n # we need to turn values into a dask array\n name = "values-" + tokenize(values)\n dtype = getattr(values, "dtype", type(values))\n values = Array({(name,): values}, name, chunks=(), dtype=dtype)\n\n values_shape = tuple(\n len(obj) if axis == n else s for n, s in enumerate(arr.shape)\n )\n values = broadcast_to(values, values_shape)\n elif scalar_obj:\n values = values[(slice(None),) * axis + (None,)]\n\n values_chunks = tuple(\n values_bd if axis == n else arr_bd\n for n, (arr_bd, values_bd) in enumerate(zip(arr.chunks, values.chunks))\n )\n values = values.rechunk(values_chunks)\n\n counts = np.bincount(obj)[:-1]\n values_breaks = np.cumsum(counts[counts > 0])\n split_values = split_at_breaks(values, values_breaks, axis)\n\n interleaved = list(interleave([split_arr, split_values]))\n interleaved = [i for i in interleaved if i.nbytes]\n return concatenate(interleaved, axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_sanitize_index_sanitize_index.if_index_array_dtype_b.else_.raise_TypeError_Invalid_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_sanitize_index_sanitize_index.if_index_array_dtype_b.else_.raise_TypeError_Invalid_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 39, "end_line": 94, "span_ids": ["sanitize_index"], "tokens": 479}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sanitize_index(ind):\n \"\"\"Sanitize the elements for indexing along one axis\n\n >>> sanitize_index([2, 3, 5])\n array([2, 3, 5])\n >>> sanitize_index([True, False, True, False])\n array([0, 2])\n >>> sanitize_index(np.array([1, 2, 3]))\n array([1, 2, 3])\n >>> sanitize_index(np.array([False, True, True]))\n array([1, 2])\n >>> type(sanitize_index(np.int32(0)))\n <class 'int'>\n >>> sanitize_index(1.0)\n 1\n >>> sanitize_index(0.5)\n Traceback (most recent call last):\n ...\n IndexError: Bad index. 
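`insert` above splits both `arr` and (a broadcast copy of) `values` at the monotonic insertion points and interleaves the pieces, so with an explicit `axis` it mirrors `np.insert`:

import numpy as np
import dask.array as da

x = np.arange(6).reshape(2, 3)
d = da.from_array(x, chunks=2)
out = da.insert(d, [1], -1, axis=1)  # insert a column of -1 at position 1
assert (out.compute() == np.insert(x, [1], -1, axis=1)).all()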
Must be integer-like: 0.5\n \"\"\"\n from .utils import asanyarray_safe\n\n if ind is None:\n return None\n elif isinstance(ind, slice):\n return slice(\n _sanitize_index_element(ind.start),\n _sanitize_index_element(ind.stop),\n _sanitize_index_element(ind.step),\n )\n elif isinstance(ind, Number):\n return _sanitize_index_element(ind)\n elif is_dask_collection(ind):\n return ind\n index_array = asanyarray_safe(ind, like=ind)\n if index_array.dtype == bool:\n nonzero = np.nonzero(index_array)\n if len(nonzero) == 1:\n # If a 1-element tuple, unwrap the element\n nonzero = nonzero[0]\n if is_arraylike(nonzero):\n return nonzero\n else:\n return np.asanyarray(nonzero)\n elif np.issubdtype(index_array.dtype, np.integer):\n return index_array\n elif np.issubdtype(index_array.dtype, np.floating):\n int_index = index_array.astype(np.intp)\n if np.allclose(index_array, int_index):\n return int_index\n else:\n check_int = np.isclose(index_array, int_index)\n first_err = index_array.ravel()[np.flatnonzero(~check_int)[0]]\n raise IndexError(\"Bad index. Must be integer-like: %s\" % first_err)\n else:\n raise TypeError(\"Invalid index type\", type(ind), ind)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_array_slice_array.return.dsk_out_bd_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_array_slice_array.return.dsk_out_bd_out", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 97, "end_line": 177, "span_ids": ["slice_array"], "tokens": 722}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_array(out_name, in_name, blockdims, index, itemsize):\n \"\"\"\n Main function for array slicing\n\n This function makes a new dask that slices blocks along every\n dimension and aggregates (via cartesian product) each dimension's\n slices so that the resulting block slices give the same results\n as the original slice on the original structure\n\n Index must be a tuple. It may contain the following types\n\n int, slice, list (at most one list), None\n\n Parameters\n ----------\n in_name - string\n This is the dask variable name that will be used as input\n out_name - string\n This is the dask variable output name\n blockshape - iterable of integers\n index - iterable of integers, slices, lists, or None\n itemsize : int\n The number of bytes required for each element of the array.\n\n Returns\n -------\n Dict where the keys are tuples of\n\n (out_name, dim_index[, dim_index[, ...]])\n\n and the values are\n\n (function, (in_name, dim_index, dim_index, ...),\n (slice(...), [slice()[,...]])\n\n Also new blockdims with shapes of each block\n\n ((10, 10, 10, 10), (20, 20))\n\n Examples\n --------\n >>> from pprint import pprint\n >>> dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],\n ... 
(slice(10, 35),), 8)\n >>> pprint(dsk) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n {('y', 0): (<built-in function getitem>,\n ('x', 0),\n (slice(10, 20, 1),)),\n ('y', 1): (<built-in function getitem>, ('x', 1), (slice(0, 15, 1),))}\n >>> blockdims\n ((10, 15),)\n\n See Also\n --------\n This function works by successively unwrapping cases and passing down\n through a sequence of functions.\n\n slice_with_newaxis : handle None/newaxis case\n slice_wrap_lists : handle fancy indexing with lists\n slice_slices_and_integers : handle everything else\n \"\"\"\n blockdims = tuple(map(tuple, blockdims))\n\n # x[:, :, :] - Punt and return old value\n if all(\n isinstance(index, slice) and index == slice(None, None, None) for index in index\n ):\n suffixes = product(*[range(len(bd)) for bd in blockdims])\n dsk = {(out_name,) + s: (in_name,) + s for s in suffixes}\n return dsk, blockdims\n\n # Add in missing colons at the end as needed. x[5] -> x[5, :, :]\n not_none_count = sum(i is not None for i in index)\n missing = len(blockdims) - not_none_count\n index += (slice(None, None, None),) * missing\n\n # Pass down to next function\n dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index, itemsize)\n\n bd_out = tuple(map(tuple, bd_out))\n return dsk_out, bd_out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_newaxes_slice_with_newaxes.if_where_none_.else_.return.dsk_blockdims2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_newaxes_slice_with_newaxes.if_where_none_.else_.return.dsk_blockdims2", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 180, "end_line": 218, "span_ids": ["slice_with_newaxes"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_newaxes(out_name, in_name, blockdims, index, itemsize):\n \"\"\"\n Handle indexing with Nones\n\n Strips out Nones then hands off to slice_wrap_lists\n \"\"\"\n # Strip Nones from index\n index2 = tuple(ind for ind in index if ind is not None)\n where_none = [i for i, ind in enumerate(index) if ind is None]\n where_none_orig = list(where_none)\n for i, x in enumerate(where_none):\n n = sum(isinstance(ind, Integral) for ind in index[:x])\n if n:\n where_none[i] -= n\n\n # Pass down and do work\n dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2, itemsize)\n\n if where_none:\n expand = expander(where_none)\n expand_orig = expander(where_none_orig)\n\n # Insert \",0\" into the key: ('x', 2, 3) -> ('x', 0, 2, 0, 3)\n dsk2 = {\n (out_name,) + expand(k[1:], 0): (v[:2] + (expand_orig(v[2], None),))\n for k, v in dsk.items()\n if k[0] == out_name\n }\n\n # Add back intermediate parts of the dask that weren't the output\n dsk3 = merge(dsk2, {k: v for k, v in dsk.items() if k[0] != out_name})\n\n # Insert (1,) into blockdims: ((2, 2), (3, 3)) -> ((2, 2), (1,), (3, 3))\n blockdims3 = 
expand(blockdims2, (1,))\n\n return dsk3, blockdims3\n\n else:\n return dsk, blockdims2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_wrap_lists_slice_wrap_lists.return.dsk3_blockdims2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_wrap_lists_slice_wrap_lists.return.dsk3_blockdims2", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 218, "end_line": 280, "span_ids": ["slice_wrap_lists"], "tokens": 629}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_wrap_lists(out_name, in_name, blockdims, index, itemsize):\n \"\"\"\n Fancy indexing along blocked array dasks\n\n Handles index of type list. Calls slice_slices_and_integers for the rest\n\n See Also\n --------\n\n take : handle slicing with lists (\"fancy\" indexing)\n slice_slices_and_integers : handle slicing with slices and integers\n \"\"\"\n assert all(isinstance(i, (slice, list, Integral)) or is_arraylike(i) for i in index)\n if not len(blockdims) == len(index):\n raise IndexError(\"Too many indices for array\")\n\n # Do we have more than one list in the index?\n where_list = [\n i for i, ind in enumerate(index) if is_arraylike(ind) and ind.ndim > 0\n ]\n if len(where_list) > 1:\n raise NotImplementedError(\"Don't yet support nd fancy indexing\")\n # Is the single list an empty list? In this case just treat it as a zero\n # length slice\n if where_list and not index[where_list[0]].size:\n index = list(index)\n index[where_list.pop()] = slice(0, 0, 1)\n index = tuple(index)\n\n # No lists, hooray! just use slice_slices_and_integers\n if not where_list:\n return slice_slices_and_integers(out_name, in_name, blockdims, index)\n\n # Replace all lists with full slices [3, 1, 0] -> slice(None, None, None)\n index_without_list = tuple(\n slice(None, None, None) if is_arraylike(i) else i for i in index\n )\n\n # lists and full slices. Just use take\n if all(is_arraylike(i) or i == slice(None, None, None) for i in index):\n axis = where_list[0]\n blockdims2, dsk3 = take(\n out_name, in_name, blockdims, index[where_list[0]], itemsize, axis=axis\n )\n # Mixed case. Both slices/integers and lists. 
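The key/blockdims expansion in `slice_with_newaxes` above is easiest to see through the public API: each `None` in an index contributes a `0` to every output key and a `(1,)` chunk to the output chunk structure:

import dask.array as da

x = da.ones((4, 6), chunks=(2, 3))
y = x[:, None, :]
print(y.chunks)  # ((2, 2), (1,), (3, 3))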
slice/integer then take\n else:\n # Do first pass without lists\n tmp = \"slice-\" + tokenize((out_name, in_name, blockdims, index))\n dsk, blockdims2 = slice_slices_and_integers(\n tmp, in_name, blockdims, index_without_list\n )\n\n # After collapsing some axes due to int indices, adjust axis parameter\n axis = where_list[0]\n axis2 = axis - sum(\n 1 for i, ind in enumerate(index) if i < axis and isinstance(ind, Integral)\n )\n\n # Do work\n blockdims2, dsk2 = take(out_name, tmp, blockdims2, index[axis], 8, axis=axis2)\n dsk3 = merge(dsk, dsk2)\n\n return dsk3, blockdims2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_slices_and_integers_slice_slices_and_integers.return.dsk_out_new_blockdims": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_slices_and_integers_slice_slices_and_integers.return.dsk_out_new_blockdims", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 286, "end_line": 340, "span_ids": ["slice_slices_and_integers"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_slices_and_integers(out_name, in_name, blockdims, index):\n \"\"\"\n Dask array indexing with slices and integers\n\n See Also\n --------\n\n _slice_1d\n \"\"\"\n from .core import unknown_chunk_message\n\n shape = tuple(cached_cumsum(dim, initial_zero=True)[-1] for dim in blockdims)\n\n for dim, ind in zip(shape, index):\n if np.isnan(dim) and ind != slice(None, None, None):\n raise ValueError(\n f\"Arrays chunk sizes are unknown: {shape}{unknown_chunk_message}\"\n )\n\n assert all(isinstance(ind, (slice, Integral)) for ind in index)\n assert len(index) == len(blockdims)\n\n # Get a list (for each dimension) of dicts{blocknum: slice()}\n block_slices = list(map(_slice_1d, shape, blockdims, index))\n sorted_block_slices = [sorted(i.items()) for i in block_slices]\n\n # (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...\n in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))\n\n # (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...\n out_names = list(\n product(\n [out_name],\n *[\n range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))\n for d, i in zip(block_slices, index)\n if not isinstance(i, Integral)\n ],\n )\n )\n\n all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))\n\n dsk_out = {\n out_name: (getitem, in_name, slices)\n for out_name, in_name, slices in zip(out_names, in_names, all_slices)\n }\n\n new_blockdims = [\n new_blockdim(d, db, i)\n for d, i, db in zip(shape, index, blockdims)\n if not isinstance(i, Integral)\n ]\n\n return dsk_out, new_blockdims", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
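`slice_wrap_lists` above accepts at most one array-like in the index; a mixed integer/array index takes the two-pass branch (slices and integers first, then `take`). Through the public API:

import numpy as np
import dask.array as da

x = da.from_array(np.arange(16).reshape(4, 4), chunks=2)
print(x[1, [0, 2]].compute())  # integer/slice pass, then take -> [4 6]
# two separate array indices would hit the NotImplementedError branch above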
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d__slice_1d._Returns_a_dict_of_blo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d__slice_1d._Returns_a_dict_of_blo", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 331, "end_line": 403, "span_ids": ["_slice_1d"], "tokens": 883}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _slice_1d(dim_shape, lengths, index):\n \"\"\"Returns a dict of {blocknum: slice}\n\n This function figures out where each slice should start in each\n block for a single dimension. If the slice won't return any elements\n in the block, that block will not be in the output.\n\n Parameters\n ----------\n\n dim_shape - the number of elements in this dimension.\n This should be a positive, non-zero integer\n blocksize - the number of elements per block in this dimension\n This should be a positive, non-zero integer\n index - a description of the elements in this dimension that we want\n This might be an integer, a slice(), or an Ellipsis\n\n Returns\n -------\n\n dictionary where the keys are the integer index of the blocks that\n should be sliced and the values are the slices\n\n Examples\n --------\n\n Trivial slicing\n\n >>> _slice_1d(100, [60, 40], slice(None, None, None))\n {0: slice(None, None, None), 1: slice(None, None, None)}\n\n 100 length array cut into length 20 pieces, slice 0:35\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))\n {0: slice(None, None, None), 1: slice(0, 15, 1)}\n\n Support irregular blocks and various slices\n\n >>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))\n {0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}\n\n Support step sizes\n\n >>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))\n {0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40)) # step > blocksize\n {0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}\n\n Also support indexing single elements\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], 25)\n {1: 5}\n\n And negative slicing\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3)) # doctest: +NORMALIZE_WHITESPACE\n {4: slice(-1, -21, -3),\n 3: slice(-2, -21, -3),\n 2: slice(-3, -21, -3),\n 1: slice(-1, -21, -3),\n 0: slice(-2, -20, -3)}\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3)) # doctest: +NORMALIZE_WHITESPACE\n {4: slice(-1, -21, -3),\n 3: slice(-2, -21, -3),\n 2: slice(-3, -21, -3),\n 1: slice(-1, -21, -3),\n 0: slice(-2, -8, -3)}\n\n >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))\n {4: slice(-1, -12, -3)}\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d.chunk_boundaries__slice_1d.return.d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__slice_1d.chunk_boundaries__slice_1d.return.d", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 404, "end_line": 495, "span_ids": ["_slice_1d"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _slice_1d(dim_shape, lengths, index):\n chunk_boundaries = cached_cumsum(lengths)\n\n if isinstance(index, Integral):\n # use right-side search to be consistent with previous result\n i = bisect.bisect_right(chunk_boundaries, index)\n if i > 0:\n # the very first chunk has no relative shift\n ind = index - chunk_boundaries[i - 1]\n else:\n ind = index\n return {int(i): int(ind)}\n\n assert isinstance(index, slice)\n\n if index == colon:\n return {k: colon for k in range(len(lengths))}\n\n step = index.step or 1\n if step > 0:\n start = index.start or 0\n stop = index.stop if index.stop is not None else dim_shape\n else:\n start = index.start if index.start is not None else dim_shape - 1\n start = dim_shape - 1 if start >= dim_shape else start\n stop = -(dim_shape + 1) if index.stop is None else index.stop\n\n # posify start and stop\n if start < 0:\n start += dim_shape\n if stop < 0:\n stop += dim_shape\n\n d = dict()\n if step > 0:\n istart = bisect.bisect_right(chunk_boundaries, start)\n istop = bisect.bisect_left(chunk_boundaries, stop)\n\n # the bound is not exactly tight; make it tighter?\n istop = min(istop + 1, len(lengths))\n\n # jump directly to istart\n if istart > 0:\n start = start - chunk_boundaries[istart - 1]\n stop = stop - chunk_boundaries[istart - 1]\n\n for i in range(istart, istop):\n length = lengths[i]\n if start < length and stop > 0:\n d[i] = slice(start, min(stop, length), step)\n start = (start - length) % step\n else:\n start = start - length\n stop -= length\n else:\n rstart = start # running start\n\n istart = bisect.bisect_left(chunk_boundaries, start)\n istop = bisect.bisect_right(chunk_boundaries, stop)\n\n # the bound is not exactly tight; make it tighter?\n istart = min(istart + 1, len(chunk_boundaries) - 1)\n istop = max(istop - 1, -1)\n\n for i in range(istart, istop, -1):\n chunk_stop = chunk_boundaries[i]\n # create a chunk start and stop\n if i == 0:\n chunk_start = 0\n else:\n chunk_start = chunk_boundaries[i - 1]\n\n # if our slice is in this chunk\n if (chunk_start <= rstart < chunk_stop) and (rstart > stop):\n d[i] = slice(\n rstart - chunk_stop,\n max(chunk_start - chunk_stop - 1, stop - chunk_stop),\n step,\n )\n\n # compute the next running start point,\n offset = (rstart - (chunk_start - 1)) % step\n rstart = chunk_start + offset - 1\n\n # replace 0:20:1 with : if appropriate\n for k, v in d.items():\n if v == slice(0, lengths[k], 1):\n d[k] = 
slice(None, None, None)\n\n if not d: # special case x[:0]\n d[0] = slice(0, 0, 1)\n\n return d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_partition_by_size_issorted.return.np_all_seq_1_seq_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_partition_by_size_issorted.return.np_all_seq_1_seq_1_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 507, "end_line": 535, "span_ids": ["issorted", "partition_by_size"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partition_by_size(sizes, seq):\n \"\"\"\n\n >>> partition_by_size([10, 20, 10], [1, 5, 9, 12, 29, 35])\n [array([1, 5, 9]), array([ 2, 19]), array([5])]\n \"\"\"\n if not is_arraylike(seq):\n seq = np.asanyarray(seq)\n left = np.empty(len(sizes) + 1, dtype=int)\n left[0] = 0\n\n right = np.cumsum(sizes, out=left[1:])\n locations = np.empty(len(sizes) + 1, dtype=int)\n locations[0] = 0\n locations[1:] = np.searchsorted(seq, right)\n return [(seq[j:k] - l) for j, k, l in zip(locations[:-1], locations[1:], left)]\n\n\ndef issorted(seq):\n \"\"\"Is sequence sorted?\n\n >>> issorted([1, 2, 3])\n True\n >>> issorted([3, 1, 2])\n False\n \"\"\"\n if len(seq) == 0:\n return True\n return np.all(seq[:-1] <= seq[1:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slicing_plan_slicing_plan.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slicing_plan_slicing_plan.return.out", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 538, "end_line": 579, "span_ids": ["slicing_plan"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slicing_plan(chunks, index):\n \"\"\"Construct a plan to slice chunks with the given index\n\n Parameters\n ----------\n chunks : Tuple[int]\n One dimensions worth of chunking information\n index : np.ndarray[int]\n The index passed to slice on that dimension\n\n Returns\n -------\n out : List[Tuple[int, np.ndarray]]\n A list of chunk/sub-index pairs corresponding to each output chunk\n \"\"\"\n from .utils import asarray_safe\n\n if not is_arraylike(index):\n index = np.asanyarray(index)\n cum_chunks = 
cached_cumsum(chunks)\n\n cum_chunks = asarray_safe(cum_chunks, like=index)\n # this dispactches to the array library\n chunk_locations = np.searchsorted(cum_chunks, index, side=\"right\")\n\n # but we need chunk_locations as python ints for getitem calls downstream\n chunk_locations = chunk_locations.tolist()\n where = np.where(np.diff(chunk_locations))[0] + 1\n\n extra = asarray_safe([0], like=where)\n c_loc = asarray_safe([len(chunk_locations)], like=where)\n where = np.concatenate([extra, where, c_loc])\n\n out = []\n for i in range(len(where) - 1):\n sub_index = index[where[i] : where[i + 1]]\n chunk = chunk_locations[where[i]]\n if chunk > 0:\n sub_index = sub_index - cum_chunks[chunk - 1]\n out.append((chunk, sub_index))\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_posify_index_posify_index.return.ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_posify_index_posify_index.return.ind", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 649, "end_line": 674, "span_ids": ["posify_index"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def posify_index(shape, ind):\n \"\"\"Flip negative indices around to positive ones\n\n >>> posify_index(10, 3)\n 3\n >>> posify_index(10, -3)\n 7\n >>> posify_index(10, [3, -3])\n array([3, 7])\n\n >>> posify_index((10, 20), (3, -3))\n (3, 17)\n >>> posify_index((10, 20), (3, [3, 4, -3])) # doctest: +NORMALIZE_WHITESPACE\n (3, array([ 3, 4, 17]))\n \"\"\"\n if isinstance(ind, tuple):\n return tuple(map(posify_index, shape, ind))\n if isinstance(ind, Integral):\n if ind < 0 and not math.isnan(shape):\n return ind + shape\n else:\n return ind\n if isinstance(ind, (np.ndarray, list)) and not math.isnan(shape):\n ind = np.asanyarray(ind)\n return np.where(ind < 0, ind + shape, ind)\n return ind", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__expander_expander.return._expander_tuple_where_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py__expander_expander.return._expander_tuple_where_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 677, "end_line": 711, "span_ids": ["expander", "_expander"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
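`slicing_plan` above groups consecutive index values by the chunk they fall into and rebases each group to chunk-local coordinates; a worked call (values traced by hand from the code above, assuming the function stays importable from `dask.array.slicing`):

from dask.array.slicing import slicing_plan

plan = slicing_plan((20, 20, 20), [5, 25, 27])
print(plan)  # [(0, array([5])), (1, array([5, 7]))]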
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@memoize\ndef _expander(where):\n if not where:\n\n def expand(seq, val):\n return seq\n\n return expand\n else:\n decl = \"\"\"def expand(seq, val):\n return ({left}) + tuple({right})\n \"\"\"\n left = []\n j = 0\n for i in range(max(where) + 1):\n if i in where:\n left.append(\"val, \")\n else:\n left.append(\"seq[%d], \" % j)\n j += 1\n right = \"seq[%d:]\" % j\n left = \"\".join(left)\n decl = decl.format(**locals())\n ns = {}\n exec(compile(decl, \"\", \"exec\"), ns, ns)\n return ns[\"expand\"]\n\n\ndef expander(where):\n \"\"\"Create a function to insert value at many locations in sequence.\n\n >>> expander([0, 2])(['a', 'b', 'c'], 'z')\n ('z', 'a', 'z', 'b', 'c')\n \"\"\"\n return _expander(tuple(where))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_new_blockdim_new_blockdim.return._int_math_ceil_1_0_slc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_new_blockdim_new_blockdim.return._int_math_ceil_1_0_slc", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 714, "end_line": 738, "span_ids": ["new_blockdim"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def new_blockdim(dim_shape, lengths, index):\n \"\"\"\n\n >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(0, 90, 2))\n [10, 5, 10, 5, 15]\n\n >>> new_blockdim(100, [20, 10, 20, 10, 40], [5, 1, 30, 22])\n [4]\n\n >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(90, 10, -2))\n [16, 5, 10, 5, 4]\n \"\"\"\n if index == slice(None, None, None):\n return lengths\n if isinstance(index, list):\n return [len(index)]\n assert not isinstance(index, Integral)\n pairs = sorted(_slice_1d(dim_shape, lengths, index).items(), key=itemgetter(0))\n slices = [\n slice(0, lengths[i], 1) if slc == slice(None, None, None) else slc\n for i, slc in pairs\n ]\n if isinstance(index, slice) and index.step and index.step < 0:\n slices = slices[::-1]\n return [int(math.ceil((1.0 * slc.stop - slc.start) / slc.step)) for slc in slices]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_replace_ellipsis_replace_ellipsis.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_replace_ellipsis_replace_ellipsis.return._", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 741, "end_line": 759, "span_ids": ["replace_ellipsis"], "tokens": 201}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def replace_ellipsis(n, index):\n \"\"\"Replace ... with slices, :, : ,:\n\n >>> replace_ellipsis(4, (3, Ellipsis, 2))\n (3, slice(None, None, None), slice(None, None, None), 2)\n\n >>> replace_ellipsis(2, (Ellipsis, None))\n (slice(None, None, None), slice(None, None, None), None)\n \"\"\"\n # Careful about using in or index because index may contain arrays\n isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis]\n if not isellipsis:\n return index\n else:\n loc = isellipsis[0]\n extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1)\n return (\n index[:loc] + (slice(None, None, None),) * extra_dimensions + index[loc + 1 :]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_slice_normalize_slice.return.idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_slice_normalize_slice.return.idx", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 762, "end_line": 795, "span_ids": ["normalize_slice"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_slice(idx, dim):\n \"\"\"Normalize slices to canonical form\n\n Parameters\n ----------\n idx: slice or other index\n dim: dimension length\n\n Examples\n --------\n >>> normalize_slice(slice(0, 10, 1), 10)\n slice(None, None, None)\n \"\"\"\n\n if isinstance(idx, slice):\n if math.isnan(dim):\n return idx\n start, stop, step = idx.indices(dim)\n if step > 0:\n if start == 0:\n start = None\n if stop >= dim:\n stop = None\n if step == 1:\n step = None\n if stop is not None and start is not None and stop < start:\n stop = start\n elif step < 0:\n if start >= dim - 1:\n start = None\n if stop < 0:\n stop = None\n return slice(start, stop, step)\n return idx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_index_normalize_index.return.idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_normalize_index_normalize_index.return.idx", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 855, "end_line": 921, "span_ids": ["normalize_index"], "tokens": 610}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_index(idx, shape):\n \"\"\"Normalize slicing indexes\n\n 1. Replaces ellipses with many full slices\n 2. Adds full slices to end of index\n 3. Checks bounding conditions\n 4. Replace multidimensional numpy arrays with dask arrays\n 5. Replaces numpy arrays with lists\n 6. Posify's integers and lists\n 7. Normalizes slices to canonical form\n\n Examples\n --------\n >>> normalize_index(1, (10,))\n (1,)\n >>> normalize_index(-1, (10,))\n (9,)\n >>> normalize_index([-1], (10,))\n (array([9]),)\n >>> normalize_index(slice(-3, 10, 1), (10,))\n (slice(7, None, None),)\n >>> normalize_index((Ellipsis, None), (10,))\n (slice(None, None, None), None)\n >>> normalize_index(np.array([[True, False], [False, True], [True, True]]), (3, 2))\n (dask.array,)\n \"\"\"\n from .core import Array, from_array\n\n if not isinstance(idx, tuple):\n idx = (idx,)\n\n # if a > 1D numpy.array is provided, cast it to a dask array\n if len(idx) > 0 and len(shape) > 1:\n i = idx[0]\n if is_arraylike(i) and not isinstance(i, Array) and i.shape == shape:\n idx = (from_array(i), *idx[1:])\n\n idx = replace_ellipsis(len(shape), idx)\n n_sliced_dims = 0\n for i in idx:\n if hasattr(i, \"ndim\") and i.ndim >= 1:\n n_sliced_dims += i.ndim\n elif i is None:\n continue\n else:\n n_sliced_dims += 1\n\n idx = idx + (slice(None),) * (len(shape) - n_sliced_dims)\n if len([i for i in idx if i is not None]) > len(shape):\n raise IndexError(\"Too many indices for array\")\n\n none_shape = []\n i = 0\n for ind in idx:\n if ind is not None:\n none_shape.append(shape[i])\n i += 1\n else:\n none_shape.append(None)\n\n for axis, (i, d) in enumerate(zip(idx, none_shape)):\n if d is not None:\n check_index(axis, i, d)\n idx = tuple(map(sanitize_index, idx))\n idx = tuple(map(normalize_slice, idx, none_shape))\n idx = posify_index(none_shape, idx)\n return idx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_out_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_slice_with_int_dask_array.return.x_tuple_out_index_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 936, "end_line": 986, "span_ids": ["slice_with_int_dask_array"], "tokens": 424}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array(x, index):\n \"\"\"Slice x with at most one 1D dask arrays of ints.\n\n This is a helper function of :meth:`Array.__getitem__`.\n\n Parameters\n ----------\n x: 
Array\n index: tuple with as many elements as x.ndim, among which there are\n one or more Array's with dtype=int\n\n Returns\n -------\n tuple of (sliced x, new index)\n\n where the new index is the same as the input, but with slice(None)\n replaced to the original slicer where a 1D filter has been applied and\n one less element where a zero-dimensional filter has been applied.\n \"\"\"\n from .core import Array\n\n assert len(index) == x.ndim\n fancy_indexes = [\n isinstance(idx, (tuple, list))\n or (isinstance(idx, (np.ndarray, Array)) and idx.ndim > 0)\n for idx in index\n ]\n if sum(fancy_indexes) > 1:\n raise NotImplementedError(\"Don't yet support nd fancy indexing\")\n\n out_index = []\n dropped_axis_cnt = 0\n for in_axis, idx in enumerate(index):\n out_axis = in_axis - dropped_axis_cnt\n if isinstance(idx, Array) and idx.dtype.kind in \"iu\":\n if idx.ndim == 0:\n idx = idx[np.newaxis]\n x = slice_with_int_dask_array_on_axis(x, idx, out_axis)\n x = x[tuple(0 if i == out_axis else slice(None) for i in range(x.ndim))]\n dropped_axis_cnt += 1\n elif idx.ndim == 1:\n x = slice_with_int_dask_array_on_axis(x, idx, out_axis)\n out_index.append(slice(None))\n else:\n raise NotImplementedError(\n \"Slicing with dask.array of ints only permitted when \"\n \"the indexer has zero or one dimensions\"\n )\n else:\n out_index.append(idx)\n return x, tuple(out_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_on_axis_slice_with_int_dask_array_on_axis.return.y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_int_dask_array_on_axis_slice_with_int_dask_array_on_axis.return.y", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1043, "end_line": 1109, "span_ids": ["slice_with_int_dask_array_on_axis"], "tokens": 524}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array_on_axis(x, idx, axis):\n \"\"\"Slice a ND dask array with a 1D dask arrays of ints along the given\n axis.\n\n This is a helper function of :func:`slice_with_int_dask_array`.\n \"\"\"\n from . import chunk\n from .core import Array, blockwise, from_array\n from .utils import asarray_safe\n\n assert 0 <= axis < x.ndim\n\n if np.isnan(x.chunks[axis]).any():\n raise NotImplementedError(\n \"Slicing an array with unknown chunks with \"\n \"a dask.array of ints is not supported\"\n )\n\n # Calculate the offset at which each chunk starts along axis\n # e.g. chunks=(..., (5, 3, 4), ...) 
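A behavioral sketch of the helper above, assuming dask is installed: a single 1-D integer dask-array indexer is routed through `slice_with_int_dask_array_on_axis`, while combining two fancy indexers in one `getitem` raises, per the `sum(fancy_indexes) > 1` guard:

```python
import numpy as np
import dask.array as da

x = da.arange(20, chunks=5).reshape(4, 5)
idx = da.from_array(np.array([0, 3]), chunks=2)
print(x[idx, :].compute())  # rows 0 and 3 of the 4x5 array

try:
    x[idx, idx]             # two fancy indexers in a single getitem
except NotImplementedError as err:
    print("not supported:", err)
```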
-> offset=[0, 5, 8]\n offset = np.roll(np.cumsum(asarray_safe(x.chunks[axis], like=x._meta)), 1)\n offset[0] = 0\n offset = from_array(offset, chunks=1)\n # Tamper with the declared chunks of offset to make blockwise align it with\n # x[axis]\n offset = Array(\n offset.dask, offset.name, (x.chunks[axis],), offset.dtype, meta=x._meta\n )\n\n # Define axis labels for blockwise\n x_axes = tuple(range(x.ndim))\n idx_axes = (x.ndim,) # arbitrary index not already in x_axes\n offset_axes = (axis,)\n p_axes = x_axes[: axis + 1] + idx_axes + x_axes[axis + 1 :]\n y_axes = x_axes[:axis] + idx_axes + x_axes[axis + 1 :]\n\n # Calculate the cartesian product of every chunk of x vs every chunk of idx\n p = blockwise(\n chunk.slice_with_int_dask_array,\n p_axes,\n x,\n x_axes,\n idx,\n idx_axes,\n offset,\n offset_axes,\n x_size=x.shape[axis],\n axis=axis,\n dtype=x.dtype,\n meta=x._meta,\n )\n\n # Aggregate on the chunks of x along axis\n y = blockwise(\n chunk.slice_with_int_dask_array_aggregate,\n y_axes,\n idx,\n idx_axes,\n p,\n p_axes,\n concatenate=True,\n x_chunks=x.chunks[axis],\n axis=axis,\n dtype=x.dtype,\n meta=x._meta,\n )\n return y", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_bool_dask_array_slice_with_bool_dask_array.return.out_tuple_out_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_slice_with_bool_dask_array_slice_with_bool_dask_array.return.out_tuple_out_index_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1108, "end_line": 1197, "span_ids": ["slice_with_bool_dask_array"], "tokens": 715}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_bool_dask_array(x, index):\n \"\"\"Slice x with one or more dask arrays of bools\n\n This is a helper function of `Array.__getitem__`.\n\n Parameters\n ----------\n x: Array\n index: tuple with as many elements as x.ndim, among which there are\n one or more Array's with dtype=bool\n\n Returns\n -------\n tuple of (sliced x, new index)\n\n where the new index is the same as the input, but with slice(None)\n replaced to the original slicer when a filter has been applied.\n\n Note: The sliced x will have nan chunks on the sliced axes.\n \"\"\"\n from .core import Array, blockwise, elemwise\n\n out_index = [\n slice(None) if isinstance(ind, Array) and ind.dtype == bool else ind\n for ind in index\n ]\n\n if len(index) == 1 and index[0].ndim == x.ndim:\n if not np.isnan(x.shape).any() and not np.isnan(index[0].shape).any():\n x = x.ravel()\n index = tuple(i.ravel() for i in index)\n elif x.ndim > 1:\n warnings.warn(\n \"When slicing a Dask array of unknown chunks with a boolean mask \"\n \"Dask array, the output array may have a different ordering \"\n \"compared to the equivalent NumPy operation. 
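The offset trick used at the top of this helper, in isolation: the starting position of each chunk along an axis is a cumulative sum of the chunk sizes, rolled by one with the first entry reset to zero.

```python
import numpy as np

chunks_along_axis = (5, 3, 4)
offset = np.roll(np.cumsum(chunks_along_axis), 1)
offset[0] = 0
print(offset)  # [0 5 8] -> chunk i starts at global position offset[i]
```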
This will raise an \"\n \"error in a future release of Dask.\",\n stacklevel=3,\n )\n y = elemwise(getitem, x, *index, dtype=x.dtype)\n name = \"getitem-\" + tokenize(x, index)\n dsk = {(name, i): k for i, k in enumerate(core.flatten(y.__dask_keys__()))}\n chunks = ((np.nan,) * y.npartitions,)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[y])\n return Array(graph, name, chunks, x.dtype), out_index\n\n if any(\n isinstance(ind, Array) and ind.dtype == bool and ind.ndim != 1 for ind in index\n ):\n raise NotImplementedError(\n \"Slicing with dask.array of bools only permitted when \"\n \"the indexer has only one dimension or when \"\n \"it has the same dimension as the sliced \"\n \"array\"\n )\n indexes = [\n ind if isinstance(ind, Array) and ind.dtype == bool else slice(None)\n for ind in index\n ]\n\n arginds = []\n i = 0\n for ind in indexes:\n if isinstance(ind, Array) and ind.dtype == bool:\n new = (ind, tuple(range(i, i + ind.ndim)))\n i += x.ndim\n else:\n new = (slice(None), None)\n i += 1\n arginds.append(new)\n\n arginds = list(concat(arginds))\n\n out = blockwise(\n getitem_variadic,\n tuple(range(x.ndim)),\n x,\n tuple(range(x.ndim)),\n *arginds,\n dtype=x.dtype,\n )\n\n chunks = []\n for ind, chunk in zip(index, out.chunks):\n if isinstance(ind, Array) and ind.dtype == bool:\n chunks.append((np.nan,) * len(chunk))\n else:\n chunks.append(chunk)\n out._chunks = tuple(chunks)\n return out, tuple(out_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_getitem_variadic_make_block_sorted_slices.return.index2_index3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_getitem_variadic_make_block_sorted_slices.return.index2_index3", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1145, "end_line": 1200, "span_ids": ["make_block_sorted_slices", "getitem_variadic"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getitem_variadic(x, *index):\n return x[index]\n\n\ndef make_block_sorted_slices(index, chunks):\n \"\"\"Generate blockwise-sorted index pairs for shuffling an array.\n\n Parameters\n ----------\n index : ndarray\n An array of index positions.\n chunks : tuple\n Chunks from the original dask array\n\n Returns\n -------\n index2 : ndarray\n Same values as `index`, but each block has been sorted\n index3 : ndarray\n The location of the values of `index` in `index2`\n\n Examples\n --------\n >>> index = np.array([6, 0, 4, 2, 7, 1, 5, 3])\n >>> chunks = ((4, 4),)\n >>> a, b = make_block_sorted_slices(index, chunks)\n\n Notice that the first set of 4 items are sorted, and the\n second set of 4 items are sorted.\n\n >>> a\n array([0, 2, 4, 6, 1, 3, 5, 7])\n >>> b\n array([3, 0, 2, 1, 7, 4, 6, 5])\n \"\"\"\n from .core import slices_from_chunks\n\n slices = slices_from_chunks(chunks)\n\n if len(slices[0]) > 1:\n 
slices = [slice_[0] for slice_ in slices]\n\n offsets = np.roll(np.cumsum(chunks[0]), 1)\n offsets[0] = 0\n\n index2 = np.empty_like(index)\n index3 = np.empty_like(index)\n\n for slice_, offset in zip(slices, offsets):\n a = index[slice_]\n b = np.sort(a)\n c = offset + np.argsort(b.take(np.argsort(a)))\n index2[slice_] = b\n index3[slice_] = c\n\n return index2, index3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_shuffle_slice_shuffle_slice.with_warnings_catch_warni.return.x_index2_rechunk_chunks2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_shuffle_slice_shuffle_slice.with_warnings_catch_warni.return.x_index2_rechunk_chunks2", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1203, "end_line": 1225, "span_ids": ["shuffle_slice"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_slice(x, index):\n \"\"\"A relatively efficient way to shuffle `x` according to `index`.\n\n Parameters\n ----------\n x : Array\n index : ndarray\n This should be an ndarray the same length as `x` containing\n each index position in ``range(0, len(x))``.\n\n Returns\n -------\n Array\n \"\"\"\n from .core import PerformanceWarning\n\n chunks1 = chunks2 = x.chunks\n if x.ndim > 1:\n chunks1 = (chunks1[0],)\n index2, index3 = make_block_sorted_slices(index, chunks1)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n return x[index2].rechunk(chunks2)[index3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_____all__._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_____all__._", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 81, "span_ids": ["impl:16", "docstring"], "tokens": 519}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nStatistical functions and tests, following scipy.stats.\n\nSome differences\n\n- We don't handle missing values at all\n\n\"\"\"\n# This is lightly adapted from scipy.stats 0.19\n# https://github.com/scipy/scipy/blob/v0.19.0/scipy/stats/stats.py\nimport math\nfrom collections import namedtuple\n\nimport numpy as np\n\nimport dask.array as 
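The docstring example of `make_block_sorted_slices` can be checked directly with NumPy: selecting with the block-sorted `index2` and then with `index3` reproduces plain selection by `index`, which is exactly the identity `shuffle_slice` relies on (with a rechunk in between).

```python
import numpy as np

index = np.array([6, 0, 4, 2, 7, 1, 5, 3])
index2 = np.array([0, 2, 4, 6, 1, 3, 5, 7])  # per-block sorted values
index3 = np.array([3, 0, 2, 1, 7, 4, 6, 5])  # where each original value landed
x = np.arange(8) * 10
assert (x[index] == x[index2][index3]).all()
```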
da\nfrom dask import delayed\nfrom dask.array.ufunc import wrap_elemwise\nfrom dask.utils import derived_from\n\ntry:\n import scipy.stats\nexcept ImportError as e:\n raise ImportError(\"`dask.array.stats` requires `scipy` to be installed.\") from e\nfrom scipy import special\nfrom scipy.stats import distributions\n\n# copied from https://github.com/scipy/scipy/blob/v1.8.0/scipy/stats/_stats_py.py since\n# these are all private after v1.8.0\nF_onewayResult = namedtuple(\"F_onewayResult\", (\"statistic\", \"pvalue\"))\nKurtosistestResult = namedtuple(\"KurtosistestResult\", (\"statistic\", \"pvalue\"))\nNormaltestResult = namedtuple(\"NormaltestResult\", (\"statistic\", \"pvalue\"))\nPower_divergenceResult = namedtuple(\"Power_divergenceResult\", (\"statistic\", \"pvalue\"))\nSkewtestResult = namedtuple(\"SkewtestResult\", (\"statistic\", \"pvalue\"))\nTtest_1sampResult = namedtuple(\"Ttest_1sampResult\", (\"statistic\", \"pvalue\"))\nTtest_indResult = namedtuple(\"Ttest_indResult\", (\"statistic\", \"pvalue\"))\nTtest_relResult = namedtuple(\"Ttest_relResult\", (\"statistic\", \"pvalue\"))\n\n# Map from names to lambda_ values used in power_divergence().\n_power_div_lambda_names = {\n \"pearson\": 1,\n \"log-likelihood\": 0,\n \"freeman-tukey\": -0.5,\n \"mod-log-likelihood\": -1,\n \"neyman\": -2,\n \"cressie-read\": 2 / 3,\n}\n\n__all__ = [\n \"ttest_ind\",\n \"ttest_1samp\",\n \"ttest_rel\",\n \"chisquare\",\n \"power_divergence\",\n \"skew\",\n \"skewtest\",\n \"kurtosis\",\n \"kurtosistest\",\n \"normaltest\",\n \"f_oneway\",\n \"moment\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_1samp_ttest_1samp.return.delayed_Ttest_1sampResult": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_1samp_ttest_1samp.return.delayed_Ttest_1sampResult", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 109, "span_ids": ["ttest_1samp"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef ttest_1samp(a, popmean, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n n = a.shape[axis]\n df = n - 1\n\n d = da.mean(a, axis) - popmean\n v = da.var(a, axis, ddof=1)\n denom = da.sqrt(v / float(n))\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n t = da.divide(d, denom)\n t, prob = _ttest_finish(df, t)\n return delayed(Ttest_1sampResult, nout=2)(t, prob)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
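A quick cross-check of the lazy t-test above, assuming dask and scipy are installed; the dask version returns a `Delayed` that computes to a `(statistic, pvalue)` namedtuple:

```python
import numpy as np
import dask.array as da
import scipy.stats
import dask.array.stats as dask_stats

rng = np.random.default_rng(0)
a = rng.normal(size=100)
expected = scipy.stats.ttest_1samp(a, 0.0)
result = dask_stats.ttest_1samp(da.from_array(a, chunks=25), 0.0).compute()
assert np.allclose(result.statistic, expected.statistic)
assert np.allclose(result.pvalue, expected.pvalue)
```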
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_rel_chisquare.return.power_divergence_f_obs_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_ttest_rel_chisquare.return.power_divergence_f_obs_f", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 112, "end_line": 136, "span_ids": ["chisquare", "ttest_rel"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef ttest_rel(a, b, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n n = a.shape[axis]\n df = float(n - 1)\n\n d = (a - b).astype(np.float64)\n v = da.var(d, axis, ddof=1)\n dm = da.mean(d, axis)\n denom = da.sqrt(v / float(n))\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n t = da.divide(dm, denom)\n t, prob = _ttest_finish(df, t)\n\n return delayed(Ttest_relResult, nout=2)(t, prob)\n\n\n@derived_from(scipy.stats)\ndef chisquare(f_obs, f_exp=None, ddof=0, axis=0):\n return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_=\"pearson\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_power_divergence_power_divergence.return.delayed_Power_divergenceR": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_power_divergence_power_divergence.return.delayed_Power_divergenceR", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 151, "end_line": 194, "span_ids": ["power_divergence"], "tokens": 452}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):\n\n if isinstance(lambda_, str):\n if lambda_ not in _power_div_lambda_names:\n names = repr(list(_power_div_lambda_names.keys()))[1:-1]\n raise ValueError(\n f\"invalid string for lambda_: {lambda_!r}. \"\n f\"Valid strings are {names}\"\n )\n lambda_ = _power_div_lambda_names[lambda_]\n elif lambda_ is None:\n lambda_ = 1\n\n if f_exp is not None:\n # f_exp = np.atleast_1d(np.asanyarray(f_exp))\n pass\n else:\n f_exp = f_obs.mean(axis=axis, keepdims=True)\n\n # `terms` is the array of terms that are summed along `axis` to create\n # the test statistic. 
We use some specialized code for a few special\n # cases of lambda_.\n if lambda_ == 1:\n # Pearson's chi-squared statistic\n terms = (f_obs - f_exp) ** 2 / f_exp\n elif lambda_ == 0:\n # Log-likelihood ratio (i.e. G-test)\n terms = 2.0 * _xlogy(f_obs, f_obs / f_exp)\n elif lambda_ == -1:\n # Modified log-likelihood ratio\n terms = 2.0 * _xlogy(f_exp, f_exp / f_obs)\n else:\n # General Cressie-Read power divergence.\n terms = f_obs * ((f_obs / f_exp) ** lambda_ - 1)\n terms /= 0.5 * lambda_ * (lambda_ + 1)\n\n stat = terms.sum(axis=axis)\n\n num_obs = _count(terms, axis=axis)\n # ddof = asarray(ddof)\n p = delayed(distributions.chi2.sf)(stat, num_obs - 1 - ddof)\n\n return delayed(Power_divergenceResult, nout=2)(stat, p)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skew_skew.return.vals": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skew_skew.return.vals", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 197, "end_line": 220, "span_ids": ["skew"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef skew(a, axis=0, bias=True, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n n = a.shape[axis] # noqa; for bias\n m2 = moment(a, 2, axis)\n m3 = moment(a, 3, axis)\n zero = m2 == 0\n vals = da.where(~zero, m3 / m2**1.5, 0.0)\n # vals = da.where(~zero, (m2, m3),\n # lambda m2, m3: m3 / m2**1.5,\n # 0.)\n if not bias:\n # Need a version of np.place\n raise NotImplementedError(\"bias=False is not implemented.\")\n\n if vals.ndim == 0:\n # TODO: scalar, min is a workaround\n return vals.min()\n\n return vals", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skewtest_skewtest.return.delayed_SkewtestResult_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_skewtest_skewtest.return.delayed_SkewtestResult_n", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 223, "end_line": 251, "span_ids": ["skewtest"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
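A spot-check of the skew formula above (`m3 / m2**1.5` built from lazy central moments), assuming dask and scipy are installed:

```python
import numpy as np
import dask.array as da
import scipy.stats
import dask.array.stats as dask_stats

a = np.array([1.0, 2.0, 2.0, 3.0, 10.0])
d = dask_stats.skew(da.from_array(a, chunks=5)).compute()
assert np.isclose(d, scipy.stats.skew(a))  # same biased estimator by default
```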
"@derived_from(scipy.stats)\ndef skewtest(a, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n b2 = skew(a, axis)\n n = float(a.shape[axis])\n if n < 8:\n raise ValueError(\n \"skewtest is not valid with less than 8 samples; %i samples\"\n \" were given.\" % int(n)\n )\n y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))\n beta2 = (\n 3.0\n * (n**2 + 27 * n - 70)\n * (n + 1)\n * (n + 3)\n / ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))\n )\n W2 = -1 + math.sqrt(2 * (beta2 - 1))\n delta = 1 / math.sqrt(0.5 * math.log(W2))\n alpha = math.sqrt(2.0 / (W2 - 1))\n y = np.where(y == 0, 1, y)\n Z = delta * np.log(y / alpha + np.sqrt((y / alpha) ** 2 + 1))\n\n return delayed(SkewtestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosistest_kurtosistest.return.delayed_KurtosistestResul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosistest_kurtosistest.return.delayed_KurtosistestResul", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 318, "span_ids": ["kurtosistest"], "tokens": 546}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef kurtosistest(a, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n n = float(a.shape[axis])\n b2 = kurtosis(a, axis, fisher=False)\n\n E = 3.0 * (n - 1) / (n + 1)\n varb2 = (\n 24.0 * n * (n - 2) * (n - 3) / ((n + 1) * (n + 1.0) * (n + 3) * (n + 5))\n ) # [1]_ Eq. 1\n x = (b2 - E) / np.sqrt(varb2) # [1]_ Eq. 4\n # [1]_ Eq. 2:\n sqrtbeta1 = (\n 6.0\n * (n * n - 5 * n + 2)\n / ((n + 7) * (n + 9))\n * np.sqrt((6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))\n )\n # [1]_ Eq. 3:\n A = 6.0 + 8.0 / sqrtbeta1 * (2.0 / sqrtbeta1 + np.sqrt(1 + 4.0 / (sqrtbeta1**2)))\n term1 = 1 - 2 / (9.0 * A)\n denom = 1 + x * np.sqrt(2 / (A - 4.0))\n denom = np.where(denom < 0, 99, denom)\n term2 = np.where(denom < 0, term1, np.power((1 - 2.0 / A) / denom, 1 / 3.0))\n Z = (term1 - term2) / np.sqrt(2 / (9.0 * A)) # [1]_ Eq. 
5\n Z = np.where(denom == 99, 0, Z)\n if Z.ndim == 0:\n Z = Z[()]\n\n # zprob uses upper tail, so Z needs to be positive\n return delayed(KurtosistestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_normaltest_normaltest.return.delayed_NormaltestResult_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_normaltest_normaltest.return.delayed_NormaltestResult_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 308, "end_line": 318, "span_ids": ["normaltest"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef normaltest(a, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n\n s, _ = skewtest(a, axis)\n k, _ = kurtosistest(a, axis)\n k2 = s * s + k * k\n return delayed(NormaltestResult, nout=2)(k2, delayed(distributions.chi2.sf)(k2, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_f_oneway_f_oneway.return.delayed_F_onewayResult_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_f_oneway_f_oneway.return.delayed_F_onewayResult_n", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 321, "end_line": 353, "span_ids": ["f_oneway"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef f_oneway(*args):\n # args = [np.asarray(arg, dtype=float) for arg in args]\n # ANOVA on N groups, each in its own array\n num_groups = len(args)\n alldata = da.concatenate(args)\n bign = len(alldata)\n\n # Determine the mean of the data, and subtract that from all inputs to a\n # variance (via sum_of_sq / sq_of_sum) calculation. 
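The combination step in `normaltest` above is simply `k2 = s*s + k*k` from the skewtest and kurtosistest statistics, referred to a chi-squared distribution with 2 degrees of freedom. A numeric sketch, assuming dask and scipy are installed; since the implementation is adapted from scipy, the results should agree closely:

```python
import numpy as np
import dask.array as da
import scipy.stats
import dask.array.stats as dask_stats

rng = np.random.default_rng(1)
a = rng.normal(size=200)
res = dask_stats.normaltest(da.from_array(a, chunks=50)).compute()
expected = scipy.stats.normaltest(a)
print(np.allclose(res.statistic, expected.statistic),
      np.allclose(res.pvalue, expected.pvalue))
```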
Variance is invariant\n # to a shift in location, and centering all data around zero vastly\n # improves numerical stability.\n offset = alldata.mean()\n alldata -= offset\n\n sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))\n ssbn = 0\n for a in args:\n ssbn += _square_of_sums(a - offset) / float(len(a))\n\n # Naming: variables ending in bn/b are for \"between treatments\", wn/w are\n # for \"within treatments\"\n ssbn -= _square_of_sums(alldata) / float(bign)\n sswn = sstot - ssbn\n dfbn = num_groups - 1\n dfwn = bign - num_groups\n msb = ssbn / float(dfbn)\n msw = sswn / float(dfwn)\n f = msb / msw\n\n prob = _fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf\n\n return delayed(F_onewayResult, nout=2)(f, prob)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_moment__equal_var_ttest_denom.return.df_denom": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_moment__equal_var_ttest_denom.return.df_denom", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 369, "end_line": 391, "span_ids": ["moment", "impl:24", "_equal_var_ttest_denom"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef moment(a, moment=1, axis=0, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n return da.moment(a, moment, axis=axis)\n\n\n# -------\n# Helpers\n# -------\n# Don't really want to do all of scipy.special (or do we?)\n\n_xlogy = wrap_elemwise(special.xlogy, source=special)\n_fdtrc = wrap_elemwise(special.fdtrc, source=special)\n\n\ndef _equal_var_ttest_denom(v1, n1, v2, n2):\n df = n1 + n2 - 2.0\n svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df\n denom = da.sqrt(svar * (1.0 / n1 + 1.0 / n2)) # XXX: np -> da\n return df, denom", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__unequal_var_ttest_denom__unequal_var_ttest_denom.return.df_denom": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__unequal_var_ttest_denom__unequal_var_ttest_denom.return.df_denom", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 394, "end_line": 404, "span_ids": ["_unequal_var_ttest_denom"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _unequal_var_ttest_denom(v1, n1, v2, n2):\n vn1 = v1 / n1\n vn2 = v2 / n2\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n df = (vn1 + vn2) ** 2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))\n\n # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).\n # Hence it doesn't matter what df is as long as it's not NaN.\n df = da.where(da.isnan(df), 1, df) # XXX: np -> da\n denom = da.sqrt(vn1 + vn2)\n return df, denom", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__ttest_ind_from_stats__count.if_axis_is_None_.else_.return.x_shape_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__ttest_ind_from_stats__count.if_axis_is_None_.else_.return.x_shape_axis_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 394, "end_line": 421, "span_ids": ["_ttest_finish", "_count", "_ttest_ind_from_stats"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _ttest_ind_from_stats(mean1, mean2, denom, df):\n\n d = mean1 - mean2\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n t = da.divide(d, denom)\n t, prob = _ttest_finish(df, t)\n\n return (t, prob)\n\n\ndef _ttest_finish(df, t):\n \"\"\"Common code between all 3 t-test functions.\"\"\"\n # XXX: np.abs -> da.absolute\n # XXX: delayed(distributions.t.sf)\n prob = (\n delayed(distributions.t.sf)(da.absolute(t), df) * 2\n ) # use np.abs to get upper tail\n if t.ndim == 0:\n t = t[()]\n\n return t, prob\n\n\ndef _count(x, axis=None):\n if axis is None:\n return x.size\n else:\n return x.shape[axis]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__sum_of_squares__sum_of_squares.return.da_sum_a_a_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__sum_of_squares__sum_of_squares.return.da_sum_a_a_axis_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 424, "end_line": 443, "span_ids": ["_sum_of_squares"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sum_of_squares(a, axis=0):\n 
\"\"\"\n Squares each element of the input array, and returns the sum(s) of that.\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate. Default is 0. If None, compute over\n the whole array `a`.\n Returns\n -------\n sum_of_squares : ndarray\n The sum along the given axis for (a**2).\n See also\n --------\n _square_of_sums : The square(s) of the sum(s) (the opposite of\n `_sum_of_squares`).\n \"\"\"\n return da.sum(a * a, axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__square_of_sums_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py__square_of_sums_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 446, "end_line": 466, "span_ids": ["_square_of_sums"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _square_of_sums(a, axis=0):\n \"\"\"\n Sums elements of the input array, and returns the square(s) of that sum.\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or None, optional\n Axis along which to calculate. Default is 0. 
If None, compute over\n the whole array `a`.\n Returns\n -------\n square_of_sums : float or ndarray\n The square of the sum over `axis`.\n See also\n --------\n _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).\n \"\"\"\n s = da.sum(a, axis)\n return s * s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_math_text_style._font_size_1_0rem_font_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_math_text_style._font_size_1_0rem_font_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 40, "span_ids": ["imports", "svg", "impl"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport re\n\nimport numpy as np\n\n\ndef svg(chunks, size=200, **kwargs):\n \"\"\"Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n shape = tuple(map(sum, chunks))\n if np.isnan(shape).any(): # don't support unknown sizes\n raise NotImplementedError(\n \"Can't generate SVG with unknown chunk sizes.\\n\\n\"\n \" A possible solution is with x.compute_chunk_sizes()\"\n )\n if not all(shape):\n raise NotImplementedError(\"Can't generate SVG with 0-length dimensions\")\n if len(chunks) == 0:\n raise NotImplementedError(\"Can't generate SVG with 0 dimensions\")\n if len(chunks) == 1:\n return svg_1d(chunks, size=size, **kwargs)\n elif len(chunks) == 2:\n return svg_2d(chunks, size=size, **kwargs)\n elif len(chunks) == 3:\n return svg_3d(chunks, size=size, **kwargs)\n else:\n return svg_nd(chunks, size=size, **kwargs)\n\n\ntext_style = 'font-size=\"1.0rem\" font-weight=\"100\" text-anchor=\"middle\"'", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_2d_svg_2d.return.header_n_join_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_2d_svg_2d.return.header_n_join_lines_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 72, "span_ids": ["svg_2d"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_2d(chunks, 
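A minimal use of the `svg` dispatcher above, assuming dask is installed (the module path `dask.array.svg` is internal and could move): pass the chunks tuple directly and get back an SVG string, with the 1-d, 2-d, 3-d, or n-d renderer chosen from the number of dimensions.

```python
from dask.array.svg import svg

text = svg(((5, 5), (4, 4)))  # chunks of a 10x8 array -> svg_2d path
print(text.splitlines()[0])   # the opening <svg width=... height=...> header
```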
offset=(0, 0), skew=(0, 0), size=200, sizes=None):\n shape = tuple(map(sum, chunks))\n sizes = sizes or draw_sizes(shape, size=size)\n y, x = grid_points(chunks, sizes)\n\n lines, (min_x, max_x, min_y, max_y) = svg_grid(\n x, y, offset=offset, skew=skew, size=size\n )\n\n header = (\n '\\n'\n % (max_x + 50, max_y + 50)\n )\n footer = \"\\n\"\n\n if shape[0] >= 100:\n rotate = -90\n else:\n rotate = 0\n\n text = [\n \"\",\n \" \",\n ' %d'\n % (max_x / 2, max_y + 20, text_style, shape[1]),\n ' %d'\n % (max_x + 20, max_y / 2, text_style, rotate, max_x + 20, max_y / 2, shape[0]),\n ]\n\n return header + \"\\n\".join(lines + text) + footer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_3d_svg_3d.return.header_n_join_xy_z": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_3d_svg_3d.return.header_n_join_xy_z", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 129, "span_ids": ["svg_3d"], "tokens": 581}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_3d(chunks, size=200, sizes=None, offset=(0, 0)):\n shape = tuple(map(sum, chunks))\n sizes = sizes or draw_sizes(shape, size=size)\n x, y, z = grid_points(chunks, sizes)\n ox, oy = offset\n\n xy, (mnx, mxx, mny, mxy) = svg_grid(\n x / 1.7, y, offset=(ox + 10, oy + 0), skew=(1, 0), size=size\n )\n\n zx, (_, _, _, max_x) = svg_grid(\n z, x / 1.7, offset=(ox + 10, oy + 0), skew=(0, 1), size=size\n )\n zy, (min_z, max_z, min_y, max_y) = svg_grid(\n z, y, offset=(ox + max_x + 10, oy + max_x), skew=(0, 0), size=size\n )\n\n header = (\n '\\n'\n % (max_z + 50, max_y + 50)\n )\n footer = \"\\n\"\n\n if shape[1] >= 100:\n rotate = -90\n else:\n rotate = 0\n\n text = [\n \"\",\n \" \",\n ' %d'\n % ((min_z + max_z) / 2, max_y + 20, text_style, shape[2]),\n ' %d'\n % (\n max_z + 20,\n (min_y + max_y) / 2,\n text_style,\n rotate,\n max_z + 20,\n (min_y + max_y) / 2,\n shape[1],\n ),\n ' %d'\n % (\n (mnx + mxx) / 2 - 10,\n mxy - (mxx - mnx) / 2 + 20,\n text_style,\n (mnx + mxx) / 2 - 10,\n mxy - (mxx - mnx) / 2 + 20,\n shape[0],\n ),\n ]\n\n return header + \"\\n\".join(xy + zx + zy + text) + footer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_nd_svg_nd.return.header_n_n_join_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_nd_svg_nd.return.header_n_n_join_out_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 160, "span_ids": ["svg_nd"], "tokens": 
314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_nd(chunks, size=200):\n if len(chunks) % 3 == 1:\n chunks = ((1,),) + chunks\n shape = tuple(map(sum, chunks))\n sizes = draw_sizes(shape, size=size)\n\n chunks2 = chunks\n sizes2 = sizes\n out = []\n left = 0\n total_height = 0\n while chunks2:\n n = len(chunks2) % 3 or 3\n o = svg(chunks2[:n], sizes=sizes2[:n], offset=(left, 0))\n chunks2 = chunks2[n:]\n sizes2 = sizes2[n:]\n\n lines = o.split(\"\\n\")\n header = lines[0]\n height = float(re.search(r'height=\"(\\d*\\.?\\d*)\"', header).groups()[0])\n total_height = max(total_height, height)\n width = float(re.search(r'width=\"(\\d*\\.?\\d*)\"', header).groups()[0])\n left += width + 10\n o = \"\\n\".join(lines[1:-1]) # remove header and footer\n\n out.append(o)\n\n header = (\n '\\n'\n % (left, total_height)\n )\n footer = \"\\n\"\n return header + \"\\n\\n\".join(out) + footer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_lines_svg_lines.return.lines": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_lines_svg_lines.return.lines", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 167, "end_line": 190, "span_ids": ["svg_lines"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_lines(x1, y1, x2, y2, max_n=20):\n \"\"\"Convert points into lines of text for an SVG plot\n\n Examples\n --------\n >>> svg_lines([0, 1], [0, 0], [10, 11], [1, 1]) # doctest: +NORMALIZE_WHITESPACE\n [' ',\n ' ']\n \"\"\"\n n = len(x1)\n\n if n > max_n:\n indices = np.linspace(0, n - 1, max_n, dtype=\"int\")\n else:\n indices = range(n)\n\n lines = [\n ' ' % (x1[i], y1[i], x2[i], y2[i])\n for i in indices\n ]\n\n lines[0] = lines[0].replace(\" /\", ' style=\"stroke-width:2\" /')\n lines[-1] = lines[-1].replace(\" /\", ' style=\"stroke-width:2\" /')\n return lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_grid_svg_grid.return.h_lines_v_lines_rect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_grid_svg_grid.return.h_lines_v_lines_rect_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 193, "end_line": 246, "span_ids": 
["svg_grid"], "tokens": 554}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_grid(x, y, offset=(0, 0), skew=(0, 0), size=200):\n \"\"\"Create lines of SVG text that show a grid\n\n Parameters\n ----------\n x: numpy.ndarray\n y: numpy.ndarray\n offset: tuple\n translational displacement of the grid in SVG coordinates\n skew: tuple\n \"\"\"\n # Horizontal lines\n x1 = np.zeros_like(y) + offset[0]\n y1 = y + offset[1]\n x2 = np.full_like(y, x[-1]) + offset[0]\n y2 = y + offset[1]\n\n if skew[0]:\n y2 += x.max() * skew[0]\n if skew[1]:\n x1 += skew[1] * y\n x2 += skew[1] * y\n\n min_x = min(x1.min(), x2.min())\n min_y = min(y1.min(), y2.min())\n max_x = max(x1.max(), x2.max())\n max_y = max(y1.max(), y2.max())\n max_n = size // 6\n\n h_lines = [\"\", \"  <!-- Horizontal lines -->\"] + svg_lines(x1, y1, x2, y2, max_n)\n\n # Vertical lines\n x1 = x + offset[0]\n y1 = np.zeros_like(x) + offset[1]\n x2 = x + offset[0]\n y2 = np.full_like(x, y[-1]) + offset[1]\n\n if skew[0]:\n y1 += skew[0] * x\n y2 += skew[0] * x\n if skew[1]:\n x2 += skew[1] * y.max()\n\n v_lines = [\"\", \"  <!-- Vertical lines -->\"] + svg_lines(x1, y1, x2, y2, max_n)\n\n color = \"ECB172\" if len(x) < max_n and len(y) < max_n else \"8B4903\"\n corners = f\"{x1[0]},{y1[0]} {x1[-1]},{y1[-1]} {x2[-1]},{y2[-1]} {x2[0]},{y2[0]}\"\n rect = [\n \"\",\n \"  <!-- Colored Rectangle -->\",\n f'  <polygon points=\"{corners}\" style=\"fill:#{color}A0;stroke-width:0\"/>',\n ]\n\n return h_lines + v_lines + rect, (min_x, max_x, min_y, max_y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_1d_draw_sizes.return.tuple_size_r_for_r_in_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_svg_1d_draw_sizes.return.tuple_size_r_for_r_in_r", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 249, "end_line": 264, "span_ids": ["grid_points", "svg_1d", "draw_sizes"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svg_1d(chunks, sizes=None, **kwargs):\n return svg_2d(((1,),) + chunks, **kwargs)\n\n\ndef grid_points(chunks, sizes):\n cumchunks = [np.cumsum((0,) + c) for c in chunks]\n points = [x * size / x[-1] for x, size in zip(cumchunks, sizes)]\n return points\n\n\ndef draw_sizes(shape, size=200):\n \"\"\"Get size in pixels for all dimensions\"\"\"\n mx = max(shape)\n ratios = [mx / max(0.1, d) for d in shape]\n ratios = [ratio_response(r) for r in ratios]\n return tuple(size / r for r in ratios)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_ratio_response_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/svg.py_ratio_response_", "embedding": null, "metadata": {"file_path": "dask/array/svg.py", "file_name": "svg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 255, "end_line": 270, "span_ids": ["ratio_response"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ratio_response(x):\n \"\"\"How we display actual size ratios\n\n Common ratios in sizes span several orders of magnitude,\n which is hard for us to perceive.\n\n We keep ratios in the 1-3 range accurate, and then apply a logarithm to\n values up until about 100 or so, at which point we stop scaling.\n \"\"\"\n if x < math.e:\n return x\n elif x <= 100:\n return math.log(x + 12.4) # f(e) == e\n else:\n return math.log(100 + 12.4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_tensordot_test_array_function_sparse_tensordot.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_sparse_tensordot_test_array_function_sparse_tensordot.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 122, "span_ids": ["test_array_function_sparse_tensordot"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_function_sparse_tensordot():\n sparse = pytest.importorskip(\"sparse\")\n x = np.random.random((2, 3, 4))\n x[x < 0.9] = 0\n y = np.random.random((4, 3, 2))\n y[y < 0.9] = 0\n\n xx = sparse.COO(x)\n yy = sparse.COO(y)\n\n assert_eq(\n np.tensordot(x, y, axes=(2, 0)), np.tensordot(xx, yy, axes=(2, 0)).todense()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_cupy_svd_test_array_function_cupy_svd.assert_eq_v_v_base_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_function_cupy_svd_test_array_function_cupy_svd.assert_eq_v_v_base_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 137, "span_ids": ["test_array_function_cupy_svd"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [(100, 100), (500, 100)])\ndef test_array_function_cupy_svd(chunks):\n cupy = pytest.importorskip(\"cupy\")\n x = cupy.random.random((500, 100))\n\n y = da.from_array(x, chunks=chunks, asarray=False)\n\n u_base, s_base, v_base = da.linalg.svd(y)\n u, s, v = np.linalg.svd(y)\n\n assert_eq(u, u_base)\n assert_eq(s, s_base)\n assert_eq(v, v_base)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_unregistered_func_test_unregistered_func.assert_eq_xx_yy_check_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_unregistered_func_test_unregistered_func.assert_eq_xx_yy_check_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 140, "end_line": 177, "span_ids": ["test_unregistered_func"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: np.concatenate([x, x, x]),\n lambda x: np.cov(x, x),\n lambda x: np.dot(x, x),\n lambda x: np.dstack((x, x)),\n lambda x: np.flip(x, axis=0),\n lambda x: np.hstack((x, x)),\n lambda x: np.matmul(x, x),\n lambda x: np.mean(x),\n lambda x: np.stack([x, x]),\n lambda x: np.sum(x),\n lambda x: np.var(x),\n lambda x: np.vstack((x, x)),\n lambda x: np.linalg.norm(x),\n ],\n)\ndef test_unregistered_func(func):\n # Wrap a procol-based encapsulated ndarray\n x = EncapsulateNDArray(np.random.random((100, 100)))\n\n # See if Dask holds the array fine\n y = da.from_array(x, chunks=(50, 50))\n\n # Check if it's an equivalent array\n assert_eq(x, y, check_meta=False, check_type=False)\n\n # Perform two NumPy functions, one on the\n # Encapsulated array\n xx = func(x)\n\n # And one on the Dask array holding these\n # encapsulated arrays\n yy = func(y)\n\n # Check that they are equivalent arrays.\n assert_eq(xx, yy, check_meta=False, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_np_test_meta_from_array.assert_meta_from_array_np": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_np_test_meta_from_array.assert_meta_from_array_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_utils.py", "file_name": "test_array_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 48, "span_ids": ["imports", "test_meta_from_array"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.core import Array\nfrom dask.array.utils import assert_eq, meta_from_array\nfrom dask.local import get_sync\n\nasarrays = [np.asarray]\n\ntry:\n import sparse\n\n asarrays.append(sparse.COO.from_numpy)\nexcept ImportError:\n pass\n\ntry:\n import cupy\n\n asarrays.append(cupy.asarray)\nexcept ImportError:\n pass\n\n\n@pytest.mark.parametrize(\"asarray\", asarrays)\ndef test_meta_from_array(asarray):\n x = np.array(1)\n assert meta_from_array(x, ndim=1).shape == (0,)\n\n x = np.ones((1, 2, 3), dtype=\"float32\")\n x = asarray(x)\n\n assert meta_from_array(x).shape == (0, 0, 0)\n assert meta_from_array(x).dtype == \"float32\"\n assert type(meta_from_array(x)) is type(x)\n\n assert meta_from_array(x, ndim=2).shape == (0, 0)\n assert meta_from_array(x, ndim=4).shape == (0, 0, 0, 0)\n assert meta_from_array(x, dtype=\"float64\").dtype == \"float64\"\n assert meta_from_array(x, dtype=float).dtype == \"float64\"\n\n x = da.ones((1,))\n assert isinstance(meta_from_array(x), np.ndarray)\n\n assert meta_from_array(123) == 123\n assert meta_from_array(\"foo\") == \"foo\"\n assert meta_from_array(np.dtype(\"float32\")) == np.dtype(\"float32\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_collections_test_optimize_blockwise.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_collections_test_optimize_blockwise.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 324, "span_ids": ["imports", "test_optimize_blockwise", "test_index_subs"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import collections\nfrom operator import add\n\nimport numpy as np\nimport pytest\n\nimport dask\nimport dask.array as da\nfrom dask.array.utils import assert_eq\nfrom dask.blockwise import (\n _BLOCKWISE_DEFAULT_PREFIX,\n Blockwise,\n _unique_dep,\n index_subs,\n optimize_blockwise,\n rewrite_blockwise,\n)\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.utils_test 
import dec, inc\n\na, b, c, d, e, f, g = \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"\n_0, _1, _2, _3, _4, _5, _6, _7, _8, _9 = (\n f\"{_BLOCKWISE_DEFAULT_PREFIX}{i}\" for i in range(10)\n)\ni, j, k = \"i\", \"j\", \"k\"\n\n\ndef test_index_subs():\n assert index_subs(tuple(\"ij\"), {\"i\": \"j\", \"j\": \"i\"}) == tuple(\"ji\")\n\n\ndef test_optimize_blockwise():\n x = da.ones(10, chunks=(5,))\n y = (((x + 1) + 2) + 3) + 4\n\n dsk = da.optimization.optimize_blockwise(y.dask)\n\n assert isinstance(dsk, HighLevelGraph)\n\n assert (\n len([layer for layer in dsk.layers.values() if isinstance(layer, Blockwise)])\n == 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_diamond_fusion_test_blockwise_diamond_fusion.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_diamond_fusion_test_blockwise_diamond_fusion.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 236, "end_line": 250, "span_ids": ["test_blockwise_diamond_fusion"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_diamond_fusion():\n x = da.ones(10, chunks=(5,))\n y = ((x + 1) + 2) + 3\n a = y * 2\n b = y * 3\n c = a + b\n d = ((c + 1) + 2) + 3\n\n dsk = da.optimization.optimize_blockwise(d.dask)\n assert isinstance(dsk, HighLevelGraph)\n\n assert (\n len([layer for layer in dsk.layers.values() if isinstance(layer, Blockwise)])\n == 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_non_blockwise_output_test_blockwise_non_blockwise_output.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_non_blockwise_output_test_blockwise_non_blockwise_output.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 253, "end_line": 279, "span_ids": ["test_blockwise_non_blockwise_output"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_non_blockwise_output():\n x = da.ones(10, chunks=(5,))\n y = ((x + 1) + 2) + 3\n w = y.sum()\n z = ((y * 2) * 3) * 4\n\n z_top_before = 
tuple(z.dask.layers[z.name].indices)\n (zz,) = dask.optimize(z)\n z_top_after = tuple(z.dask.layers[z.name].indices)\n assert z_top_before == z_top_after, \"z_top mutated\"\n\n dsk = optimize_blockwise(z.dask, keys=list(dask.core.flatten(z.__dask_keys__())))\n assert isinstance(dsk, HighLevelGraph)\n assert (\n len([layer for layer in dsk.layers.values() if isinstance(layer, Blockwise)])\n == 1\n )\n\n dsk = optimize_blockwise(\n HighLevelGraph.merge(w.dask, z.dask),\n keys=list(dask.core.flatten([w.__dask_keys__(), z.__dask_keys__()])),\n )\n assert isinstance(dsk, HighLevelGraph)\n assert (\n len([layer for layer in z.dask.layers.values() if isinstance(layer, Blockwise)])\n >= 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_top_len_test_blockwise_names.assert_y_name_startswith_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_top_len_test_blockwise_names.assert_y_name_startswith_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 282, "end_line": 325, "span_ids": ["test_common_token_names_args", "test_inner_compute", "test_top_len", "test_common_token_names_kwargs", "test_blockwise_names"], "tokens": 370}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_top_len():\n x = da.ones(10, chunks=(5,))\n y = x[:, None] * x[None, :]\n\n d = y.dask.layers[y.name]\n assert len(d) == 4\n\n\ndef test_inner_compute():\n x = da.ones(10, chunks=(5,)) + 1 + 2 + 3\n a = x.sum()\n y = x * 2 * 3 * 4\n b = y.sum()\n z = x * 2 * 3\n\n dask.compute(x, a, y, b, z)\n\n\n@pytest.mark.parametrize(\"name\", [\"_\", \"_0\", \"_1\", \".\", \".0\"])\ndef test_common_token_names_args(name):\n x = np.array([\"a\", \"bb\", \"ccc\"], dtype=object)\n d = da.from_array(x, chunks=2)\n\n result = da.blockwise(add, \"i\", d, \"i\", name, None, dtype=object)\n expected = x + name\n\n assert_eq(result, expected)\n\n\n@pytest.mark.parametrize(\"name\", [\"_0\", \"_1\", \".\", \".0\", \"_\"])\ndef test_common_token_names_kwargs(name):\n x = np.array([\"a\", \"bb\", \"ccc\"], dtype=object)\n d = da.from_array(x, chunks=2)\n\n result = da.blockwise(lambda x, y: x + y, \"i\", d, \"i\", y=name, dtype=object)\n expected = x + name\n\n assert_eq(result, expected)\n\n\ndef test_blockwise_names():\n x = da.ones(5, chunks=(2,))\n y = da.blockwise(add, \"i\", x, \"i\", dtype=x.dtype)\n assert y.name.startswith(\"add\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_test_blockwise_new_axes.assert_eq_y_np_ones_4_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_test_blockwise_new_axes.assert_eq_y_np_ones_4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 297, "end_line": 327, "span_ids": ["test_blockwise_new_axes"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_new_axes():\n def f(x):\n return x[:, None] * np.ones((1, 7))\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"aq\", x, \"a\", new_axes={\"q\": 7}, concatenate=True, dtype=x.dtype\n )\n assert y.chunks == ((2, 2, 1), (7,))\n assert_eq(y, np.ones((5, 7)))\n\n def f(x):\n return x[None, :] * np.ones((7, 1))\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"qa\", x, \"a\", new_axes={\"q\": 7}, concatenate=True, dtype=x.dtype\n )\n assert y.chunks == ((7,), (2, 2, 1))\n assert_eq(y, np.ones((7, 5)))\n\n def f(x):\n y = x.sum(axis=1)\n return y[:, None] * np.ones((1, 5))\n\n x = da.ones((4, 6), chunks=(2, 2))\n y = da.blockwise(\n f, \"aq\", x, \"ab\", new_axes={\"q\": 5}, concatenate=True, dtype=x.dtype\n )\n assert y.chunks == ((2, 2), (5,))\n assert_eq(y, np.ones((4, 5)) * 6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_2_test_blockwise_stacked_new_axes.assert_eq_z_np_ones_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_2_test_blockwise_stacked_new_axes.assert_eq_z_np_ones_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 330, "end_line": 362, "span_ids": ["test_blockwise_stacked_new_axes", "test_blockwise_new_axes_2"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_new_axes_2():\n x = da.ones((2, 2), chunks=(1, 1))\n\n def func(x):\n return np.stack([x, -x], axis=-1)\n\n y = da.blockwise(\n func,\n (\"x\", \"y\", \"sign\"),\n x,\n (\"x\", \"y\"),\n dtype=x.dtype,\n concatenate=True,\n new_axes={\"sign\": 2},\n )\n\n assert_eq(y, y)\n\n\n@pytest.mark.parametrize(\"concatenate\", [True, False])\ndef test_blockwise_stacked_new_axes(concatenate):\n def f(x):\n return x[..., None] * np.ones((1, 7))\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"aq\", x, \"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n z = da.blockwise(\n f, \"abq\", y, \"ab\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n assert z.chunks == ((2, 2, 1), 
(7,), (7,))\n assert_eq(z, np.ones((5, 7, 7)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_front_test_blockwise_stacked_new_axes_front.assert_eq_w_np_ones_7_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_front_test_blockwise_stacked_new_axes_front.assert_eq_w_np_ones_7_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 365, "end_line": 386, "span_ids": ["test_blockwise_stacked_new_axes_front"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"concatenate\", [True, False])\ndef test_blockwise_stacked_new_axes_front(concatenate):\n def f(x):\n if isinstance(x, list):\n x = np.concatenate(x)\n return x[None, ...] * np.ones(7)[(slice(None),) + (None,) * x.ndim]\n\n x = da.ones(5, chunks=2)\n y = da.blockwise(\n f, \"qa\", x, \"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n z = da.blockwise(\n f, \"qab\", y, \"ab\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n assert z.chunks == ((7,), (7,), (2, 2, 1))\n assert_eq(z, np.ones((7, 7, 5)))\n\n w = da.blockwise(\n lambda x: x[:, 0, 0], \"a\", z, \"abc\", dtype=x.dtype, concatenate=True\n )\n assert w.chunks == ((7,),)\n assert_eq(w, np.ones((7,)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_same_dim_test_blockwise_stacked_new_axes_same_dim.assert_eq_c_np_ones_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_stacked_new_axes_same_dim_test_blockwise_stacked_new_axes_same_dim.assert_eq_c_np_ones_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 389, "end_line": 404, "span_ids": ["test_blockwise_stacked_new_axes_same_dim"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"concatenate\", [True, False])\ndef test_blockwise_stacked_new_axes_same_dim(concatenate):\n def f(x):\n return x[..., None] * np.ones((1, 7))\n\n x = da.ones(5, chunks=2)\n y = da.zeros(5, chunks=2)\n a = da.blockwise(\n f, \"aq\", x, 
\"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n b = da.blockwise(\n f, \"aq\", y, \"a\", new_axes={\"q\": 7}, concatenate=concatenate, dtype=x.dtype\n )\n c = a + b\n assert c.chunks == ((2, 2, 1), (7,))\n assert_eq(c, np.ones((5, 7)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_chunked_test_blockwise_new_axes_chunked.assert_eq_y_np_array_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_new_axes_chunked_test_blockwise_new_axes_chunked.assert_eq_y_np_array_0", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 407, "end_line": 414, "span_ids": ["test_blockwise_new_axes_chunked"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_new_axes_chunked():\n def f(x):\n return x[None, :] * 2\n\n x = da.arange(0, 6, 1, chunks=2, dtype=np.int32)\n y = da.blockwise(f, \"qa\", x, \"a\", new_axes={\"q\": (1, 1)}, dtype=x.dtype)\n assert y.chunks == ((1, 1), (2, 2, 2))\n assert_eq(y, np.array([[0, 2, 4, 6, 8, 10], [0, 2, 4, 6, 8, 10]], np.int32))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_no_args_test_blockwise_kwargs.assert_eq_y_np_ones_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_no_args_test_blockwise_kwargs.assert_eq_y_np_ones_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 417, "end_line": 443, "span_ids": ["test_blockwise_no_args", "test_blockwise_no_array_args", "test_blockwise_kwargs"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_no_args():\n def f():\n return np.ones((2, 3), np.float32)\n\n x = da.blockwise(f, \"ab\", new_axes={\"a\": 2, \"b\": (3, 3)}, dtype=np.float32)\n assert x.chunks == ((2,), (3, 3))\n assert_eq(x, np.ones((2, 6), np.float32))\n\n\ndef test_blockwise_no_array_args():\n def f(dtype):\n return np.ones((2, 3), dtype)\n\n x = da.blockwise(\n f, \"ab\", np.float32, None, new_axes={\"a\": 2, \"b\": (3, 3)}, dtype=np.float32\n )\n assert x.chunks == ((2,), (3, 3))\n assert_eq(x, np.ones((2, 6), 
np.float32))\n\n\ndef test_blockwise_kwargs():\n def f(a, b=0):\n return a + b\n\n x = da.ones(5, chunks=(2,))\n y = da.blockwise(f, \"i\", x, \"i\", b=10, dtype=x.dtype)\n assert_eq(y, np.ones(5) + 10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_chunks_test_blockwise_chunks.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_chunks_test_blockwise_chunks.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 446, "end_line": 487, "span_ids": ["test_blockwise_chunks"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_chunks():\n x = da.ones((5, 5), chunks=((2, 1, 2), (3, 2)))\n\n def double(a, axis=0):\n return np.concatenate([a, a], axis=axis)\n\n y = da.blockwise(\n double,\n \"ij\",\n x,\n \"ij\",\n adjust_chunks={\"i\": lambda n: 2 * n},\n axis=0,\n dtype=x.dtype,\n )\n assert y.chunks == ((4, 2, 4), (3, 2))\n assert_eq(y, np.ones((10, 5)))\n\n y = da.blockwise(\n double,\n \"ij\",\n x,\n \"ij\",\n adjust_chunks={\"j\": lambda n: 2 * n},\n axis=1,\n dtype=x.dtype,\n )\n assert y.chunks == ((2, 1, 2), (6, 4))\n assert_eq(y, np.ones((5, 10)))\n\n x = da.ones((10, 10), chunks=(5, 5))\n y = da.blockwise(\n double, \"ij\", x, \"ij\", axis=0, adjust_chunks={\"i\": 10}, dtype=x.dtype\n )\n assert y.chunks == ((10, 10), (5, 5))\n assert_eq(y, np.ones((20, 10)))\n\n y = da.blockwise(\n double, \"ij\", x, \"ij\", axis=0, adjust_chunks={\"i\": (10, 10)}, dtype=x.dtype\n )\n assert y.chunks == ((10, 10), (5, 5))\n assert_eq(y, np.ones((20, 10)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_bag_array_conversion_test_svd.assert_eq_z_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_bag_array_conversion_test_svd.assert_eq_z_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 656, "end_line": 671, "span_ids": ["test_bag_array_conversion", "test_svd"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bag_array_conversion():\n import dask.bag as db\n\n b = db.range(10, npartitions=1)\n (x,) = 
b.map_partitions(np.asarray).to_delayed()\n (x,) = (da.from_delayed(a, shape=(10,), dtype=int) for a in [x])\n z = da.concatenate([x])\n assert_eq(z, np.arange(10), check_graph=False)\n\n\ndef test_svd():\n x = da.ones((1, 1), chunks=(1, 1))\n y = x * 2\n u, s, v = da.linalg.svd(y)\n z = y + u\n assert_eq(z, z)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_args_delayed_test_args_delayed.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_args_delayed_test_args_delayed.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 528, "end_line": 536, "span_ids": ["test_args_delayed"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_args_delayed():\n x = da.arange(10, chunks=(5,))\n y = dask.delayed(lambda: 100)()\n\n z = da.blockwise(add, \"i\", x, \"i\", y, None, dtype=x.dtype)\n assert_eq(z, np.arange(10) + 100)\n\n z = da.blockwise(lambda x, y: x + y, \"i\", x, \"i\", y=y, dtype=x.dtype)\n assert_eq(z, np.arange(10) + 100)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_namedtuple_test_namedtuple.assert_eq_A_B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_namedtuple_test_namedtuple.assert_eq_A_B_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 685, "end_line": 696, "span_ids": ["test_namedtuple"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"tup\", [(1, 2), collections.namedtuple(\"foo\", [\"a\", \"b\"])(1, 2)] # type: ignore\n)\ndef test_namedtuple(tup):\n A = da.random.random((20, 20), chunks=(10, 10))\n\n def f(data, x):\n return data\n\n B = da.blockwise(f, (\"d1\", \"d2\"), A, (\"d1\", \"d2\"), x=tup, dtype=A.dtype)\n\n assert_eq(A, B)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_validate_top_inputs_test_validate_top_inputs.assert_i_in_str_info_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_validate_top_inputs_test_validate_top_inputs.assert_i_in_str_info_va", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 553, "end_line": 567, "span_ids": ["test_validate_top_inputs"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_validate_top_inputs():\n A = da.random.random((20, 20), chunks=(10, 10))\n\n with pytest.raises(ValueError) as info:\n da.blockwise(inc, \"jk\", A, \"ij\", dtype=A.dtype)\n\n assert \"unknown dimension\" in str(info.value).lower()\n assert \"k\" in str(info.value)\n assert \"j\" not in str(info.value)\n\n with pytest.raises(ValueError) as info:\n da.blockwise(inc, \"ii\", A, \"ij\", dtype=A.dtype)\n\n assert \"repeated\" in str(info.value).lower()\n assert \"i\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_dont_merge_before_reductions_test_dont_merge_before_reductions.z_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_dont_merge_before_reductions_test_dont_merge_before_reductions.z_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 601, "end_line": 611, "span_ids": ["test_dont_merge_before_reductions"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_merge_before_reductions():\n x = da.ones(10, chunks=(5,))\n y = da.blockwise(inc, \"i\", x, \"i\", dtype=x.dtype)\n z = da.blockwise(sum, \"\", y, \"i\", dtype=y.dtype)\n w = da.blockwise(sum, \"\", z, \"\", dtype=y.dtype)\n\n dsk = optimize_blockwise(w.dask)\n\n assert len([d for d in dsk.layers.values() if isinstance(d, Blockwise)]) == 2\n\n z.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_atop_legacy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_atop_legacy_", "embedding": 
null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 729, "end_line": 747, "span_ids": ["test_non_hlg", "test_atop_legacy"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_atop_legacy():\n x = da.ones(10, chunks=(5,))\n with pytest.warns(\n UserWarning, match=\"The da.atop function has moved to da.blockwise\"\n ):\n y = da.atop(inc, \"i\", x, \"i\", dtype=x.dtype)\n z = da.blockwise(inc, \"i\", x, \"i\", dtype=x.dtype)\n assert_eq(y, z)\n assert y.name == z.name\n\n\ndef test_non_hlg():\n # Regression test for https://github.com/dask/dask/issues/5850\n a = da.from_array(np.ones(1, np.float64), chunks=(1,))\n a.dask = dict(a.dask) # Convert from HighLevelGraph to plain dict\n b = da.from_array(np.zeros(1, np.float64), chunks=(1,))\n x = a + b\n assert_eq(x, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_pytest_test_keepdims_wrapper_no_axis.assert_rwf_276": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_pytest_test_keepdims_wrapper_no_axis.assert_rwf_276", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["imports", "test_keepdims_wrapper_no_axis"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport operator\n\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array.chunk import coarsen, getitem, keepdims_wrapper\n\n\ndef test_keepdims_wrapper_no_axis():\n def summer(a, axis=None):\n return a.sum(axis=axis)\n\n summer_wrapped = keepdims_wrapper(summer)\n\n assert summer_wrapped != summer\n\n a = np.arange(24).reshape(1, 2, 3, 4)\n\n r = summer(a)\n rw = summer_wrapped(a, keepdims=True)\n rwf = summer_wrapped(a, keepdims=False)\n\n assert r.ndim == 0\n assert r.shape == tuple()\n assert r == 276\n\n assert rw.ndim == 4\n assert rw.shape == (1, 1, 1, 1)\n assert (rw == 276).all()\n\n assert rwf.ndim == 0\n assert rwf.shape == tuple()\n assert rwf == 276", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_one_axis_test_keepdims_wrapper_one_axis.assert_rwf_np_array_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_one_axis_test_keepdims_wrapper_one_axis.assert_rwf_np_array_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 62, "span_ids": ["test_keepdims_wrapper_one_axis"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keepdims_wrapper_one_axis():\n def summer(a, axis=None):\n return a.sum(axis=axis)\n\n summer_wrapped = keepdims_wrapper(summer)\n\n assert summer_wrapped != summer\n\n a = np.arange(24).reshape(1, 2, 3, 4)\n\n r = summer(a, axis=2)\n rw = summer_wrapped(a, axis=2, keepdims=True)\n rwf = summer_wrapped(a, axis=2, keepdims=False)\n\n assert r.ndim == 3\n assert r.shape == (1, 2, 4)\n assert (r == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()\n\n assert rw.ndim == 4\n assert rw.shape == (1, 2, 1, 4)\n assert (rw == np.array([[[[12, 15, 18, 21]], [[48, 51, 54, 57]]]])).all()\n\n assert rwf.ndim == 3\n assert rwf.shape == (1, 2, 4)\n assert (rwf == np.array([[[12, 15, 18, 21], [48, 51, 54, 57]]])).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_two_axes_test_keepdims_wrapper_two_axes.assert_rwf_np_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_keepdims_wrapper_two_axes_test_keepdims_wrapper_two_axes.assert_rwf_np_array_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 89, "span_ids": ["test_keepdims_wrapper_two_axes"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keepdims_wrapper_two_axes():\n def summer(a, axis=None):\n return a.sum(axis=axis)\n\n summer_wrapped = keepdims_wrapper(summer)\n\n assert summer_wrapped != summer\n\n a = np.arange(24).reshape(1, 2, 3, 4)\n\n r = summer(a, axis=(1, 3))\n rw = summer_wrapped(a, axis=(1, 3), keepdims=True)\n rwf = summer_wrapped(a, axis=(1, 3), keepdims=False)\n\n assert r.ndim == 2\n assert r.shape == (1, 3)\n assert (r == np.array([[60, 92, 124]])).all()\n\n assert rw.ndim == 4\n assert rw.shape == (1, 1, 3, 1)\n assert (rw == np.array([[[[60], [92], [124]]]])).all()\n\n assert rwf.ndim == 2\n assert rwf.shape == (1, 3)\n assert (rwf == np.array([[60, 92, 124]])).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_coarsen_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_chunk.py_test_coarsen_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_chunk.py", "file_name": "test_chunk.py", "file_type": "text/x-python", "category": "test", "start_line": 94, "end_line": 125, "span_ids": ["test_getitem", "test_integer_input", "test_coarsen"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_coarsen():\n x = np.random.randint(10, size=(24, 24))\n y = coarsen(np.sum, x, {0: 2, 1: 4})\n assert y.shape == (12, 6)\n assert y[0, 0] == np.sum(x[:2, :4])\n\n\n\"\"\"\ndef test_coarsen_on_uneven_shape():\n x = np.random.randint(10, size=(23, 24))\n y = coarsen(np.sum, x, {0: 2, 1: 4})\n assert y.shape == (12, 6)\n assert y[0, 0] == np.sum(x[:2, :4])\n assert eq(y[11, :], x[23, :])\n\"\"\"\n\n\ndef test_integer_input():\n assert da.zeros((4, 6), chunks=2).rechunk(3).chunks == ((3, 1), (3, 3))\n\n\ndef test_getitem():\n x = np.random.rand(1_000_000)\n y = getitem(x, slice(120, 122))\n\n assert y.flags.owndata\n assert not getitem(x, slice(1, None)).flags.owndata\n\n y_op = operator.getitem(x, slice(120, 122))\n assert not y_op.flags.owndata\n assert not operator.getitem(x, slice(1, None)).flags.owndata", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_pytest_test_arr_like.if_order_F_.else_.assert_not_np_isfortran_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_pytest_test_arr_like.if_order_F_.else_.assert_not_np_isfortran_d", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 72, "span_ids": ["imports", "test_arr_like"], "tokens": 539}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\nimport pytest\nfrom tlz import concat\n\nimport dask\nimport dask.array as da\nfrom dask.array.core import normalize_chunks\nfrom dask.array.utils import assert_eq, same_keys\n\n\n@pytest.mark.parametrize(\n \"funcname\",\n [\n \"empty_like\",\n \"empty\",\n \"ones_like\",\n \"ones\",\n \"zeros_like\",\n \"zeros\",\n \"full_like\",\n \"full\",\n ],\n)\n@pytest.mark.parametrize(\"cast_shape\", [tuple, list, np.asarray])\n@pytest.mark.parametrize(\"cast_chunks\", [tuple, list, np.asarray])\n@pytest.mark.parametrize(\"shape, chunks\", 
[((10, 10), (4, 4))])\n@pytest.mark.parametrize(\"name\", [None, \"my-name\"])\n@pytest.mark.parametrize(\"order\", [\"C\", \"F\"])\n@pytest.mark.parametrize(\"dtype\", [\"i4\"])\ndef test_arr_like(funcname, shape, cast_shape, dtype, cast_chunks, chunks, name, order):\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n shape = cast_shape(shape)\n chunks = cast_chunks(chunks)\n\n if \"full\" in funcname:\n old_np_func = np_func\n old_da_func = da_func\n\n np_func = lambda *a, **k: old_np_func(*a, fill_value=5, **k)\n da_func = lambda *a, **k: old_da_func(*a, fill_value=5, **k)\n\n dtype = np.dtype(dtype)\n\n if \"like\" in funcname:\n a = np.random.randint(0, 10, shape).astype(dtype)\n\n np_r = np_func(a, order=order)\n da_r = da_func(a, order=order, chunks=chunks, name=name)\n else:\n np_r = np_func(shape, order=order, dtype=dtype)\n da_r = da_func(shape, order=order, dtype=dtype, chunks=chunks, name=name)\n\n assert np_r.shape == da_r.shape\n assert np_r.dtype == da_r.dtype\n\n if \"empty\" not in funcname:\n assert (np_r == np.asarray(da_r)).all()\n\n if name is None:\n assert funcname.split(\"_\")[0] in da_r.name\n else:\n assert da_r.name == name\n\n if \"order\" == \"F\":\n assert np.isfortran(da_r.compute())\n else:\n assert not np.isfortran(da_r.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arr_like_shape_test_arr_like_shape.if_empty_not_in_funcnam.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arr_like_shape_test_arr_like_shape.if_empty_not_in_funcnam.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 109, "span_ids": ["test_arr_like_shape"], "tokens": 396}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"funcname, kwargs\",\n [\n (\"empty_like\", {}),\n (\"ones_like\", {}),\n (\"zeros_like\", {}),\n (\"full_like\", {\"fill_value\": 5}),\n ],\n)\n@pytest.mark.parametrize(\n \"shape, chunks, out_shape\",\n [\n ((10, 10), (4, 4), None),\n ((10, 10), (4, 4), (20, 3)),\n ((10, 10), (4), (20)),\n ((10, 10, 10), (4, 2), (5, 5)),\n ((2, 3, 5, 7), None, (3, 5, 7)),\n ((2, 3, 5, 7), (2, 5, 3), (3, 5, 7)),\n ((2, 3, 5, 7), (2, 5, 3, \"auto\", 3), (11,) + (2, 3, 5, 7)),\n ((2, 3, 5, 7), \"auto\", (3, 5, 7)),\n ],\n)\n@pytest.mark.parametrize(\"dtype\", [\"i4\"])\ndef test_arr_like_shape(funcname, kwargs, shape, dtype, chunks, out_shape):\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n a = np.random.randint(0, 10, shape).astype(dtype)\n np_r = np_func(a, shape=out_shape, **kwargs)\n da_r = da_func(a, chunks=chunks, shape=out_shape, **kwargs)\n\n assert np_r.shape == da_r.shape\n assert np_r.dtype == da_r.dtype\n\n if \"empty\" not in funcname:\n assert_eq(np_r, da_r)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_test_arange.assert_da_arange_10_chun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_test_arange.assert_da_arange_10_chun", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 146, "end_line": 191, "span_ids": ["test_arange"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arange():\n darr = da.arange(77, chunks=13)\n nparr = np.arange(77)\n assert_eq(darr, nparr)\n\n darr = da.arange(2, 13, chunks=5)\n nparr = np.arange(2, 13)\n assert_eq(darr, nparr)\n\n darr = da.arange(4, 21, 9, chunks=13)\n nparr = np.arange(4, 21, 9)\n assert_eq(darr, nparr)\n\n # negative steps\n darr = da.arange(53, 5, -3, chunks=5)\n nparr = np.arange(53, 5, -3)\n assert_eq(darr, nparr)\n\n darr = da.arange(77, chunks=13, dtype=float)\n nparr = np.arange(77, dtype=float)\n assert_eq(darr, nparr)\n\n darr = da.arange(2, 13, chunks=5, dtype=int)\n nparr = np.arange(2, 13, dtype=int)\n assert_eq(darr, nparr)\n assert sorted(da.arange(2, 13, chunks=5).dask) == sorted(\n da.arange(2, 13, chunks=5).dask\n )\n assert sorted(da.arange(77, chunks=13, dtype=float).dask) == sorted(\n da.arange(77, chunks=13, dtype=float).dask\n )\n\n # 0 size output\n darr = da.arange(0, 1, -0.5, chunks=20)\n nparr = np.arange(0, 1, -0.5)\n assert_eq(darr, nparr)\n\n darr = da.arange(0, -1, 0.5, chunks=20)\n nparr = np.arange(0, -1, 0.5)\n assert_eq(darr, nparr)\n\n # Unexpected or missing kwargs\n with pytest.raises(TypeError, match=\"whatsthis\"):\n da.arange(10, chunks=-1, whatsthis=1)\n\n assert da.arange(10).chunks == ((10,),)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_dtypes_test_arange_dtypes.assert_eq_a_np_a_da_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_dtypes_test_arange_dtypes.assert_eq_a_np_a_da_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 218, "span_ids": ["test_arange_dtypes"], "tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\n \"start,stop,step,dtype\",\n [\n (0, 1, 1, None), # int64\n (1.5, 2, 1, None), # float64\n (1, 2.5, 1, None), # float64\n (1, 2, 0.5, None), # float64\n (np.float32(1), np.float32(2), np.float32(1), None), # promoted to float64\n (np.int32(1), np.int32(2), np.int32(1), None), # promoted to int64\n (np.uint32(1), np.uint32(2), np.uint32(1), None), # promoted to int64\n (np.uint64(1), np.uint64(2), np.uint64(1), None), # promoted to float64\n (np.uint32(1), np.uint32(2), np.uint32(1), np.uint32),\n (np.uint64(1), np.uint64(2), np.uint64(1), np.uint64),\n # numpy.arange gives unexpected results\n # https://github.com/numpy/numpy/issues/11505\n # (1j, 2, 1, None),\n # (1, 2j, 1, None),\n # (1, 2, 1j, None),\n # (1+2j, 2+3j, 1+.1j, None),\n ],\n)\ndef test_arange_dtypes(start, stop, step, dtype):\n a_np = np.arange(start, stop, step, dtype=dtype)\n a_da = da.arange(start, stop, step, dtype=dtype, chunks=-1)\n assert_eq(a_np, a_da)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_cast_float_int_step_test_arange_cast_float_int_step.assert_eq_darr_nparr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_cast_float_int_step_test_arange_cast_float_int_step.assert_eq_darr_nparr_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 228, "span_ids": ["test_arange_cast_float_int_step"], "tokens": 106}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n reason=\"Casting floats to ints is not supported since edge\"\n \"behavior is not specified or guaranteed by NumPy.\"\n)\ndef test_arange_cast_float_int_step():\n darr = da.arange(3.3, -9.1, -0.25, chunks=3, dtype=\"i8\")\n nparr = np.arange(3.3, -9.1, -0.25, dtype=\"i8\")\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_float_step_test_arange_float_step.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_arange_float_step_test_arange_float_step.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 246, "span_ids": ["test_arange_float_step"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arange_float_step():\n darr = da.arange(2.0, 13.0, 0.3, chunks=4)\n nparr = np.arange(2.0, 13.0, 0.3)\n assert_eq(darr, nparr)\n\n darr = da.arange(7.7, 1.5, -0.8, chunks=3)\n nparr = np.arange(7.7, 1.5, -0.8)\n assert_eq(darr, nparr)\n\n darr = da.arange(0, 1, 0.01, chunks=20)\n nparr = np.arange(0, 1, 0.01)\n assert_eq(darr, nparr)\n\n darr = da.arange(0, 1, 0.03, chunks=20)\n nparr = np.arange(0, 1, 0.03)\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indices_wrong_chunks_test_indices_dimensions_chunks.with_dask_config_set_ar.assert_expected_actual": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indices_wrong_chunks_test_indices_dimensions_chunks.with_dask_config_set_ar.assert_expected_actual", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 265, "span_ids": ["test_indices_wrong_chunks", "test_indices_dimensions_chunks"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_indices_wrong_chunks():\n with pytest.raises(ValueError):\n da.indices((1,), chunks=tuple())\n\n\ndef test_indices_dimensions_chunks():\n chunks = ((1, 4, 2, 3), (5, 5))\n darr = da.indices((10, 10), chunks=chunks)\n assert darr.chunks == ((1, 1),) + chunks\n\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n shape = (10000, 10000)\n expected = normalize_chunks(\"auto\", shape=shape, dtype=int)\n result = da.indices(shape, chunks=\"auto\")\n # indices prepends a dimension\n actual = result.chunks[1:]\n assert expected == actual", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_empty_indicies_test_empty_indicies.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_empty_indicies_test_empty_indicies.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 268, "end_line": 291, "span_ids": ["test_empty_indicies"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_empty_indicies():\n darr = da.indices(tuple(), chunks=tuple())\n nparr = np.indices(tuple())\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)\n\n darr = da.indices(tuple(), float, chunks=tuple())\n nparr = np.indices(tuple(), float)\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)\n\n darr = da.indices((0,), float, chunks=(1,))\n nparr = np.indices((0,), float)\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)\n\n darr = da.indices((0, 1, 2), float, chunks=(1, 1, 2))\n nparr = np.indices((0, 1, 2), float)\n assert darr.shape == nparr.shape\n assert darr.dtype == nparr.dtype\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indicies_test_indicies.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_indicies_test_indicies.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 294, "end_line": 309, "span_ids": ["test_indicies"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_indicies():\n darr = da.indices((1,), chunks=(1,))\n nparr = np.indices((1,))\n assert_eq(darr, nparr)\n\n darr = da.indices((1,), float, chunks=(1,))\n nparr = np.indices((1,), float)\n assert_eq(darr, nparr)\n\n darr = da.indices((2, 1), chunks=(2, 1))\n nparr = np.indices((2, 1))\n assert_eq(darr, nparr)\n\n darr = da.indices((2, 3), chunks=(1, 2))\n nparr = np.indices((2, 3))\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_test_meshgrid.for_e_r_a_e_r_d_i_in_zi.if_sparse_.else_.assert_e_r_d_chunks_xi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_test_meshgrid.for_e_r_a_e_r_d_i_in_zi.if_sparse_.else_.assert_e_r_d_chunks_xi", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 312, "end_line": 352, "span_ids": ["test_meshgrid"], "tokens": 441}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\n \"shapes, chunks\",\n [\n ([()], [()]),\n ([(0,)], [(0,)]),\n ([(2,), (3,)], [(1,), (2,)]),\n ([(2,), (3,), (4,)], [(1,), (2,), (3,)]),\n ([(2,), (3,), (4,), (5,)], [(1,), (2,), (3,), (4,)]),\n ([(2, 3), (4,)], [(1, 2), (3,)]),\n ],\n)\n@pytest.mark.parametrize(\"indexing\", [\"ij\", \"xy\"])\n@pytest.mark.parametrize(\"sparse\", [False, True])\ndef test_meshgrid(shapes, chunks, indexing, sparse):\n xi_a = []\n xi_d = []\n xi_dc = []\n for each_shape, each_chunk in zip(shapes, chunks):\n xi_a.append(np.random.random(each_shape))\n xi_d_e = da.from_array(xi_a[-1], chunks=each_chunk)\n xi_d.append(xi_d_e)\n xi_d_ef = xi_d_e.flatten()\n xi_dc.append(xi_d_ef.chunks[0])\n do = list(range(len(xi_dc)))\n if indexing == \"xy\" and len(xi_dc) > 1:\n do[0], do[1] = do[1], do[0]\n xi_dc[0], xi_dc[1] = xi_dc[1], xi_dc[0]\n xi_dc = tuple(xi_dc)\n\n r_a = np.meshgrid(*xi_a, indexing=indexing, sparse=sparse)\n r_d = da.meshgrid(*xi_d, indexing=indexing, sparse=sparse)\n\n assert isinstance(r_d, list)\n assert len(r_a) == len(r_d)\n\n for e_r_a, e_r_d, i in zip(r_a, r_d, do):\n assert_eq(e_r_a, e_r_d)\n if sparse:\n assert e_r_d.chunks[i] == xi_dc[i]\n else:\n assert e_r_d.chunks == xi_dc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_inputcoercion_test_meshgrid_inputcoercion.assert_eq_z_z_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_meshgrid_inputcoercion_test_meshgrid_inputcoercion.assert_eq_z_z_d_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 355, "end_line": 365, "span_ids": ["test_meshgrid_inputcoercion"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meshgrid_inputcoercion():\n a = [1, 2, 3]\n b = np.array([4, 5, 6, 7])\n x, y = np.meshgrid(a, b, indexing=\"ij\")\n z = x * y\n\n x_d, y_d = da.meshgrid(a, b, indexing=\"ij\")\n z_d = x_d * y_d\n\n assert z_d.shape == (len(a), len(b))\n assert_eq(z, z_d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_eye_test_eye.with_dask_config_set_ar.assert_4_x_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_eye_test_eye.with_dask_config_set_ar.assert_4_x_npartitions_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 410, "end_line": 432, "span_ids": ["test_eye"], "tokens": 466}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_eye():\n assert_eq(da.eye(9, chunks=3), np.eye(9))\n assert_eq(da.eye(9), np.eye(9))\n assert_eq(da.eye(10, chunks=3), np.eye(10))\n assert_eq(da.eye(9, chunks=3, M=11), np.eye(9, M=11))\n assert_eq(da.eye(11, chunks=3, M=9), np.eye(11, M=9))\n assert_eq(da.eye(7, chunks=3, M=11), np.eye(7, M=11))\n assert_eq(da.eye(11, chunks=3, M=7), np.eye(11, M=7))\n assert_eq(da.eye(9, chunks=3, k=2), np.eye(9, k=2))\n assert_eq(da.eye(9, chunks=3, k=-2), np.eye(9, k=-2))\n assert_eq(da.eye(7, chunks=3, M=11, k=5), np.eye(7, M=11, k=5))\n assert_eq(da.eye(11, chunks=3, M=7, k=-6), np.eye(11, M=7, k=-6))\n assert_eq(da.eye(6, chunks=3, M=9, k=7), np.eye(6, M=9, k=7))\n assert_eq(da.eye(12, chunks=3, M=6, k=-3), np.eye(12, M=6, k=-3))\n\n assert_eq(da.eye(9, chunks=3, dtype=int), np.eye(9, dtype=int))\n assert_eq(da.eye(10, chunks=3, dtype=int), np.eye(10, dtype=int))\n assert_eq(da.eye(10, chunks=-1, dtype=int), np.eye(10, dtype=int))\n assert_eq(da.eye(9, chunks=3, dtype=None), np.eye(9, dtype=None))\n\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n x = da.eye(10000, \"auto\")\n assert 4 < x.npartitions < 32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal_test_diagonal.None_14": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal_test_diagonal.None_14", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 438, "end_line": 502, "span_ids": ["test_diagonal"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diagonal():\n v = np.arange(11)\n with pytest.raises(ValueError):\n da.diagonal(v)\n\n v = np.arange(4).reshape((2, 2))\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=0, axis2=0)\n\n with pytest.raises(np.AxisError):\n da.diagonal(v, axis1=-4)\n\n with pytest.raises(np.AxisError):\n da.diagonal(v, axis2=-4)\n\n v = np.arange(4 * 5 * 6).reshape((4, 5, 6))\n v = da.from_array(v, chunks=2)\n assert_eq(da.diagonal(v), np.diagonal(v))\n # Empty diagonal.\n assert_eq(da.diagonal(v, offset=10), np.diagonal(v, offset=10))\n assert_eq(da.diagonal(v, offset=-10), np.diagonal(v, offset=-10))\n\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=-2)\n\n # Negative axis.\n assert_eq(da.diagonal(v, axis1=-1), np.diagonal(v, axis1=-1))\n assert_eq(da.diagonal(v, offset=1, axis1=-1), np.diagonal(v, offset=1, axis1=-1))\n\n # Heterogeneous chunks.\n v = np.arange(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6))\n v = da.from_array(v, chunks=(1, (1, 2), 
(1, 2, 1), (2, 1, 2), (5, 1)))\n\n assert_eq(da.diagonal(v), np.diagonal(v))\n assert_eq(\n da.diagonal(v, offset=2, axis1=3, axis2=1),\n np.diagonal(v, offset=2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=1),\n np.diagonal(v, offset=-2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=4),\n np.diagonal(v, offset=-2, axis1=3, axis2=4),\n )\n\n assert_eq(da.diagonal(v, 1), np.diagonal(v, 1))\n assert_eq(da.diagonal(v, -1), np.diagonal(v, -1))\n # Positional arguments\n assert_eq(da.diagonal(v, 1, 2, 1), np.diagonal(v, 1, 2, 1))\n\n v = np.arange(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6))\n assert_eq(da.diagonal(v, axis1=1, axis2=3), np.diagonal(v, axis1=1, axis2=3))\n assert_eq(\n da.diagonal(v, offset=1, axis1=1, axis2=3),\n np.diagonal(v, offset=1, axis1=1, axis2=3),\n )\n\n assert_eq(\n da.diagonal(v, offset=1, axis1=3, axis2=1),\n np.diagonal(v, offset=1, axis1=3, axis2=1),\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal.None_15_test_diagonal.None_22": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diagonal.None_15_test_diagonal.None_22", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 534, "end_line": 568, "span_ids": ["test_diagonal"], "tokens": 397}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diagonal():\n # ... 
other code\n\n assert_eq(\n da.diagonal(v, offset=-5, axis1=3, axis2=1),\n np.diagonal(v, offset=-5, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-6, axis1=3, axis2=1),\n np.diagonal(v, offset=-6, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-6, axis1=-3, axis2=1),\n np.diagonal(v, offset=-6, axis1=-3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-6, axis1=-3, axis2=1),\n np.diagonal(v, offset=-6, axis1=-3, axis2=1),\n )\n\n v = da.from_array(v, chunks=2)\n assert_eq(\n da.diagonal(v, offset=1, axis1=3, axis2=1),\n np.diagonal(v, offset=1, axis1=3, axis2=1),\n )\n assert_eq(\n da.diagonal(v, offset=-1, axis1=3, axis2=1),\n np.diagonal(v, offset=-1, axis1=3, axis2=1),\n )\n\n v = np.arange(384).reshape((8, 8, 6))\n assert_eq(da.diagonal(v, offset=-1, axis1=2), np.diagonal(v, offset=-1, axis1=2))\n\n v = da.from_array(v, chunks=(4, 4, 2))\n assert_eq(da.diagonal(v, offset=-1, axis1=2), np.diagonal(v, offset=-1, axis1=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_fromfunction_test_fromfunction.assert_same_keys_d_d2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_fromfunction_test_fromfunction.assert_same_keys_d_d2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 571, "end_line": 588, "span_ids": ["test_fromfunction"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [None, \"f8\", \"i8\"])\n@pytest.mark.parametrize(\n \"func, kwargs\",\n [\n (lambda x, y: x + y, {}),\n (lambda x, y, c=1: x + c * y, {}),\n (lambda x, y, c=1: x + c * y, {\"c\": 3}),\n ],\n)\ndef test_fromfunction(func, dtype, kwargs):\n a = np.fromfunction(func, shape=(5, 5), dtype=dtype, **kwargs)\n d = da.fromfunction(func, shape=(5, 5), chunks=(2, 2), dtype=dtype, **kwargs)\n\n assert_eq(d, a)\n\n d2 = da.fromfunction(func, shape=(5, 5), chunks=(2, 2), dtype=dtype, **kwargs)\n\n assert same_keys(d, d2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_repeat_test_repeat.for_r_in_1_2_3_4_.assert_all_concat_d_repea": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_repeat_test_repeat.for_r_in_1_2_3_4_.assert_all_concat_d_repea", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 591, "end_line": 623, "span_ids": ["test_repeat"], "tokens": 268}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repeat():\n x = np.random.random((10, 11, 13))\n d = da.from_array(x, chunks=(4, 5, 3))\n\n repeats = [0, 1, 2, 5]\n axes = [-3, -2, -1, 0, 1, 2]\n\n for r in repeats:\n for a in axes:\n assert_eq(x.repeat(r, axis=a), d.repeat(r, axis=a))\n\n assert_eq(d.repeat(2, 0), da.repeat(d, 2, 0))\n\n with pytest.raises(NotImplementedError):\n da.repeat(d, np.arange(10))\n\n with pytest.raises(NotImplementedError):\n da.repeat(d, 2, None)\n\n with pytest.raises(NotImplementedError):\n da.repeat(d, 2)\n\n for invalid_axis in [3, -4]:\n with pytest.raises(ValueError):\n da.repeat(d, 2, axis=invalid_axis)\n\n x = np.arange(5)\n d = da.arange(5, chunks=(2,))\n\n assert_eq(x.repeat(3), d.repeat(3))\n\n for r in [1, 2, 3, 4]:\n assert all(concat(d.repeat(r).chunks))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_basic_test_tile_basic.assert_eq_np_tile_b_reps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_basic_test_tile_basic.assert_eq_np_tile_b_reps", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 626, "end_line": 632, "span_ids": ["test_tile_basic"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"reps\", [2, (2, 2), (1, 2), (2, 1), (2, 3, 4, 0)])\ndef test_tile_basic(reps):\n a = da.asarray([0, 1, 2])\n b = [[1, 2], [3, 4]]\n\n assert_eq(np.tile(a.compute(), reps), da.tile(a, reps))\n assert_eq(np.tile(b, reps), da.tile(b, reps))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_chunks_test_tile_neg_reps.with_pytest_raises_ValueE.da_tile_d_reps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_chunks_test_tile_neg_reps.with_pytest_raises_ValueE.da_tile_d_reps_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 635, "end_line": 651, "span_ids": ["test_tile_neg_reps", "test_tile_chunks"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, chunks\", [((10,), (1,)), ((10, 11, 13), (4, 5, 3))])\n@pytest.mark.parametrize(\"reps\", [0, 1, 2, 3, 5, (1,), (1, 2)])\ndef test_tile_chunks(shape, chunks, reps):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=chunks)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))\n\n\n@pytest.mark.parametrize(\"shape, chunks\", [((10,), (1,)), ((10, 11, 13), (4, 5, 3))])\n@pytest.mark.parametrize(\"reps\", [-1, -5])\ndef test_tile_neg_reps(shape, chunks, reps):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=chunks)\n\n with pytest.raises(ValueError):\n da.tile(d, reps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_zero_reps_test_tile_zero_reps.assert_eq_np_tile_x_reps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_zero_reps_test_tile_zero_reps.assert_eq_np_tile_x_reps", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 654, "end_line": 660, "span_ids": ["test_tile_zero_reps"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, chunks\", [((10,), (1,)), ((10, 11, 13), (4, 5, 3))])\n@pytest.mark.parametrize(\"reps\", [0, (0,), (2, 0), (0, 3, 0, 4)])\ndef test_tile_zero_reps(shape, chunks, reps):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=chunks)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_empty_array_test_tile_empty_array.assert_eq_np_tile_x_reps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_empty_array_test_tile_empty_array.assert_eq_np_tile_x_reps", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 663, "end_line": 669, "span_ids": ["test_tile_empty_array"], "tokens": 104}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, 
chunks\", [((1, 1, 0), (1, 1, 0)), ((2, 0), (1, 0))])\n@pytest.mark.parametrize(\"reps\", [2, (3, 2, 5)])\ndef test_tile_empty_array(shape, chunks, reps):\n x = np.empty(shape)\n d = da.from_array(x, chunks=chunks)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_0_width_test_pad_0_width.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_0_width_test_pad_0_width.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 653, "end_line": 674, "span_ids": ["test_pad_0_width"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, pad_width, mode, kwargs\",\n [\n ((10, 11), (4, 5), 0, \"constant\", {\"constant_values\": 2}),\n ((10, 11), (4, 5), 0, \"edge\", {}),\n ((10, 11), (4, 5), 0, \"linear_ramp\", {\"end_values\": 2}),\n ((10, 11), (4, 5), 0, \"reflect\", {}),\n ((10, 11), (4, 5), 0, \"symmetric\", {}),\n ((10, 11), (4, 5), 0, \"wrap\", {}),\n ((10, 11), (4, 5), 0, \"empty\", {}),\n ],\n)\ndef test_pad_0_width(shape, chunks, pad_width, mode, kwargs):\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_r = np.pad(np_a, pad_width, mode, **kwargs)\n da_r = da.pad(da_a, pad_width, mode, **kwargs)\n\n assert da_r is da_a\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 677, "end_line": 720, "span_ids": ["test_pad"], "tokens": 570}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, pad_width, mode, kwargs\",\n [\n ((10,), (3,), 1, \"constant\", {}),\n ((10,), (3,), 2, \"constant\", {\"constant_values\": -1}),\n ((10,), (3,), ((2, 3)), \"constant\", {\"constant_values\": (-1, -2)}),\n (\n (10, 11),\n 
(4, 5),\n ((1, 4), (2, 3)),\n \"constant\",\n {\"constant_values\": ((-1, -2), (2, 1))},\n ),\n ((10,), (3,), 3, \"edge\", {}),\n ((10,), (3,), 3, \"linear_ramp\", {}),\n ((10,), (3,), 3, \"linear_ramp\", {\"end_values\": 0}),\n (\n (10, 11),\n (4, 5),\n ((1, 4), (2, 3)),\n \"linear_ramp\",\n {\"end_values\": ((-1, -2), (4, 3))},\n ),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"reflect\", {}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"symmetric\", {}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"wrap\", {}),\n ((10,), (3,), ((2, 3)), \"maximum\", {\"stat_length\": (1, 2)}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"mean\", {\"stat_length\": ((3, 4), (2, 1))}),\n ((10,), (3,), ((2, 3)), \"minimum\", {\"stat_length\": (2, 3)}),\n ((10,), (3,), 1, \"empty\", {}),\n ],\n)\ndef test_pad(shape, chunks, pad_width, mode, kwargs):\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_r = np.pad(np_a, pad_width, mode, **kwargs)\n da_r = da.pad(da_a, pad_width, mode, **kwargs)\n\n if mode == \"empty\":\n # empty pads lead to undefined values which may be different\n assert_eq(np_r[pad_width:-pad_width], da_r[pad_width:-pad_width])\n else:\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_3d_data_test_pad_3d_data.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_3d_data_test_pad_3d_data.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 723, "end_line": 773, "span_ids": ["test_pad_3d_data"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [np.uint8, np.int16, np.float32, bool])\n@pytest.mark.parametrize(\n \"pad_widths\", [2, (2,), (2, 3), ((2, 3),), ((3, 1), (0, 0), (2, 0))]\n)\n@pytest.mark.parametrize(\n \"mode\",\n [\n \"constant\",\n \"edge\",\n \"linear_ramp\",\n \"maximum\",\n \"mean\",\n \"minimum\",\n pytest.param(\n \"reflect\",\n marks=pytest.mark.skip(\n reason=\"Bug when pad_width is larger than dimension: https://github.com/dask/dask/issues/5303\"\n ),\n ),\n pytest.param(\n \"symmetric\",\n marks=pytest.mark.skip(\n reason=\"Bug when pad_width is larger than dimension: https://github.com/dask/dask/issues/5303\"\n ),\n ),\n pytest.param(\n \"wrap\",\n marks=pytest.mark.skip(\n reason=\"Bug when pad_width is larger than dimension: https://github.com/dask/dask/issues/5303\"\n ),\n ),\n pytest.param(\n \"median\",\n marks=pytest.mark.skip(reason=\"Not implemented\"),\n ),\n pytest.param(\n \"empty\",\n marks=pytest.mark.skip(\n reason=\"Empty leads to undefined values, which may be different\"\n ),\n ),\n ],\n)\ndef test_pad_3d_data(dtype, pad_widths, mode):\n np_a = np.arange(2 * 3 * 4).reshape(2, 3, 4).astype(dtype)\n da_a = da.from_array(np_a, chunks=\"auto\")\n\n np_r = 
np.pad(np_a, pad_widths, mode=mode)\n da_r = da.pad(da_a, pad_widths, mode=mode)\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_operator_dispatch_property.return.wrapped": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_operator_dispatch_property.return.wrapped", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 33, "span_ids": ["imports", "wrap", "dispatch_property"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\n\nimport numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array import Array\nfrom dask.array.chunk_types import is_valid_array_chunk, is_valid_chunk_type\nfrom dask.array.utils import assert_eq\n\n\ndef wrap(func_name):\n \"\"\"\n Wrap a function.\n \"\"\"\n\n def wrapped(self, *a, **kw):\n a = getattr(self.arr, func_name)(*a, **kw)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n return wrapped\n\n\ndef dispatch_property(prop_name):\n \"\"\"\n Wrap a simple property.\n \"\"\"\n\n @property\n def wrapped(self, *a, **kw):\n return getattr(self.arr, prop_name)\n\n return wrapped", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray_EncapsulateNDArray.__array__.return.np_asarray_self_arr_arg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray_EncapsulateNDArray.__array__.return.np_asarray_self_arr_arg", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 51, "span_ids": ["EncapsulateNDArray.__array__", "EncapsulateNDArray.__init__", "EncapsulateNDArray"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class EncapsulateNDArray(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"\n A class that \"mocks\" ndarray by encapsulating an ndarray and using\n protocols to \"look like\" an ndarray. Basically tests whether Dask\n works fine with something that is essentially an array but uses\n protocols instead of being an actual array. 
Must be manually\n registered as a valid chunk type to be considered a downcast type\n of Dask array in the type casting hierarchy.\n \"\"\"\n\n __array_priority__ = 20\n\n def __init__(self, arr):\n self.arr = arr\n\n def __array__(self, *args, **kwargs):\n return np.asarray(self.arr, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_function___EncapsulateNDArray.__setitem__.wrap___setitem___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_function___EncapsulateNDArray.__setitem__.wrap___setitem___", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 53, "end_line": 68, "span_ids": ["EncapsulateNDArray.__array_function__", "EncapsulateNDArray:5"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class EncapsulateNDArray(np.lib.mixins.NDArrayOperatorsMixin):\n\n def __array_function__(self, f, t, arrs, kw):\n if not all(\n issubclass(ti, (type(self), np.ndarray) + np.ScalarType) for ti in t\n ):\n return NotImplemented\n arrs = tuple(\n arr if not isinstance(arr, type(self)) else arr.arr for arr in arrs\n )\n t = tuple(ti for ti in t if not issubclass(ti, type(self)))\n a = self.arr.__array_function__(f, t, arrs, kw)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n __getitem__ = wrap(\"__getitem__\")\n\n __setitem__ = wrap(\"__setitem__\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_ufunc___da_register_chunk_type_En": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_EncapsulateNDArray.__array_ufunc___da_register_chunk_type_En", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 71, "end_line": 91, "span_ids": ["EncapsulateNDArray.__array_ufunc__", "EncapsulateNDArray:9", "impl"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class EncapsulateNDArray(np.lib.mixins.NDArrayOperatorsMixin):\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if not all(\n isinstance(i, (type(self), np.ndarray) + 
np.ScalarType) for i in inputs\n ):\n return NotImplemented\n inputs = tuple(i if not isinstance(i, type(self)) else i.arr for i in inputs)\n a = getattr(ufunc, method)(*inputs, **kwargs)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n shape = dispatch_property(\"shape\")\n ndim = dispatch_property(\"ndim\")\n dtype = dispatch_property(\"dtype\")\n\n astype = wrap(\"astype\")\n sum = wrap(\"sum\")\n prod = wrap(\"prod\")\n reshape = wrap(\"reshape\")\n squeeze = wrap(\"squeeze\")\n\n\nda.register_chunk_type(EncapsulateNDArray)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_WrappedArray_WrappedArray.__setitem__.self_arr_key_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_WrappedArray_WrappedArray.__setitem__.self_arr_key_value", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 137, "span_ids": ["WrappedArray.__array_function__", "WrappedArray.__array_ufunc__", "WrappedArray", "WrappedArray._downcast_args", "WrappedArray.__array__", "WrappedArray.__dask_graph__", "WrappedArray.__init__", "WrappedArray.__getitem__", "WrappedArray:3", "WrappedArray.__setitem__"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class WrappedArray(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"\n Another mock duck array class (like EncapsulateNDArray), but\n designed to be above Dask in the type casting hierarchy (that is,\n WrappedArray wraps Dask Array) and be even more minimal in API.\n Tests that Dask defers properly to upcast types.\n \"\"\"\n\n def __init__(self, arr, **attrs):\n self.arr = arr\n self.attrs = attrs\n\n def __array__(self, *args, **kwargs):\n return np.asarray(self.arr, *args, **kwargs)\n\n def _downcast_args(self, args):\n for arg in args:\n if isinstance(arg, type(self)):\n yield arg.arr\n elif isinstance(arg, (tuple, list)):\n yield type(arg)(self._downcast_args(arg))\n else:\n yield arg\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n inputs = tuple(self._downcast_args(inputs))\n return type(self)(getattr(ufunc, method)(*inputs, **kwargs), **self.attrs)\n\n def __array_function__(self, func, types, args, kwargs):\n args = tuple(self._downcast_args(args))\n return type(self)(func(*args, **kwargs), **self.attrs)\n\n def __dask_graph__(self):\n # Note: make sure that dask duck arrays do not interfere with the\n # dispatch mechanism. 
The return value here doesn't matter.\n return ...\n\n shape = dispatch_property(\"shape\")\n ndim = dispatch_property(\"ndim\")\n dtype = dispatch_property(\"dtype\")\n\n def __getitem__(self, key):\n return type(self)(self.arr[key], **self.attrs)\n\n def __setitem__(self, key, value):\n self.arr[key] = value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_binary_operation_type_precedence_test_binary_operation_type_precedence.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_binary_operation_type_precedence_test_binary_operation_type_precedence.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 184, "span_ids": ["test_binary_operation_type_precedence"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"op\",\n [\n operator.add,\n operator.eq,\n operator.gt,\n operator.ge,\n operator.lt,\n operator.le,\n operator.mod,\n operator.mul,\n operator.ne,\n operator.pow,\n operator.sub,\n operator.truediv,\n operator.floordiv,\n np.add,\n np.subtract,\n ],\n)\n@pytest.mark.parametrize(\n \"arr_upcast, arr_downcast\",\n [\n (\n WrappedArray(np.random.random((10, 10))),\n da.random.random((10, 10), chunks=(5, 5)),\n ),\n (\n da.random.random((10, 10), chunks=(5, 5)),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n (\n WrappedArray(np.random.random((10, 10))),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n ],\n)\ndef test_binary_operation_type_precedence(op, arr_upcast, arr_downcast):\n \"\"\"Test proper dispatch on binary operators and NumPy ufuncs\"\"\"\n assert (\n type(op(arr_upcast, arr_downcast))\n == type(op(arr_downcast, arr_upcast))\n == type(arr_upcast)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_array_chunk_test_is_valid_array_chunk.assert_is_valid_array_chu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_array_chunk_test_is_valid_array_chunk.assert_is_valid_array_chu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 187, "end_line": 206, "span_ids": ["test_is_valid_array_chunk"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"arr, result\",\n [\n (WrappedArray(np.arange(4)), False),\n (da.from_array(np.arange(4)), False),\n (EncapsulateNDArray(np.arange(4)), True),\n (np.ma.masked_array(np.arange(4), [True, False, True, False]), True),\n (np.arange(4), True),\n (None, True),\n # float/int/str scalars are not valid array chunks,\n # but ops on float/int/str etc scalars do get handled\n # by Dask\n (0.0, False),\n (0, False),\n (\"\", False),\n ],\n)\ndef test_is_valid_array_chunk(arr, result):\n \"\"\"Test is_valid_array_chunk for correctness\"\"\"\n assert is_valid_array_chunk(arr) is result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_from_itertools_import_com_test_fft.assert_eq_da_fft_darr_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_from_itertools_import_com_test_fft.assert_eq_da_fft_darr_n", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 46, "span_ids": ["test_cant_fft_chunked_axis", "imports", "test_fft"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import combinations_with_replacement\n\nimport numpy as np\nimport pytest\n\nimport dask.array as da\nimport dask.array.fft\nfrom dask.array.core import normalize_chunks\nfrom dask.array.fft import fft_wrap\nfrom dask.array.utils import assert_eq, same_keys\n\nall_1d_funcnames = [\"fft\", \"ifft\", \"rfft\", \"irfft\", \"hfft\", \"ihfft\"]\n\nall_nd_funcnames = [\n \"fft2\",\n \"ifft2\",\n \"fftn\",\n \"ifftn\",\n \"rfft2\",\n \"irfft2\",\n \"rfftn\",\n \"irfftn\",\n]\n\nnparr = np.arange(100).reshape(10, 10)\ndarr = da.from_array(nparr, chunks=(1, 10))\ndarr2 = da.from_array(nparr, chunks=(10, 1))\ndarr3 = da.from_array(nparr, chunks=(10, 10))\n\n\n@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_cant_fft_chunked_axis(funcname):\n da_fft = getattr(da.fft, funcname)\n\n bad_darr = da.from_array(nparr, chunks=(5, 5))\n for i in range(bad_darr.ndim):\n with pytest.raises(ValueError):\n da_fft(bad_darr, axis=i)\n\n\n@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_fft(funcname):\n da_fft = getattr(da.fft, funcname)\n np_fft = getattr(np.fft, funcname)\n\n assert_eq(da_fft(darr), np_fft(nparr))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft2n_shapes_test_fft2n_shapes.assert_eq_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft2n_shapes_test_fft2n_shapes.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 61, "span_ids": ["test_fft2n_shapes"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_nd_funcnames)\ndef test_fft2n_shapes(funcname):\n da_fft = getattr(dask.array.fft, funcname)\n np_fft = getattr(np.fft, funcname)\n assert_eq(da_fft(darr3), np_fft(nparr))\n assert_eq(da_fft(darr3, (8, 9)), np_fft(nparr, (8, 9)))\n assert_eq(da_fft(darr3, (8, 9), axes=(1, 0)), np_fft(nparr, (8, 9), axes=(1, 0)))\n assert_eq(\n da_fft(darr3, (12, 11), axes=(1, 0)), np_fft(nparr, (12, 11), axes=(1, 0))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_n_kwarg_test_fft_n_kwarg.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_n_kwarg_test_fft_n_kwarg.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 74, "span_ids": ["test_fft_n_kwarg"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_fft_n_kwarg(funcname):\n da_fft = getattr(da.fft, funcname)\n np_fft = getattr(np.fft, funcname)\n\n assert_eq(da_fft(darr, 5), np_fft(nparr, 5))\n assert_eq(da_fft(darr, 13), np_fft(nparr, 13))\n assert_eq(da_fft(darr2, axis=0), np_fft(nparr, axis=0))\n assert_eq(da_fft(darr2, 5, axis=0), np_fft(nparr, 5, axis=0))\n assert_eq(da_fft(darr2, 13, axis=0), np_fft(nparr, 13, axis=0))\n assert_eq(da_fft(darr2, 12, axis=0), np_fft(nparr, 12, axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_consistent_names_test_wrap_bad_kind.with_pytest_raises_ValueE.fft_wrap_np_ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fft_consistent_names_test_wrap_bad_kind.with_pytest_raises_ValueE.fft_wrap_np_ones_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": 
"text/x-python", "category": "test", "start_line": 77, "end_line": 88, "span_ids": ["test_fft_consistent_names", "test_wrap_bad_kind"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\ndef test_fft_consistent_names(funcname):\n da_fft = getattr(da.fft, funcname)\n\n assert same_keys(da_fft(darr, 5), da_fft(darr, 5))\n assert same_keys(da_fft(darr2, 5, axis=0), da_fft(darr2, 5, axis=0))\n assert not same_keys(da_fft(darr, 5), da_fft(darr, 13))\n\n\ndef test_wrap_bad_kind():\n with pytest.raises(ValueError):\n fft_wrap(np.ones)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_nd_ffts_axes_test_nd_ffts_axes.for_num_axes_in_range_1_.for_axes_in_combinations_.if_len_set_axes_len_a.else_.assert_eq_r_er_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_nd_ffts_axes_test_nd_ffts_axes.for_num_axes_in_range_1_.for_axes_in_combinations_.if_len_set_axes_len_a.else_.assert_eq_r_er_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 91, "end_line": 116, "span_ids": ["test_nd_ffts_axes"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", all_nd_funcnames)\n@pytest.mark.parametrize(\"dtype\", [\"float32\", \"float64\"])\ndef test_nd_ffts_axes(funcname, dtype):\n np_fft = getattr(np.fft, funcname)\n da_fft = getattr(da.fft, funcname)\n\n shape = (7, 8, 9)\n chunk_size = (3, 3, 3)\n a = np.arange(np.prod(shape), dtype=dtype).reshape(shape)\n d = da.from_array(a, chunks=chunk_size)\n\n for num_axes in range(1, d.ndim):\n for axes in combinations_with_replacement(range(d.ndim), num_axes):\n cs = list(chunk_size)\n for i in axes:\n cs[i] = shape[i]\n d2 = d.rechunk(cs)\n if len(set(axes)) < len(axes):\n with pytest.raises(ValueError):\n da_fft(d2, axes=axes)\n else:\n r = da_fft(d2, axes=axes)\n er = np_fft(a, axes=axes)\n assert r.dtype == er.dtype\n assert r.shape == er.shape\n assert_eq(r, er)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_ffts_test_wrap_ffts.if_modname_scipy_fftp.else_.None_5": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_ffts_test_wrap_ffts.if_modname_scipy_fftp.else_.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 148, "span_ids": ["test_wrap_ffts"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"modname\", [\"numpy.fft\", \"scipy.fftpack\"])\n@pytest.mark.parametrize(\"funcname\", all_1d_funcnames)\n@pytest.mark.parametrize(\"dtype\", [\"float32\", \"float64\"])\ndef test_wrap_ffts(modname, funcname, dtype):\n fft_mod = pytest.importorskip(modname)\n try:\n func = getattr(fft_mod, funcname)\n except AttributeError:\n pytest.skip(f\"`{modname}` missing function `{funcname}`.\")\n\n darrc = darr.astype(dtype)\n darr2c = darr2.astype(dtype)\n nparrc = nparr.astype(dtype)\n\n if modname == \"scipy.fftpack\" and \"rfft\" in funcname:\n with pytest.raises(ValueError):\n fft_wrap(func)\n else:\n wfunc = fft_wrap(func)\n assert wfunc(darrc).dtype == func(nparrc).dtype\n assert wfunc(darrc).shape == func(nparrc).shape\n assert_eq(wfunc(darrc), func(nparrc))\n assert_eq(wfunc(darrc, axis=1), func(nparrc, axis=1))\n assert_eq(wfunc(darr2c, axis=0), func(nparrc, axis=0))\n assert_eq(wfunc(darrc, n=len(darrc) - 1), func(nparrc, n=len(darrc) - 1))\n assert_eq(\n wfunc(darrc, axis=1, n=darrc.shape[1] - 1),\n func(nparrc, n=darrc.shape[1] - 1),\n )\n assert_eq(\n wfunc(darr2c, axis=0, n=darr2c.shape[0] - 1),\n func(nparrc, axis=0, n=darr2c.shape[0] - 1),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_fftns_test_wrap_fftns.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_wrap_fftns_test_wrap_fftns.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 151, "end_line": 174, "span_ids": ["test_wrap_fftns"], "tokens": 341}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"modname\", [\"numpy.fft\", \"scipy.fftpack\"])\n@pytest.mark.parametrize(\"funcname\", all_nd_funcnames)\n@pytest.mark.parametrize(\"dtype\", [\"float32\", \"float64\"])\ndef test_wrap_fftns(modname, funcname, dtype):\n fft_mod = pytest.importorskip(modname)\n try:\n func = getattr(fft_mod, funcname)\n except AttributeError:\n pytest.skip(f\"`{modname}` missing function `{funcname}`.\")\n\n darrc = darr.astype(dtype).rechunk(darr.shape)\n darr2c = 
darr2.astype(dtype).rechunk(darr2.shape)\n nparrc = nparr.astype(dtype)\n\n wfunc = fft_wrap(func)\n assert wfunc(darrc).dtype == func(nparrc).dtype\n assert wfunc(darrc).shape == func(nparrc).shape\n assert_eq(wfunc(darrc), func(nparrc))\n assert_eq(wfunc(darrc, axes=(1, 0)), func(nparrc, axes=(1, 0)))\n assert_eq(wfunc(darr2c, axes=(0, 1)), func(nparrc, axes=(0, 1)))\n assert_eq(\n wfunc(darr2c, (darr2c.shape[0] - 1, darr2c.shape[1] - 1), (0, 1)),\n func(nparrc, (nparrc.shape[0] - 1, nparrc.shape[1] - 1), (0, 1)),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftfreq_test_fftfreq.assert_eq_r1_r2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftfreq_test_fftfreq.assert_eq_r1_r2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 191, "span_ids": ["test_fftfreq"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [1, 2, 3, 6, 7])\n@pytest.mark.parametrize(\"d\", [1.0, 0.5, 2 * np.pi])\n@pytest.mark.parametrize(\"c\", [lambda m: m, lambda m: (1, m - 1)])\ndef test_fftfreq(n, d, c):\n c = c(n)\n\n r1 = np.fft.fftfreq(n, d)\n r2 = da.fft.fftfreq(n, d, chunks=c)\n\n assert normalize_chunks(c, r2.shape) == r2.chunks\n\n assert_eq(r1, r2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_rfftfreq_test_rfftfreq.assert_eq_r1_r2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_rfftfreq_test_rfftfreq.assert_eq_r1_r2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 205, "span_ids": ["test_rfftfreq"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [1, 2, 3, 6, 7])\n@pytest.mark.parametrize(\"d\", [1.0, 0.5, 2 * np.pi])\n@pytest.mark.parametrize(\"c\", [lambda m: (m // 2 + 1,), lambda m: (1, m // 2)])\ndef test_rfftfreq(n, d, c):\n c = [ci for ci in c(n) if ci != 0]\n\n r1 = np.fft.rfftfreq(n, d)\n r2 = da.fft.rfftfreq(n, d, chunks=c)\n\n assert normalize_chunks(c, r2.shape) == r2.chunks\n\n assert_eq(r1, r2)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_test_fftshift.assert_eq_d_r_a_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_test_fftshift.assert_eq_d_r_a_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 208, "end_line": 231, "span_ids": ["test_fftshift"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"fftshift\", \"ifftshift\"])\n@pytest.mark.parametrize(\"axes\", [None, 0, 1, 2, (0, 1), (1, 2), (0, 2), (0, 1, 2)])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [[(5, 6, 7), (2, 3, 4)], [(5, 6, 7), (2, 6, 4)], [(5, 6, 7), (5, 6, 7)]],\n)\ndef test_fftshift(funcname, shape, chunks, axes):\n np_func = getattr(np.fft, funcname)\n da_func = getattr(da.fft, funcname)\n\n a = np.arange(np.prod(shape)).reshape(shape)\n d = da.from_array(a, chunks=chunks)\n\n a_r = np_func(a, axes)\n d_r = da_func(d, axes)\n\n for each_d_chunks, each_d_r_chunks in zip(d.chunks, d_r.chunks):\n if len(each_d_chunks) == 1:\n assert len(each_d_r_chunks) == 1\n assert each_d_r_chunks == each_d_chunks\n else:\n assert len(each_d_r_chunks) != 1\n\n assert_eq(d_r, a_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_identity_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_fft.py_test_fftshift_identity_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_fft.py", "file_name": "test_fft.py", "file_type": "text/x-python", "category": "test", "start_line": 234, "end_line": 259, "span_ids": ["test_fftshift_identity"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"funcname1, funcname2\", [(\"fftshift\", \"ifftshift\"), (\"ifftshift\", \"fftshift\")]\n)\n@pytest.mark.parametrize(\"axes\", [None, 0, 1, 2, (0, 1), (1, 2), (0, 2), (0, 1, 2)])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [[(5, 6, 7), (2, 3, 4)], [(5, 6, 7), (2, 6, 4)], [(5, 6, 7), (5, 6, 7)]],\n)\ndef test_fftshift_identity(funcname1, funcname2, shape, chunks, axes):\n da_func1 = getattr(da.fft, funcname1)\n da_func2 = getattr(da.fft, funcname2)\n\n a = np.arange(np.prod(shape)).reshape(shape)\n d = da.from_array(a, chunks=chunks)\n\n d_r = da_func1(da_func2(d, axes), 
axes)\n\n for each_d_chunks, each_d_r_chunks in zip(d.chunks, d_r.chunks):\n if len(each_d_chunks) == 1:\n assert len(each_d_r_chunks) == 1\n assert each_d_r_chunks == each_d_chunks\n else:\n assert len(each_d_r_chunks) != 1\n\n assert_eq(d_r, d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_input_validation_01_test_apply_gufunc_axes_input_validation_01.None_2.apply_gufunc_foo_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_input_validation_01_test_apply_gufunc_axes_input_validation_01.None_2.apply_gufunc_foo_i_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 58, "span_ids": ["test_apply_gufunc_axes_input_validation_01"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axes_input_validation_01():\n def foo(x):\n return np.mean(x, axis=-1)\n\n a = da.random.normal(size=(20, 30), chunks=30)\n\n with pytest.raises(ValueError):\n apply_gufunc(foo, \"(i)->()\", a, axes=0)\n\n apply_gufunc(foo, \"(i)->()\", a, axes=[0])\n apply_gufunc(foo, \"(i)->()\", a, axes=[(0,)])\n apply_gufunc(foo, \"(i)->()\", a, axes=[0, tuple()])\n apply_gufunc(foo, \"(i)->()\", a, axes=[(0,), tuple()])\n\n with pytest.raises(ValueError):\n apply_gufunc(foo, \"(i)->()\", a, axes=[(0, 1)])\n\n with pytest.raises(ValueError):\n apply_gufunc(foo, \"(i)->()\", a, axes=[0, 0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_02_test__validate_normalize_axes_02.None_2._validate_normalize_axes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_02_test__validate_normalize_axes_02.None_2._validate_normalize_axes_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 96, "span_ids": ["test__validate_normalize_axes_02"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__validate_normalize_axes_02():\n i, o = _validate_normalize_axes(None, 0, False, [(\"i\",), (\"i\",)], ())\n assert i == [(0,), (0,)]\n assert o == 
[()]\n\n i, o = _validate_normalize_axes(None, 0, False, [(\"i\",)], (\"i\",))\n assert i == [(0,)]\n assert o == [(0,)]\n\n i, o = _validate_normalize_axes(None, 0, True, [(\"i\",), (\"i\",)], ())\n assert i == [(0,), (0,)]\n assert o == [(0,)]\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, (0,), False, [(\"i\",), (\"i\",)], ())\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, 0, False, [(\"i\",), (\"j\",)], ())\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, 0, False, [(\"i\",), (\"j\",)], (\"j\",))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_03_test__validate_normalize_axes_03.None_2._validate_normalize_axes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test__validate_normalize_axes_03_test__validate_normalize_axes_03.None_2._validate_normalize_axes_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 99, "end_line": 111, "span_ids": ["test__validate_normalize_axes_03"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__validate_normalize_axes_03():\n i, o = _validate_normalize_axes(None, 0, True, [(\"i\",)], ())\n assert i == [(0,)]\n assert o == [(0,)]\n\n with pytest.raises(ValueError):\n _validate_normalize_axes(None, 0, True, [(\"i\",)], (\"i\",))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([(0, 1), (0, 1)], None, True, [(\"i\", \"j\")], (\"i\", \"j\"))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([(0,), (0,)], None, True, [(\"i\",), (\"j\",)], ())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01_test_apply_gufunc_01.assert_std_compute_shap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01_test_apply_gufunc_01.assert_std_compute_shap", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 123, "span_ids": ["test_apply_gufunc_01"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_01():\n def stats(x):\n return np.mean(x, 
axis=-1), np.std(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n result = apply_gufunc(stats, \"(i)->(),()\", a, output_dtypes=2 * (a.dtype,))\n mean, std = result\n assert isinstance(result, tuple)\n assert mean.compute().shape == (10, 20)\n assert std.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01b_test_apply_gufunc_01b.assert_std_compute_shap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_01b_test_apply_gufunc_01b.assert_std_compute_shap", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 126, "end_line": 135, "span_ids": ["test_apply_gufunc_01b"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_01b():\n def stats(x):\n return np.mean(x, axis=-1), np.std(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=5)\n mean, std = apply_gufunc(\n stats, \"(i)->(),()\", a, output_dtypes=2 * (a.dtype,), allow_rechunk=True\n )\n assert mean.compute().shape == (10, 20)\n assert std.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_test_apply_gufunc_output_dtypes_string.assert_mean_compute_sha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_test_apply_gufunc_output_dtypes_string.assert_mean_compute_sha", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 145, "span_ids": ["test_apply_gufunc_output_dtypes_string"], "tokens": 106}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"vectorize\", [False, True])\ndef test_apply_gufunc_output_dtypes_string(vectorize):\n def stats(x):\n return np.mean(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n mean = apply_gufunc(stats, \"(i)->()\", a, output_dtypes=\"f\", vectorize=vectorize)\n assert mean.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_pass_additional_kwargs_test_apply_gufunc_02.assert_c_compute_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_pass_additional_kwargs_test_apply_gufunc_02.assert_c_compute_shape_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 187, "span_ids": ["test_apply_gufunc_02", "test_apply_gufunc_pass_additional_kwargs"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_pass_additional_kwargs():\n def foo(x, bar):\n assert bar == 2\n return x\n\n ret = apply_gufunc(foo, \"()->()\", 1.0, output_dtypes=float, bar=2)\n assert_eq(ret, np.array(1.0, dtype=float))\n\n\ndef test_apply_gufunc_02():\n def outer_product(x, y):\n return np.einsum(\"...i,...j->...ij\", x, y)\n\n a = da.random.normal(size=(20, 30), chunks=(5, 30))\n b = da.random.normal(size=(10, 1, 40), chunks=(10, 1, 40))\n c = apply_gufunc(outer_product, \"(i),(j)->(i,j)\", a, b, output_dtypes=a.dtype)\n\n assert c.compute().shape == (10, 20, 30, 40)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_scalar_output_test_apply_gufunc_elemwise_01b.with_pytest_raises_ValueE.apply_gufunc_add_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_scalar_output_test_apply_gufunc_elemwise_01b.with_pytest_raises_ValueE.apply_gufunc_add_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 206, "span_ids": ["test_apply_gufunc_elemwise_01", "test_apply_gufunc_scalar_output", "test_apply_gufunc_elemwise_01b"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_scalar_output():\n def foo():\n return 1\n\n x = apply_gufunc(foo, \"->()\", output_dtypes=int)\n assert x.compute() == 1\n\n\ndef test_apply_gufunc_elemwise_01():\n def add(x, y):\n return x + y\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n b = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"b\")\n z = apply_gufunc(add, \"(),()->()\", a, b, output_dtypes=a.dtype)\n assert_eq(z, np.array([2, 4, 6]))\n\n\ndef 
test_apply_gufunc_elemwise_01b():\n def add(x, y):\n return x + y\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n b = da.from_array(np.array([1, 2, 3]), chunks=1, name=\"b\")\n with pytest.raises(ValueError):\n apply_gufunc(add, \"(),()->()\", a, b, output_dtypes=a.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_elemwise_02_test_apply_gufunc_elemwise_02.assert_eq_z2_np_array_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_elemwise_02_test_apply_gufunc_elemwise_02.assert_eq_z2_np_array_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 209, "end_line": 218, "span_ids": ["test_apply_gufunc_elemwise_02"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_elemwise_02():\n def addmul(x, y):\n assert x.shape in ((2,), (1,))\n return x + y, x * y\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n b = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"b\")\n z1, z2 = apply_gufunc(addmul, \"(),()->(),()\", a, b, output_dtypes=2 * (a.dtype,))\n assert_eq(z1, np.array([2, 4, 6]))\n assert_eq(z2, np.array([1, 4, 9]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vector_output_test_apply_gufunc_two_scalar_output.assert_y_compute_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vector_output_test_apply_gufunc_two_scalar_output.assert_y_compute_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 266, "span_ids": ["test_apply_gufunc_elemwise_core", "test_apply_gufunc_two_scalar_output", "test_gufunc_vector_output", "test_apply_gufunc_elemwise_loop"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc_vector_output():\n def foo():\n return np.array([1, 2, 3], dtype=int)\n\n x = apply_gufunc(foo, \"->(i_0)\", output_dtypes=int, output_sizes={\"i_0\": 3})\n assert x.chunks == ((3,),)\n assert_eq(x, np.array([1, 2, 3]))\n\n\ndef test_apply_gufunc_elemwise_loop():\n def foo(x):\n assert x.shape in ((2,), 
(1,))\n return 2 * x\n\n a = da.from_array(np.array([1, 2, 3]), chunks=2, name=\"a\")\n z = apply_gufunc(foo, \"()->()\", a, output_dtypes=int)\n assert z.chunks == ((2, 1),)\n assert_eq(z, np.array([2, 4, 6]))\n\n\ndef test_apply_gufunc_elemwise_core():\n def foo(x):\n assert x.shape == (3,)\n return 2 * x\n\n a = da.from_array(np.array([1, 2, 3]), chunks=3, name=\"a\")\n z = apply_gufunc(foo, \"(i)->(i)\", a, output_dtypes=int)\n assert z.chunks == ((3,),)\n assert_eq(z, np.array([2, 4, 6]))\n\n\n# TODO: In case single tuple output will get enabled:\n# def test_apply_gufunc_one_scalar_output():\n# def foo():\n# return 1,\n# x, = apply_gufunc(foo, \"->(),\", output_dtypes=(int,))\n# assert x.compute() == 1\n\n\ndef test_apply_gufunc_two_scalar_output():\n def foo():\n return 1, 2\n\n x, y = apply_gufunc(foo, \"->(),()\", output_dtypes=(int, int))\n assert x.compute() == 1\n assert y.compute() == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_two_mixed_outputs_test_apply_gufunc_output_dtypes.assert_eq_y_dy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_two_mixed_outputs_test_apply_gufunc_output_dtypes.assert_eq_y_dy_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 290, "span_ids": ["test_apply_gufunc_output_dtypes", "test_apply_gufunc_two_mixed_outputs"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_two_mixed_outputs():\n def foo():\n return 1, np.ones((2, 3), dtype=float)\n\n x, y = apply_gufunc(\n foo, \"->(),(i,j)\", output_dtypes=(int, float), output_sizes={\"i\": 2, \"j\": 3}\n )\n assert x.compute() == 1\n assert y.chunks == ((2,), (3,))\n assert_eq(y, np.ones((2, 3), dtype=float))\n\n\n@pytest.mark.parametrize(\"output_dtypes\", [int, (int,)])\ndef test_apply_gufunc_output_dtypes(output_dtypes):\n def foo(x):\n return y\n\n x = np.random.randn(10)\n y = x.astype(int)\n dy = apply_gufunc(foo, \"()->()\", x, output_dtypes=output_dtypes)\n # print(x, x.compute())\n assert_eq(y, dy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_two_inputs_test_gufunc_two_inputs.assert_eq_x_3_np_ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_two_inputs_test_gufunc_two_inputs.assert_eq_x_3_np_ones_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", 
"start_line": 293, "end_line": 300, "span_ids": ["test_gufunc_two_inputs"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc_two_inputs():\n def foo(x, y):\n return np.einsum(\"...ij,...jk->ik\", x, y)\n\n a = da.ones((2, 3), chunks=100, dtype=int)\n b = da.ones((3, 4), chunks=100, dtype=int)\n x = apply_gufunc(foo, \"(i,j),(j,k)->(i,k)\", a, b, output_dtypes=int)\n assert_eq(x, 3 * np.ones((2, 4), dtype=int))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_same_dimsizes_test_apply_gufunc_check_coredim_chunksize.assert_consists_of_multi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_same_dimsizes_test_apply_gufunc_check_coredim_chunksize.assert_consists_of_multi", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 366, "end_line": 385, "span_ids": ["test_apply_gufunc_check_coredim_chunksize", "test_apply_gufunc_check_same_dimsizes"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_check_same_dimsizes():\n def foo(x, y):\n return x + y\n\n a = da.random.normal(size=(3,), chunks=(2,))\n b = da.random.normal(size=(4,), chunks=(2,))\n\n with pytest.raises(ValueError) as excinfo:\n apply_gufunc(foo, \"(),()->()\", a, b, output_dtypes=float, allow_rechunk=True)\n assert \"different lengths in arrays\" in str(excinfo.value)\n\n\ndef test_apply_gufunc_check_coredim_chunksize():\n def foo(x):\n return np.sum(x, axis=-1)\n\n a = da.random.normal(size=(8,), chunks=3)\n with pytest.raises(ValueError) as excinfo:\n da.apply_gufunc(foo, \"(i)->()\", a, output_dtypes=float, allow_rechunk=False)\n assert \"consists of multiple chunks\" in str(excinfo.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_inhomogeneous_chunksize_test_apply_gufunc_check_inhomogeneous_chunksize.assert_with_different_ch": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_check_inhomogeneous_chunksize_test_apply_gufunc_check_inhomogeneous_chunksize.assert_with_different_ch", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", 
"file_type": "text/x-python", "category": "test", "start_line": 388, "end_line": 399, "span_ids": ["test_apply_gufunc_check_inhomogeneous_chunksize"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_check_inhomogeneous_chunksize():\n def foo(x, y):\n return x + y\n\n a = da.random.normal(size=(8,), chunks=((2, 2, 2, 2),))\n b = da.random.normal(size=(8,), chunks=((2, 3, 3),))\n\n with pytest.raises(ValueError) as excinfo:\n da.apply_gufunc(\n foo, \"(),()->()\", a, b, output_dtypes=float, allow_rechunk=False\n )\n assert \"with different chunksize present\" in str(excinfo.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_infer_dtype_test_apply_gufunc_infer_dtype.assert_eq_z1_dx_dy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_infer_dtype_test_apply_gufunc_infer_dtype.assert_eq_z1_dx_dy_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 402, "end_line": 441, "span_ids": ["test_apply_gufunc_infer_dtype"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_infer_dtype():\n x = np.arange(50).reshape((5, 10))\n y = np.arange(10)\n dx = da.from_array(x, chunks=5)\n dy = da.from_array(y, chunks=5)\n\n def foo(x, *args, **kwargs):\n cast = kwargs.pop(\"cast\", \"i8\")\n return (x + sum(args)).astype(cast)\n\n dz = apply_gufunc(foo, \"(),(),()->()\", dx, dy, 1)\n z = foo(dx, dy, 1)\n assert_eq(dz, z)\n\n dz = apply_gufunc(foo, \"(),(),()->()\", dx, dy, 1, cast=\"f8\")\n z = foo(dx, dy, 1, cast=\"f8\")\n assert_eq(dz, z)\n\n dz = apply_gufunc(foo, \"(),(),()->()\", dx, dy, 1, cast=\"f8\", output_dtypes=\"f8\")\n z = foo(dx, dy, 1, cast=\"f8\")\n assert_eq(dz, z)\n\n def foo(x):\n raise RuntimeError(\"Woops\")\n\n with pytest.raises(ValueError) as e:\n apply_gufunc(foo, \"()->()\", dx)\n msg = str(e.value)\n assert msg.startswith(\"`dtype` inference failed\")\n assert \"Please specify the dtype explicitly\" in msg\n assert \"RuntimeError\" in msg\n\n # Multiple outputs\n def foo(x, y):\n return x + y, x - y\n\n z0, z1 = apply_gufunc(foo, \"(),()->(),()\", dx, dy)\n\n assert_eq(z0, dx + dy)\n assert_eq(z1, dx - dy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_01_test_apply_gufunc_axis_02.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_01_test_apply_gufunc_axis_02.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 444, "end_line": 468, "span_ids": ["test_apply_gufunc_axis_01", "test_apply_gufunc_axis_02"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_apply_gufunc_axis_01(keepdims):\n def mymedian(x):\n return np.median(x, axis=-1)\n\n a = np.random.randn(10, 5)\n da_ = da.from_array(a, chunks=2)\n\n m = np.median(a, axis=0, keepdims=keepdims)\n dm = apply_gufunc(\n mymedian, \"(i)->()\", da_, axis=0, keepdims=keepdims, allow_rechunk=True\n )\n assert_eq(m, dm)\n\n\ndef test_apply_gufunc_axis_02():\n def myfft(x):\n return np.fft.fft(x, axis=-1)\n\n a = np.random.randn(10, 5)\n da_ = da.from_array(a, chunks=2)\n\n m = np.fft.fft(a, axis=0)\n dm = apply_gufunc(myfft, \"(i)->(i)\", da_, axis=0, allow_rechunk=True)\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_02b_test_apply_gufunc_axis_02b.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_02b_test_apply_gufunc_axis_02b.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 471, "end_line": 483, "span_ids": ["test_apply_gufunc_axis_02b"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axis_02b():\n def myfilter(x, cn=10, axis=-1):\n y = np.fft.fft(x, axis=axis)\n y[cn:-cn] = 0\n nx = np.fft.ifft(y, axis=axis)\n return np.real(nx)\n\n a = np.random.randn(3, 6, 4)\n da_ = da.from_array(a, chunks=2)\n\n m = myfilter(a, axis=1)\n dm = apply_gufunc(myfilter, \"(i)->(i)\", da_, axis=1, allow_rechunk=True)\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_03_test_apply_gufunc_axis_03.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_03_test_apply_gufunc_axis_03.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 486, "end_line": 497, "span_ids": ["test_apply_gufunc_axis_03"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axis_03():\n def mydiff(x):\n return np.diff(x, axis=-1)\n\n a = np.random.randn(3, 6, 4)\n da_ = da.from_array(a, chunks=2)\n\n m = np.diff(a, axis=1)\n dm = apply_gufunc(\n mydiff, \"(i)->(i)\", da_, axis=1, output_sizes={\"i\": 5}, allow_rechunk=True\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_keepdims_test_apply_gufunc_axis_keepdims.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axis_keepdims_test_apply_gufunc_axis_keepdims.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 500, "end_line": 512, "span_ids": ["test_apply_gufunc_axis_keepdims"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [-2, -1, None])\ndef test_apply_gufunc_axis_keepdims(axis):\n def mymedian(x):\n return np.median(x, axis=-1)\n\n a = np.random.randn(10, 5)\n da_ = da.from_array(a, chunks=2)\n\n m = np.median(a, axis=-1 if not axis else axis, keepdims=True)\n dm = apply_gufunc(\n mymedian, \"(i)->()\", da_, axis=axis, keepdims=True, allow_rechunk=True\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_01_test_apply_gufunc_axes_01.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_01_test_apply_gufunc_axes_01.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 515, "end_line": 527, "span_ids": ["test_apply_gufunc_axes_01"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axes\", [[0, 1], [(0,), (1,)]])\ndef test_apply_gufunc_axes_01(axes):\n def mystats(x, y):\n return np.std(x, axis=-1) * np.mean(y, axis=-1)\n\n a = np.random.randn(10, 5)\n b = np.random.randn(5, 6)\n da_ = da.from_array(a, chunks=2)\n db_ = da.from_array(b, chunks=2)\n\n m = np.std(a, axis=0) * np.mean(b, axis=1)\n dm = apply_gufunc(mystats, \"(i),(j)->()\", da_, db_, axes=axes, allow_rechunk=True)\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_02_test_apply_gufunc_axes_02.assert_eq_m_dm_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_02_test_apply_gufunc_axes_02.assert_eq_m_dm_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 530, "end_line": 549, "span_ids": ["test_apply_gufunc_axes_02"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axes_02():\n def matmul(x, y):\n return np.einsum(\"...ij,...jk->...ik\", x, y)\n\n a = np.random.randn(3, 2, 1)\n b = np.random.randn(3, 7, 5)\n\n da_ = da.from_array(a, chunks=2)\n db = da.from_array(b, chunks=3)\n\n m = np.einsum(\"jiu,juk->uik\", a, b)\n dm = apply_gufunc(\n matmul,\n \"(i,j),(j,k)->(i,k)\",\n da_,\n db,\n axes=[(1, 0), (0, -1), (-2, -1)],\n allow_rechunk=True,\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_two_kept_coredims_test_apply_gufunc_axes_two_kept_coredims.assert_c_compute_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_axes_two_kept_coredims_test_apply_gufunc_axes_two_kept_coredims.assert_c_compute_shape_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 552, "end_line": 560, "span_ids": ["test_apply_gufunc_axes_two_kept_coredims"], "tokens": 131}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_axes_two_kept_coredims():\n a = da.random.normal(size=(20, 30), chunks=(10, 30))\n b = da.random.normal(size=(10, 1, 40), chunks=(5, 1, 40))\n\n def outer_product(x, y):\n return np.einsum(\"i,j->ij\", x, y)\n\n c = apply_gufunc(outer_product, \"(i),(j)->(i,j)\", a, b, vectorize=True)\n assert c.compute().shape == (10, 20, 30, 40)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_01_test_apply_gufunc_via_numba_01.assert_eq_x_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_01_test_apply_gufunc_via_numba_01.assert_eq_x_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 563, "end_line": 579, "span_ids": ["test_apply_gufunc_via_numba_01"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_via_numba_01():\n numba = pytest.importorskip(\"numba\")\n\n @numba.guvectorize(\n [(numba.float64[:], numba.float64[:], numba.float64[:])], \"(n),(n)->(n)\"\n )\n def g(x, y, res):\n for i in range(x.shape[0]):\n res[i] = x[i] + y[i]\n\n a = da.random.normal(size=(20, 30), chunks=30)\n b = da.random.normal(size=(20, 30), chunks=30)\n\n x = a + b\n y = g(a, b, axis=0)\n\n assert_eq(x, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_02_test_apply_gufunc_via_numba_02.assert_eq_x_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_via_numba_02_test_apply_gufunc_via_numba_02.assert_eq_x_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 580, "end_line": 593, "span_ids": ["test_apply_gufunc_via_numba_02"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_via_numba_02():\n 
numba = pytest.importorskip(\"numba\")\n\n @numba.guvectorize([(numba.float64[:], numba.float64[:])], \"(n)->()\")\n def mysum(x, res):\n res[0] = 0.0\n for i in range(x.shape[0]):\n res[0] += x[i]\n\n a = da.random.normal(size=(20, 30), chunks=30)\n\n x = a.sum(axis=0, keepdims=True)\n y = mysum(a, axis=0, keepdims=True)\n assert_eq(x, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_preserve_meta_type_test_preserve_meta_type.assert_eq_mean_mean_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_preserve_meta_type_test_preserve_meta_type.assert_eq_mean_mean_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 646, "end_line": 661, "span_ids": ["test_preserve_meta_type"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_preserve_meta_type():\n sparse = pytest.importorskip(\"sparse\")\n\n def stats(x):\n return np.sum(x, axis=-1), np.mean(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n a = a.map_blocks(sparse.COO.from_numpy)\n sum, mean = apply_gufunc(stats, \"(i)->(),()\", a, output_dtypes=2 * (a.dtype,))\n\n assert isinstance(a._meta, sparse.COO)\n assert isinstance(sum._meta, sparse.COO)\n assert isinstance(mean._meta, sparse.COO)\n\n assert_eq(sum, sum)\n assert_eq(mean, mean)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_test_imread.with_random_images_4_5_.assert_im_compute_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_test_imread.with_random_images_4_5_.assert_im_compute_dtype", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_image.py", "file_name": "test_image.py", "file_type": "text/x-python", "category": "test", "start_line": 25, "end_line": 33, "span_ids": ["test_imread"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_imread():\n with random_images(4, (5, 6, 3)) as globstring:\n im = da_imread(globstring)\n assert im.shape == (4, 5, 6, 3)\n assert im.chunks == ((1, 1, 1, 1), (5,), (6,), (3,))\n assert im.dtype == \"uint8\"\n\n assert im.compute().shape == (4, 5, 6, 3)\n assert im.compute().dtype == \"uint8\"", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_with_custom_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_test_imread_with_custom_function_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_image.py", "file_name": "test_image.py", "file_type": "text/x-python", "category": "test", "start_line": 36, "end_line": 53, "span_ids": ["test_preprocess", "test_imread_with_custom_function"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_imread_with_custom_function():\n def imread2(fn):\n return np.ones((2, 3, 4), dtype=\"i1\")\n\n with random_images(4, (5, 6, 3)) as globstring:\n im = da_imread(globstring, imread=imread2)\n assert (im.compute() == np.ones((4, 2, 3, 4), dtype=\"u1\")).all()\n\n\ndef test_preprocess():\n def preprocess(x):\n x[:] = 1\n return x[:, :, 0]\n\n with random_images(4, (2, 3, 4)) as globstring:\n im = da_imread(globstring, preprocess=preprocess)\n assert (im.compute() == np.ones((4, 2, 3), dtype=\"u1\")).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 196, "span_ids": ["test_tsqr_uncertain"], "tokens": 748}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; 
recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n mat = np.random.rand(m_min * 2, n_max)\n m, n = m_min * 2, n_max\n mat[0:m_min, 0] += 1\n _c0 = mat[:, 0]\n _r0 = mat[0, :]\n c0 = da.from_array(_c0, chunks=m_min, name=\"c\")\n r0 = da.from_array(_r0, chunks=n_max, name=\"r\")\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n if vary_rows:\n data = data[c0 > 0.5, :]\n mat = mat[_c0 > 0.5, :]\n m = mat.shape[0]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 214, "span_ids": ["test_tsqr_uncertain"], "tokens": 731}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny 
regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... other code\n if vary_cols:\n data = data[:, r0 > 0.5]\n mat = mat[:, _r0 > 0.5]\n n = mat.shape[1]\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 216, "end_line": 244, "span_ids": ["test_tsqr_uncertain"], "tokens": 978}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... 
other code\n\n if error_type is None:\n # test QR\n q, r = tsqr(data)\n q = q.compute() # because uncertainty\n r = r.compute()\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, np.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), np.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, np.triu(r)) # r must be upper triangular\n\n # test SVD\n u, s, vh = tsqr(data, compute_svd=True)\n u = u.compute() # because uncertainty\n s = s.compute()\n vh = vh.compute()\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(np.eye(n_u, n_u), np.dot(u.T, u)) # u must be orthonormal\n assert_eq(np.eye(d_vh, d_vh), np.dot(vh, vh.T)) # vh must be orthonormal\n assert_eq(mat, np.dot(np.dot(u, np.diag(s)), vh[:n_q])) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_zero_height_chunks_test_tsqr_zero_height_chunks.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_tsqr_zero_height_chunks_test_tsqr_zero_height_chunks.None_13", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 276, "span_ids": ["test_tsqr_zero_height_chunks"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tsqr_zero_height_chunks():\n m_q = 10\n n_q = 5\n m_r = 5\n n_r = 5\n\n # certainty\n mat = np.random.rand(10, 5)\n x = da.from_array(mat, chunks=((4, 0, 1, 0, 5), (5,)))\n q, r = da.linalg.qr(x)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n\n # uncertainty\n mat2 = np.vstack([mat, -(np.ones((10, 5)))])\n v2 = mat2[:, 0]\n x2 = da.from_array(mat2, chunks=5)\n c = da.from_array(v2, chunks=5)\n x = x2[c >= 0, :] # remove the ones added above to yield mat\n q, r = da.linalg.qr(x)\n q = q.compute() # because uncertainty\n r = r.compute()\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, np.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), np.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, np.triu(r)) # r must be upper triangular", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_sfqr_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_sfqr_data_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 360, "span_ids": ["test_sfqr"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, ValueError), # tall-skinny regular blocks\n (20, 10, (3, 10), ValueError), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), ValueError), # tall-skinny irregular fat layers\n (\n 40,\n 10,\n ((15, 5, 5, 8, 7), 10),\n ValueError,\n ), # tall-skinny non-uniform chunks (why?)\n (\n 128,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 300,\n 10,\n (40, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), None), # short-fat regular blocks\n (10, 40, (10, 15), None), # short-fat irregular blocks\n (10, 40, (10, (15, 5, 5, 8, 7)), None), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_sfqr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n m_qtq = n_q\n\n if error_type is None:\n q, r = sfqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n else:\n with pytest.raises(error_type):\n q, r = sfqr(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_qr_test_qr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_qr_data_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_qr_test_qr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_qr_data_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 363, "end_line": 419, "span_ids": ["test_qr"], "tokens": 756}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), None), # short-fat regular blocks\n (10, 40, (10, 15), None), # short-fat irregular blocks\n (10, 40, (10, (15, 5, 5, 8, 7)), None), # short-fat non-uniform chunks (why?)\n (20, 20, 10, NotImplementedError), # 2x2 regular blocks\n ],\n)\ndef test_qr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n m_qtq = n_q\n\n if error_type is None:\n q, r = qr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n else:\n with pytest.raises(error_type):\n q, r = qr(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_linalg_consistent_names_test_linalg_consistent_names.assert_same_keys_v1_v2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_linalg_consistent_names_test_linalg_consistent_names.assert_same_keys_v1_v2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 422, "end_line": 436, "span_ids": 
["test_linalg_consistent_names"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_linalg_consistent_names():\n m, n = 20, 10\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=(10, n), name=\"A\")\n\n q1, r1 = qr(data)\n q2, r2 = qr(data)\n assert same_keys(q1, q2)\n assert same_keys(r1, r2)\n\n u1, s1, v1 = svd(data)\n u2, s2, v2 = svd(data)\n assert same_keys(u1, u2)\n assert same_keys(s1, s2)\n assert same_keys(v1, v2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_dask_svd_self_consistent_test_dask_svd_self_consistent.for_d_e_e_in_zip_d_u_d.assert_d_e_dtype_e_dty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_dask_svd_self_consistent_test_dask_svd_self_consistent.for_d_e_e_in_zip_d_u_d.assert_d_e_dtype_e_dty", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 439, "end_line": 449, "span_ids": ["test_dask_svd_self_consistent"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"m,n\", [(10, 20), (15, 15), (20, 10)])\ndef test_dask_svd_self_consistent(m, n):\n a = np.random.rand(m, n)\n d_a = da.from_array(a, chunks=(3, n), name=\"A\")\n\n d_u, d_s, d_vt = da.linalg.svd(d_a)\n u, s, vt = da.compute(d_u, d_s, d_vt)\n\n for d_e, e in zip([d_u, d_s, d_vt], [u, s, vt]):\n assert d_e.shape == e.shape\n assert d_e.dtype == e.dtype", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_1_test_lu_1.for_A_chunk_in_zip_A3_._check_lu_result_dp_dl_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_1_test_lu_1.for_A_chunk_in_zip_A3_._check_lu_result_dp_dl_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 524, "end_line": 563, "span_ids": ["test_lu_1"], "tokens": 500}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"def test_lu_1():\n A1 = np.array([[7, 3, -1, 2], [3, 8, 1, -4], [-1, 1, 4, -1], [2, -4, -1, 6]])\n\n A2 = np.array(\n [\n [7, 0, 0, 0, 0, 0],\n [0, 8, 0, 0, 0, 0],\n [0, 0, 4, 0, 0, 0],\n [0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 3, 0],\n [0, 0, 0, 0, 0, 5],\n ]\n )\n # without shuffle\n for A, chunk in zip([A1, A2], [2, 2]):\n dA = da.from_array(A, chunks=(chunk, chunk))\n p, l, u = scipy.linalg.lu(A)\n dp, dl, du = da.linalg.lu(dA)\n assert_eq(p, dp, check_graph=False)\n assert_eq(l, dl, check_graph=False)\n assert_eq(u, du, check_graph=False)\n _check_lu_result(dp, dl, du, A)\n\n A3 = np.array(\n [\n [7, 3, 2, 1, 4, 1],\n [7, 11, 5, 2, 5, 2],\n [21, 25, 16, 10, 16, 5],\n [21, 41, 18, 13, 16, 11],\n [14, 46, 23, 24, 21, 22],\n [0, 56, 29, 17, 14, 8],\n ]\n )\n\n # with shuffle\n for A, chunk in zip([A3], [2]):\n dA = da.from_array(A, chunks=(chunk, chunk))\n p, l, u = scipy.linalg.lu(A)\n dp, dl, du = da.linalg.lu(dA)\n _check_lu_result(dp, dl, du, A)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_2_test_lu_3._check_lu_result_dp_dl_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_2_test_lu_3._check_lu_result_dp_dl_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 566, "end_line": 586, "span_ids": ["test_lu_2", "test_lu_3"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"size\", [10, 20, 30, 50])\n@pytest.mark.filterwarnings(\"ignore:Increasing:dask.array.core.PerformanceWarning\")\ndef test_lu_2(size):\n np.random.seed(10)\n A = np.random.randint(0, 10, (size, size))\n\n dA = da.from_array(A, chunks=(5, 5))\n dp, dl, du = da.linalg.lu(dA)\n _check_lu_result(dp, dl, du, A)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"size\", [50, 100, 200])\ndef test_lu_3(size):\n np.random.seed(10)\n A = np.random.randint(0, 10, (size, size))\n\n dA = da.from_array(A, chunks=(25, 25))\n dp, dl, du = da.linalg.lu(dA)\n _check_lu_result(dp, dl, du, A)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_errors_test_lu_errors.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lu_errors_test_lu_errors.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 589, "end_line": 600, "span_ids": ["test_lu_errors"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_lu_errors():\n A = np.random.randint(0, 11, (10, 10, 10))\n dA = da.from_array(A, chunks=(5, 5, 5))\n pytest.raises(ValueError, lambda: da.linalg.lu(dA))\n\n A = np.random.randint(0, 11, (10, 8))\n dA = da.from_array(A, chunks=(5, 4))\n pytest.raises(ValueError, lambda: da.linalg.lu(dA))\n\n A = np.random.randint(0, 11, (20, 20))\n dA = da.from_array(A, chunks=(5, 4))\n pytest.raises(ValueError, lambda: da.linalg.lu(dA))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_vector_test_solve_triangular_vector.assert_eq_dAl_dot_res_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_vector_test_solve_triangular_vector.assert_eq_dAl_dot_res_b", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 603, "end_line": 624, "span_ids": ["test_solve_triangular_vector"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10), (70, 20)])\ndef test_solve_triangular_vector(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 11, (shape, shape))\n b = np.random.randint(1, 11, shape)\n\n # upper\n Au = np.triu(A)\n dAu = da.from_array(Au, (chunk, chunk))\n db = da.from_array(b, chunk)\n res = da.linalg.solve_triangular(dAu, db)\n assert_eq(res, scipy.linalg.solve_triangular(Au, b))\n assert_eq(dAu.dot(res), b.astype(float))\n\n # lower\n Al = np.tril(A)\n dAl = da.from_array(Al, (chunk, chunk))\n db = da.from_array(b, chunk)\n res = da.linalg.solve_triangular(dAl, db, lower=True)\n assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))\n assert_eq(dAl.dot(res), b.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix_test_solve_triangular_matrix.assert_eq_dAl_dot_res_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix_test_solve_triangular_matrix.assert_eq_dAl_dot_res_b", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 627, "end_line": 648, "span_ids": ["test_solve_triangular_matrix"], 
"tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10), (50, 20)])\ndef test_solve_triangular_matrix(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n b = np.random.randint(1, 10, (shape, 5))\n\n # upper\n Au = np.triu(A)\n dAu = da.from_array(Au, (chunk, chunk))\n db = da.from_array(b, (chunk, 5))\n res = da.linalg.solve_triangular(dAu, db)\n assert_eq(res, scipy.linalg.solve_triangular(Au, b))\n assert_eq(dAu.dot(res), b.astype(float))\n\n # lower\n Al = np.tril(A)\n dAl = da.from_array(Al, (chunk, chunk))\n db = da.from_array(b, (chunk, 5))\n res = da.linalg.solve_triangular(dAl, db, lower=True)\n assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))\n assert_eq(dAl.dot(res), b.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix2_test_solve_triangular_matrix2.assert_eq_dAl_dot_res_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_matrix2_test_solve_triangular_matrix2.assert_eq_dAl_dot_res_b", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 651, "end_line": 672, "span_ids": ["test_solve_triangular_matrix2"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10), (50, 20)])\ndef test_solve_triangular_matrix2(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n b = np.random.randint(1, 10, (shape, shape))\n\n # upper\n Au = np.triu(A)\n dAu = da.from_array(Au, (chunk, chunk))\n db = da.from_array(b, (chunk, chunk))\n res = da.linalg.solve_triangular(dAu, db)\n assert_eq(res, scipy.linalg.solve_triangular(Au, b))\n assert_eq(dAu.dot(res), b.astype(float))\n\n # lower\n Al = np.tril(A)\n dAl = da.from_array(Al, (chunk, chunk))\n db = da.from_array(b, (chunk, chunk))\n res = da.linalg.solve_triangular(dAl, db, lower=True)\n assert_eq(res, scipy.linalg.solve_triangular(Al, b, lower=True))\n assert_eq(dAl.dot(res), b.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_errors_test_solve_triangular_errors.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_triangular_errors_test_solve_triangular_errors.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 686, "span_ids": ["test_solve_triangular_errors"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_solve_triangular_errors():\n A = np.random.randint(0, 10, (10, 10, 10))\n b = np.random.randint(1, 10, 10)\n dA = da.from_array(A, chunks=(5, 5, 5))\n db = da.from_array(b, chunks=5)\n pytest.raises(ValueError, lambda: da.linalg.solve_triangular(dA, db))\n\n A = np.random.randint(0, 10, (10, 10))\n b = np.random.randint(1, 10, 10)\n dA = da.from_array(A, chunks=(3, 3))\n db = da.from_array(b, chunks=5)\n pytest.raises(ValueError, lambda: da.linalg.solve_triangular(dA, db))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_test_solve.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_test_solve.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 689, "end_line": 718, "span_ids": ["test_solve"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10)])\ndef test_solve(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n dA = da.from_array(A, (chunk, chunk))\n\n # vector\n b = np.random.randint(1, 10, shape)\n db = da.from_array(b, chunk)\n\n res = da.linalg.solve(dA, db)\n assert_eq(res, scipy.linalg.solve(A, b), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # tall-and-skinny matrix\n b = np.random.randint(1, 10, (shape, 5))\n db = da.from_array(b, (chunk, 5))\n\n res = da.linalg.solve(dA, db)\n assert_eq(res, scipy.linalg.solve(A, b), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # matrix\n b = np.random.randint(1, 10, (shape, shape))\n db = da.from_array(b, (chunk, chunk))\n\n res = da.linalg.solve(dA, db)\n assert_eq(res, scipy.linalg.solve(A, b), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_inv__get_symmat.return.lA_dot_lA_T_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_inv__get_symmat.return.lA_dot_lA_T_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 721, "end_line": 737, "span_ids": ["test_inv", "_get_symmat"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (50, 10)])\ndef test_inv(shape, chunk):\n np.random.seed(1)\n\n A = np.random.randint(1, 10, (shape, shape))\n dA = da.from_array(A, (chunk, chunk))\n\n res = da.linalg.inv(dA)\n assert_eq(res, scipy.linalg.inv(A), check_graph=False)\n assert_eq(dA.dot(res), np.eye(shape, dtype=float), check_graph=False)\n\n\ndef _get_symmat(size):\n np.random.seed(1)\n A = np.random.randint(1, 21, (size, size))\n lA = np.tril(A)\n return lA.dot(lA.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_sym_pos_test_solve_sym_pos.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_solve_sym_pos_test_solve_sym_pos.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 740, "end_line": 769, "span_ids": ["test_solve_sym_pos"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (30, 6)])\ndef test_solve_sym_pos(shape, chunk):\n np.random.seed(1)\n\n A = _get_symmat(shape)\n dA = da.from_array(A, (chunk, chunk))\n\n # vector\n b = np.random.randint(1, 10, shape)\n db = da.from_array(b, chunk)\n\n res = da.linalg.solve(dA, db, sym_pos=True)\n assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # tall-and-skinny matrix\n b = np.random.randint(1, 10, (shape, 5))\n db = da.from_array(b, (chunk, 5))\n\n res = da.linalg.solve(dA, db, sym_pos=True)\n assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True), check_graph=False)\n assert_eq(dA.dot(res), b.astype(float), check_graph=False)\n\n # matrix\n b = np.random.randint(1, 10, (shape, shape))\n db = da.from_array(b, (chunk, chunk))\n\n res = da.linalg.solve(dA, db, sym_pos=True)\n assert_eq(res, scipy.linalg.solve(A, b, sym_pos=True), check_graph=False)\n assert_eq(dA.dot(res), 
b.astype(float), check_graph=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_no_chunks_svd_test_no_chunks_svd.for_chunks_in_np_nan_.assert_eq_abs_u_abs_du_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_no_chunks_svd_test_no_chunks_svd.for_chunks_in_np_nan_.assert_eq_abs_u_abs_du_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 814, "end_line": 832, "span_ids": ["test_no_chunks_svd"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_chunks_svd():\n x = np.random.random((100, 10))\n u, s, v = np.linalg.svd(x, full_matrices=False)\n\n for chunks in [((np.nan,) * 10, (10,)), ((np.nan,) * 10, (np.nan,))]:\n dx = da.from_array(x, chunks=(10, 10))\n dx._chunks = chunks\n\n du, ds, dv = da.linalg.svd(dx)\n\n assert_eq(s, ds)\n assert_eq(u.dot(np.diag(s)).dot(v), du.dot(da.diag(ds)).dot(dv))\n assert_eq(du.T.dot(du), np.eye(10))\n assert_eq(dv.T.dot(dv), np.eye(10))\n\n dx = da.from_array(x, chunks=(10, 10))\n dx._chunks = ((np.nan,) * 10, (np.nan,))\n assert_eq(abs(v), abs(dv))\n assert_eq(abs(u), abs(du))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_supported_array_shapes_test_svd_supported_array_shapes.assert_eq_dv_nv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_supported_array_shapes_test_svd_supported_array_shapes.assert_eq_dv_nv_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 852, "end_line": 873, "span_ids": ["test_svd_supported_array_shapes"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [(10, -1), (-1, 10), (9, -1), (-1, 9)])\n@pytest.mark.parametrize(\"shape\", [(10, 100), (100, 10), (10, 10)])\ndef test_svd_supported_array_shapes(chunks, shape):\n # Test the following cases for tall-skinny, short-fat and square arrays:\n # - no chunking\n # - chunking that contradicts shape (e.g. a 10x100 array with 9x100 chunks)\n # - chunking that aligns with shape (e.g. 
a 10x100 array with 10x9 chunks)\n x = np.random.random(shape)\n dx = da.from_array(x, chunks=chunks)\n\n du, ds, dv = da.linalg.svd(dx)\n du, dv = da.compute(du, dv)\n\n nu, ns, nv = np.linalg.svd(x, full_matrices=False)\n\n # Correct signs before comparison\n du, dv = svd_flip(du, dv)\n nu, nv = svd_flip(nu, nv)\n\n assert_eq(du, nu)\n assert_eq(ds, ns)\n assert_eq(dv, nv)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_incompatible_chunking_test_svd_incompatible_dimensions.with_pytest_raises_ValueE.da_linalg_svd_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_incompatible_chunking_test_svd_incompatible_dimensions.with_pytest_raises_ValueE.da_linalg_svd_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 876, "end_line": 888, "span_ids": ["test_svd_incompatible_chunking", "test_svd_incompatible_dimensions"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_svd_incompatible_chunking():\n with pytest.raises(\n NotImplementedError, match=\"Array must be chunked in one dimension only\"\n ):\n x = da.random.random((10, 10), chunks=(5, 5))\n da.linalg.svd(x)\n\n\n@pytest.mark.parametrize(\"ndim\", [0, 1, 3])\ndef test_svd_incompatible_dimensions(ndim):\n with pytest.raises(ValueError, match=\"Array must be 2D\"):\n x = da.random.random((10,) * ndim, chunks=(-1,) * ndim)\n da.linalg.svd(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_ndim_test_norm_any_ndim.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_ndim_test_norm_any_ndim.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 983, "end_line": 1001, "span_ids": ["test_norm_any_ndim"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n sys.platform == \"darwin\" and _np_version < parse_version(\"1.22\"),\n reason=\"https://github.com/dask/dask/issues/7189\",\n strict=False,\n)\n@pytest.mark.parametrize(\n \"shape, chunks, axis\",\n [[(5,), (2,), None], [(5,), (2,), 0], [(5,), (2,), 
(0,)], [(5, 6), (2, 2), None]],\n)\n@pytest.mark.parametrize(\"norm\", [None, 1, -1, np.inf, -np.inf])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_any_ndim(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_slice_test_norm_any_slice.for_firstaxis_in_range_le.for_secondaxis_in_range_l.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_any_slice_test_norm_any_slice.for_firstaxis_in_range_le.for_secondaxis_in_range_l.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 1004, "end_line": 1034, "span_ids": ["test_norm_any_slice"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.xfail(\n sys.platform == \"darwin\" and _np_version < parse_version(\"1.22\"),\n reason=\"https://github.com/dask/dask/issues/7189\",\n strict=False,\n)\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [\n [(5,), (2,)],\n [(5, 3), (2, 2)],\n [(4, 5, 3), (2, 2, 2)],\n [(4, 5, 2, 3), (2, 2, 2, 2)],\n [(2, 5, 2, 4, 3), (2, 2, 2, 2, 2)],\n ],\n)\n@pytest.mark.parametrize(\"norm\", [None, 1, -1, np.inf, -np.inf])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_any_slice(shape, chunks, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n for firstaxis in range(len(shape)):\n for secondaxis in range(len(shape)):\n if firstaxis != secondaxis:\n axis = (firstaxis, secondaxis)\n else:\n axis = firstaxis\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_1dim_test_norm_1dim.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_1dim_test_norm_1dim.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 935, "end_line": 946, "span_ids": ["test_norm_1dim"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, axis\", [[(5,), (2,), None], [(5,), (2,), 0], [(5,), (2,), (0,)]]\n)\n@pytest.mark.parametrize(\"norm\", [0, 2, -2, 0.5])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_1dim(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_2dim_test_norm_2dim.assert_eq_a_r_d_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_2dim_test_norm_2dim.assert_eq_a_r_d_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 949, "end_line": 966, "span_ids": ["test_norm_2dim"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, axis\",\n [[(5, 6), (2, 2), None], [(5, 6), (2, 2), (0, 1)], [(5, 6), (2, 2), (1, 0)]],\n)\n@pytest.mark.parametrize(\"norm\", [\"fro\", \"nuc\", 2, -2])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_2dim(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n\n # Need one chunk on last dimension for svd.\n if norm == \"nuc\" or norm == 2 or norm == -2:\n d = d.rechunk({-1: -1})\n\n a_r = np.linalg.norm(a, ord=norm, axis=axis, keepdims=keepdims)\n d_r = da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)\n\n assert_eq(a_r, d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_implemented_errors_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_norm_implemented_errors_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 969, "end_line": 981, "span_ids": ["test_norm_implemented_errors"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, axis\",\n [[(3, 2, 4), (2, 2, 2), (1, 2)], [(2, 3, 4, 5), (2, 2, 2, 2), (-1, -2)]],\n)\n@pytest.mark.parametrize(\"norm\", [\"nuc\", 2, -2])\n@pytest.mark.parametrize(\"keepdims\", [False, True])\ndef test_norm_implemented_errors(shape, chunks, axis, norm, keepdims):\n a = np.random.random(shape)\n d = da.from_array(a, chunks=chunks)\n if len(shape) > 2 and len(axis) == 2:\n with pytest.raises(NotImplementedError):\n da.linalg.norm(d, ord=norm, axis=axis, keepdims=keepdims)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_random_test_tokenize_masked_array.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_random_test_tokenize_masked_array.None_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports", "test_tokenize_masked_array"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\nfrom copy import deepcopy\nfrom itertools import product\n\nimport numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq\nfrom dask.base import tokenize\n\npytest.importorskip(\"dask.array.ma\")\n\n\ndef test_tokenize_masked_array():\n m = np.ma.masked_array([1, 2, 3], mask=[True, True, False], fill_value=10)\n m2 = np.ma.masked_array([1, 2, 3], mask=[True, True, False], fill_value=0)\n m3 = np.ma.masked_array([1, 2, 3], mask=False, fill_value=10)\n assert tokenize(m) == tokenize(m)\n assert tokenize(m2) == tokenize(m2)\n assert tokenize(m3) == tokenize(m3)\n assert tokenize(m) != tokenize(m2)\n assert tokenize(m) != tokenize(m3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_from_array_masked_array_test_copy_deepcopy.assert_isinstance_y2_comp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_from_array_masked_array_test_copy_deepcopy.assert_isinstance_y2_comp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 46, "span_ids": ["test_copy_deepcopy", "test_from_array_masked_array"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_masked_array():\n m = np.ma.masked_array([1, 2, 3], mask=[True, True, False], fill_value=10)\n dm = da.from_array(m, chunks=(2,), asarray=False)\n assert_eq(dm, m)\n\n\ndef test_copy_deepcopy():\n t = np.ma.masked_array([1, 2], mask=[0, 1])\n x = da.from_array(t, chunks=t.shape, asarray=False)\n # x = da.arange(5, chunks=(2,))\n y = x.copy()\n memo = {}\n y2 = deepcopy(x, memo=memo)\n\n xx = da.ma.masked_where([False, True], [1, 2])\n assert_eq(x, xx)\n\n assert_eq(y, t)\n assert isinstance(y.compute(), np.ma.masked_array)\n assert_eq(y2, t)\n assert isinstance(y2.compute(), np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_functions_functions._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_functions_functions._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 77, "span_ids": ["impl:2"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "functions = [\n lambda x: x,\n lambda x: da.expm1(x),\n lambda x: 2 * x,\n lambda x: x / 2,\n lambda x: x**2,\n lambda x: x + x,\n lambda x: x * x,\n lambda x: x[0],\n lambda x: x[:, 1],\n lambda x: x[:1, None, 1:3],\n lambda x: x.T,\n lambda x: da.transpose(x, (1, 2, 0)),\n lambda x: x.sum(),\n lambda x: x.dot(np.arange(x.shape[-1])),\n lambda x: x.dot(np.eye(x.shape[-1])),\n lambda x: da.tensordot(x, np.ones(x.shape[:2]), axes=[(0, 1), (0, 1)]),\n lambda x: x.sum(axis=0),\n lambda x: x.max(axis=0),\n lambda x: x.sum(axis=(1, 2)),\n lambda x: x.astype(np.complex128),\n lambda x: x.map_blocks(lambda x: x * 2),\n lambda x: x.round(1),\n lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),\n lambda x: abs(x),\n lambda x: x > 0.5,\n lambda x: x.rechunk((4, 4, 4)),\n lambda x: x.rechunk((2, 2, 1)),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_basic_test_basic.if_yy_shape_.assert_isinstance_zz_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_basic_test_basic.if_yy_shape_.assert_isinstance_zz_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 94, "span_ids": ["test_basic"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\ndef test_basic(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.4] = 0\n\n y = da.ma.masked_equal(x, 0)\n\n xx = func(x)\n yy = func(y)\n\n assert_eq(xx, da.ma.filled(yy, 0))\n\n if yy.shape:\n zz = yy.compute()\n assert isinstance(zz, np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_tensordot_test_tensordot.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_tensordot_test_tensordot.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 97, "end_line": 117, "span_ids": ["test_tensordot"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot():\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.4] = 0\n y = da.random.random((4, 3, 2), chunks=(2, 2, 1))\n y[y < 0.4] = 0\n\n xx = da.ma.masked_equal(x, 0)\n yy = da.ma.masked_equal(y, 0)\n\n assert_eq(\n da.tensordot(x, y, axes=(2, 0)),\n da.ma.filled(da.tensordot(xx, yy, axes=(2, 0)), 0),\n )\n assert_eq(\n da.tensordot(x, y, axes=(1, 1)),\n da.ma.filled(da.tensordot(xx, yy, axes=(1, 1)), 0),\n )\n assert_eq(\n da.tensordot(x, y, axes=((1, 2), (1, 0))),\n da.ma.filled(da.tensordot(xx, yy, axes=((1, 2), (1, 0))), 0),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_check_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_check_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 120, "end_line": 134, "span_ids": ["test_mixed_concatenate"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\n@pytest.mark.filterwarnings(\"ignore::numpy.ComplexWarning\") # abs() in 
assert_eq\ndef test_mixed_concatenate(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n y = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n\n y[y < 0.4] = 0\n yy = da.ma.masked_equal(y, 0)\n\n d = da.concatenate([x, y], axis=0)\n s = da.concatenate([x, yy], axis=0)\n\n dd = func(d)\n ss = func(s)\n assert_eq(dd, ss, check_meta=False, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_check_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_check_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 149, "span_ids": ["test_mixed_random"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\n@pytest.mark.filterwarnings(\"ignore::numpy.ComplexWarning\") # abs() in assert_eq\ndef test_mixed_random(func):\n d = da.random.random((4, 3, 4), chunks=(1, 2, 2))\n d[d < 0.4] = 0\n\n fn = lambda x: np.ma.masked_equal(x, 0) if random.random() < 0.5 else x\n s = d.map_blocks(fn)\n\n dd = func(d)\n ss = func(s)\n\n assert_eq(dd, ss, check_meta=False, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_output_type_test_mixed_output_type.assert_isinstance_zz_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_mixed_output_type_test_mixed_output_type.assert_isinstance_zz_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 162, "span_ids": ["test_mixed_output_type"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mixed_output_type():\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.4] = 0\n\n y = da.ma.masked_equal(y, 0)\n x = da.zeros((10, 1), chunks=(5, 1))\n\n z = da.concatenate([x, y], axis=1)\n assert z.shape == (10, 11)\n zz = z.compute()\n assert isinstance(zz, np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_creation_functions_test_creation_functions.assert_eq_da_ma_fix_inval": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_creation_functions_test_creation_functions.assert_eq_da_ma_fix_inval", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 165, "end_line": 212, "span_ids": ["test_creation_functions"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_creation_functions():\n x = np.array([-2, -1, 0, 1, 2] * 20).reshape((10, 10))\n y = np.array([-2, 0, 1, 1, 0] * 2)\n dx = da.from_array(x, chunks=5)\n dy = da.from_array(y, chunks=4)\n\n sol = np.ma.masked_greater(x, y)\n for (a, b) in product([dx, x], [dy, y]):\n assert_eq(da.ma.masked_greater(a, b), sol)\n\n # These are all the same as masked_greater, just check for correct op\n assert_eq(da.ma.masked_greater(dx, 0), np.ma.masked_greater(x, 0))\n assert_eq(da.ma.masked_greater_equal(dx, 0), np.ma.masked_greater_equal(x, 0))\n assert_eq(da.ma.masked_less(dx, 0), np.ma.masked_less(x, 0))\n assert_eq(da.ma.masked_less_equal(dx, 0), np.ma.masked_less_equal(x, 0))\n assert_eq(da.ma.masked_equal(dx, 0), np.ma.masked_equal(x, 0))\n assert_eq(da.ma.masked_not_equal(dx, 0), np.ma.masked_not_equal(x, 0))\n\n # masked_where\n assert_eq(da.ma.masked_where(False, dx), np.ma.masked_where(False, x))\n assert_eq(da.ma.masked_where(dx > 2, dx), np.ma.masked_where(x > 2, x))\n\n with pytest.raises(IndexError):\n da.ma.masked_where((dx > 2)[:, 0], dx)\n\n assert_eq(da.ma.masked_inside(dx, -1, 1), np.ma.masked_inside(x, -1, 1))\n assert_eq(da.ma.masked_outside(dx, -1, 1), np.ma.masked_outside(x, -1, 1))\n assert_eq(da.ma.masked_values(dx, -1), np.ma.masked_values(x, -1))\n\n # masked_equal and masked_values in numpy sets the fill_value to `value`,\n # which can sometimes be an array. This is hard to support in dask, so we\n # forbid it. 
Check that this isn't supported:\n with pytest.raises(ValueError):\n da.ma.masked_equal(dx, dy)\n\n with pytest.raises(ValueError):\n da.ma.masked_values(dx, dy)\n\n y = x.astype(\"f8\")\n y[0, 0] = y[7, 5] = np.nan\n dy = da.from_array(y, chunks=5)\n\n assert_eq(da.ma.masked_invalid(dy), np.ma.masked_invalid(y))\n\n my = np.ma.masked_greater(y, 0)\n dmy = da.ma.masked_greater(dy, 0)\n\n assert_eq(da.ma.fix_invalid(dmy, fill_value=0), np.ma.fix_invalid(my, fill_value=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_filled_assert_eq_ma.if_res_is_np_ma_masked_.else_.assert_eq_a_b_equal_nan": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_filled_assert_eq_ma.if_res_is_np_ma_masked_.else_.assert_eq_a_b_equal_nan", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 215, "end_line": 236, "span_ids": ["assert_eq_ma", "test_filled"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_filled():\n x = np.array([-2, -1, 0, 1, 2] * 20).reshape((10, 10))\n dx = da.from_array(x, chunks=5)\n\n mx = np.ma.masked_equal(x, 0)\n mdx = da.ma.masked_equal(dx, 0)\n\n assert_eq(da.ma.filled(mdx), np.ma.filled(mx))\n assert_eq(da.ma.filled(mdx, -5), np.ma.filled(mx, -5))\n\n\ndef assert_eq_ma(a, b):\n res = a.compute()\n if res is np.ma.masked:\n assert res is b\n else:\n assert type(res) == type(b)\n if hasattr(res, \"mask\"):\n np.testing.assert_equal(res.mask, b.mask)\n a = da.ma.filled(a)\n b = np.ma.filled(b)\n assert_eq(a, b, equal_nan=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_test_reductions.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_test_reductions.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 239, "end_line": 264, "span_ids": ["test_reductions"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", (\"i8\", \"f8\"))\n@pytest.mark.parametrize(\n \"reduction\", [\"sum\", \"prod\", \"mean\", \"var\", \"std\", \"min\", \"max\", \"any\", 
\"all\"]\n)\ndef test_reductions(dtype, reduction):\n x = (np.random.RandomState(42).rand(11, 11) * 10).astype(dtype)\n dx = da.from_array(x, chunks=(4, 4))\n mx = np.ma.masked_greater(x, 5)\n mdx = da.ma.masked_greater(dx, 5)\n\n dfunc = getattr(da, reduction)\n func = getattr(np, reduction)\n\n assert_eq_ma(dfunc(mdx), func(mx))\n assert_eq_ma(dfunc(mdx, axis=0), func(mx, axis=0))\n assert_eq_ma(dfunc(mdx, keepdims=True, split_every=4), func(mx, keepdims=True))\n assert_eq_ma(dfunc(mdx, axis=0, split_every=2), func(mx, axis=0))\n assert_eq_ma(\n dfunc(mdx, axis=0, keepdims=True, split_every=2),\n func(mx, axis=0, keepdims=True),\n )\n assert_eq_ma(dfunc(mdx, axis=1, split_every=2), func(mx, axis=1))\n assert_eq_ma(\n dfunc(mdx, axis=1, keepdims=True, split_every=2),\n func(mx, axis=1, keepdims=True),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_allmasked_test_reductions_allmasked.assert_eq_ma_dfunc_dx_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_reductions_allmasked_test_reductions_allmasked.assert_eq_ma_dfunc_dx_f", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 278, "span_ids": ["test_reductions_allmasked"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", (\"i8\", \"f8\"))\n@pytest.mark.parametrize(\n \"reduction\", [\"sum\", \"prod\", \"mean\", \"var\", \"std\", \"min\", \"max\", \"any\", \"all\"]\n)\ndef test_reductions_allmasked(dtype, reduction):\n x = np.ma.masked_array([1, 2], mask=True)\n dx = da.from_array(x, asarray=False)\n\n dfunc = getattr(da, reduction)\n func = getattr(np, reduction)\n\n assert_eq_ma(dfunc(dx), func(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_arg_reductions_test_arg_reductions.assert_eq_ma_dfunc_dmx_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_arg_reductions_test_arg_reductions.assert_eq_ma_dfunc_dmx_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 281, "end_line": 294, "span_ids": ["test_arg_reductions"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"reduction\", [\"argmin\", \"argmax\"])\ndef test_arg_reductions(reduction):\n x = np.random.random((10, 10, 10))\n dx = da.from_array(x, chunks=(3, 4, 5))\n mx = np.ma.masked_greater(x, 0.4)\n dmx = da.ma.masked_greater(dx, 0.4)\n\n dfunc = getattr(da, reduction)\n func = getattr(np, reduction)\n\n assert_eq_ma(dfunc(dmx), func(mx))\n assert_eq_ma(dfunc(dmx, 0), func(mx, 0))\n assert_eq_ma(dfunc(dmx, 1), func(mx, 1))\n assert_eq_ma(dfunc(dmx, 2), func(mx, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_cumulative_test_cumulative.for_axis_in_0_1_2_.assert_eq_ma_dmx_cumprod_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_cumulative_test_cumulative.for_axis_in_0_1_2_.assert_eq_ma_dmx_cumprod_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 297, "end_line": 305, "span_ids": ["test_cumulative"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cumulative():\n x = np.random.RandomState(0).rand(20, 24, 13)\n dx = da.from_array(x, chunks=(6, 5, 4))\n mx = np.ma.masked_greater(x, 0.4)\n dmx = da.ma.masked_greater(dx, 0.4)\n\n for axis in [0, 1, 2]:\n assert_eq_ma(dmx.cumsum(axis=axis), mx.cumsum(axis=axis))\n assert_eq_ma(dmx.cumprod(axis=axis), mx.cumprod(axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_accessors_test_accessors.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_accessors_test_accessors.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 317, "span_ids": ["test_accessors"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_accessors():\n x = np.random.random((10, 10))\n dx = da.from_array(x, chunks=(3, 4))\n mx = np.ma.masked_greater(x, 0.4)\n dmx = da.ma.masked_greater(dx, 0.4)\n\n assert_eq(da.ma.getmaskarray(dmx), np.ma.getmaskarray(mx))\n assert_eq(da.ma.getmaskarray(dx), np.ma.getmaskarray(x))\n assert_eq(da.ma.getdata(dmx), np.ma.getdata(mx))\n 
assert_eq(da.ma.getdata(dx), np.ma.getdata(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_masked_array_test_masked_array.with_pytest_raises_np_ma_.da_ma_masked_array_dx_ma": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_masked_array_test_masked_array.with_pytest_raises_np_ma_.da_ma_masked_array_dx_ma", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 347, "span_ids": ["test_masked_array"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_masked_array():\n x = np.random.random((10, 10)).astype(\"f4\")\n dx = da.from_array(x, chunks=(3, 4))\n f1 = da.from_array(np.array(1), chunks=())\n\n fill_values = [(None, None), (0.5, 0.5), (1, f1)]\n for data, (df, f) in product([x, dx], fill_values):\n assert_eq(\n da.ma.masked_array(data, fill_value=df), np.ma.masked_array(x, fill_value=f)\n )\n assert_eq(\n da.ma.masked_array(data, mask=data > 0.4, fill_value=df),\n np.ma.masked_array(x, mask=x > 0.4, fill_value=f),\n )\n assert_eq(\n da.ma.masked_array(data, mask=data > 0.4, fill_value=df),\n np.ma.masked_array(x, mask=x > 0.4, fill_value=f),\n )\n assert_eq(\n da.ma.masked_array(data, fill_value=df, dtype=\"f8\"),\n np.ma.masked_array(x, fill_value=f, dtype=\"f8\"),\n )\n\n with pytest.raises(ValueError):\n da.ma.masked_array(dx, fill_value=dx)\n\n with pytest.raises(np.ma.MaskError):\n da.ma.masked_array(dx, mask=dx[:3, :3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_set_fill_value_test_set_fill_value.with_pytest_raises_ValueE.da_ma_set_fill_value_dmx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_set_fill_value_test_set_fill_value.with_pytest_raises_ValueE.da_ma_set_fill_value_dmx_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 350, "end_line": 368, "span_ids": ["test_set_fill_value"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_fill_value():\n x = np.random.randint(0, 10, (10, 10))\n dx = da.from_array(x, chunks=(3, 4))\n mx = np.ma.masked_greater(x, 3)\n dmx = 
da.ma.masked_greater(dx, 3)\n\n da.ma.set_fill_value(dmx, -10)\n np.ma.set_fill_value(mx, -10)\n assert_eq_ma(dmx, mx)\n\n da.ma.set_fill_value(dx, -10)\n np.ma.set_fill_value(x, -10)\n assert_eq_ma(dx, x)\n\n with pytest.raises(TypeError):\n da.ma.set_fill_value(dmx, 1e20)\n\n with pytest.raises(ValueError):\n da.ma.set_fill_value(dmx, dx)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_average_weights_with_masked_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_masked.py_test_average_weights_with_masked_array_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_masked.py", "file_name": "test_masked.py", "file_type": "text/x-python", "category": "test", "start_line": 371, "end_line": 396, "span_ids": ["test_arithmetic_results_in_masked", "test_average_weights_with_masked_array"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_average_weights_with_masked_array():\n mask = np.array([[True, False], [True, True], [False, True]])\n data = np.arange(6).reshape((3, 2))\n a = np.ma.array(data, mask=mask)\n d_a = da.ma.masked_array(data=data, mask=mask, chunks=2)\n\n weights = np.array([0.25, 0.75])\n d_weights = da.from_array(weights, chunks=2)\n\n np_avg = np.ma.average(a, weights=weights, axis=1)\n da_avg = da.ma.average(d_a, weights=d_weights, axis=1)\n\n assert_eq(np_avg, da_avg)\n\n\ndef test_arithmetic_results_in_masked():\n mask = np.array([[True, False], [True, True], [False, True]])\n x = np.arange(6).reshape((3, 2))\n masked = np.ma.array(x, mask=mask)\n dx = da.from_array(x, chunks=(2, 2))\n\n res = dx + masked\n sol = x + masked\n assert_eq(res, sol)\n assert isinstance(res.compute(), np.ma.masked_array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_test_min_max_round_funcs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_test_min_max_round_funcs_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_numpy_compat.py", "file_name": "test_numpy_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 48, "span_ids": ["test_min_max_round_funcs"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_min_max_round_funcs():\n # Regression test for gh-5031\n image = da.from_array(np.array([[0, 1], 
[1, 2]]), chunks=(1, 2))\n # These use __array_function__ (and min/max/round are aliased,\n # to amin/amax/round_ in numpy)\n assert int(np.min(image)) == 0\n assert int(np.max(image)) == 2\n assert np.round(image)[1, 1] == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_pytest_test_fuse_getitem.for_inp_expected_in_pair.assert_result_y_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_pytest_test_fuse_getitem.for_inp_expected_in_pair.assert_result_y_ex", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 108, "span_ids": ["test_fuse_getitem", "imports"], "tokens": 979}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\n\nimport dask\nimport dask.array as da\nfrom dask.array.core import getter, getter_nofancy\nfrom dask.array.optimization import (\n fuse_slice,\n getitem,\n optimize,\n optimize_blockwise,\n optimize_slices,\n)\nfrom dask.array.utils import assert_eq\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.optimization import fuse\nfrom dask.utils import SerializableLock\n\n\ndef test_fuse_getitem():\n pairs = [\n (\n (getter, (getter, \"x\", slice(1000, 2000)), slice(15, 20)),\n (getter, \"x\", slice(1015, 1020)),\n ),\n (\n (\n getitem,\n (getter, \"x\", (slice(1000, 2000), slice(100, 200))),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter, \"x\", (slice(1015, 1020), slice(150, 160))),\n ),\n (\n (\n getitem,\n (getter_nofancy, \"x\", (slice(1000, 2000), slice(100, 200))),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter_nofancy, \"x\", (slice(1015, 1020), slice(150, 160))),\n ),\n ((getter, (getter, \"x\", slice(1000, 2000)), 10), (getter, \"x\", 1010)),\n (\n (getitem, (getter, \"x\", (slice(1000, 2000), 10)), (slice(15, 20),)),\n (getter, \"x\", (slice(1015, 1020), 10)),\n ),\n (\n (getitem, (getter_nofancy, \"x\", (slice(1000, 2000), 10)), (slice(15, 20),)),\n (getter_nofancy, \"x\", (slice(1015, 1020), 10)),\n ),\n (\n (getter, (getter, \"x\", (10, slice(1000, 2000))), (slice(15, 20),)),\n (getter, \"x\", (10, slice(1015, 1020))),\n ),\n (\n (\n getter,\n (getter, \"x\", (slice(1000, 2000), slice(100, 200))),\n (slice(None, None), slice(50, 60)),\n ),\n (getter, \"x\", (slice(1000, 2000), slice(150, 160))),\n ),\n (\n (getter, (getter, \"x\", (None, slice(None, None))), (slice(None, None), 5)),\n (getter, \"x\", (None, 5)),\n ),\n (\n (\n getter,\n (getter, \"x\", (slice(1000, 2000), slice(10, 20))),\n (slice(5, 10),),\n ),\n (getter, \"x\", (slice(1005, 1010), slice(10, 20))),\n ),\n (\n (\n getitem,\n (getitem, \"x\", (slice(1000, 2000),)),\n (slice(5, 10), slice(10, 20)),\n ),\n (getitem, \"x\", (slice(1005, 1010), slice(10, 20))),\n ),\n (\n (getter, (getter, \"x\", slice(1000, 2000), False, 
False), slice(15, 20)),\n (getter, \"x\", slice(1015, 1020)),\n ),\n (\n (getter, (getter, \"x\", slice(1000, 2000)), slice(15, 20), False, False),\n (getter, \"x\", slice(1015, 1020)),\n ),\n (\n (\n getter,\n (getter_nofancy, \"x\", slice(1000, 2000), False, False),\n slice(15, 20),\n False,\n False,\n ),\n (getter_nofancy, \"x\", slice(1015, 1020), False, False),\n ),\n ]\n\n for inp, expected in pairs:\n result = optimize_slices({\"y\": inp})\n assert result == {\"y\": expected}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getitem_lock_test_fuse_getitem_lock.for_inp_expected_in_pair.assert_result_y_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getitem_lock_test_fuse_getitem_lock.for_inp_expected_in_pair.assert_result_y_ex", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 103, "end_line": 154, "span_ids": ["test_fuse_getitem_lock"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_getitem_lock():\n lock1 = SerializableLock()\n lock2 = SerializableLock()\n\n pairs = [\n (\n (getter, (getter, \"x\", slice(1000, 2000), True, lock1), slice(15, 20)),\n (getter, \"x\", slice(1015, 1020), True, lock1),\n ),\n (\n (\n getitem,\n (getter, \"x\", (slice(1000, 2000), slice(100, 200)), True, lock1),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter, \"x\", (slice(1015, 1020), slice(150, 160)), True, lock1),\n ),\n (\n (\n getitem,\n (\n getter_nofancy,\n \"x\",\n (slice(1000, 2000), slice(100, 200)),\n True,\n lock1,\n ),\n (slice(15, 20), slice(50, 60)),\n ),\n (getter_nofancy, \"x\", (slice(1015, 1020), slice(150, 160)), True, lock1),\n ),\n (\n (\n getter,\n (getter, \"x\", slice(1000, 2000), True, lock1),\n slice(15, 20),\n True,\n lock2,\n ),\n (\n getter,\n (getter, \"x\", slice(1000, 2000), True, lock1),\n slice(15, 20),\n True,\n lock2,\n ),\n ),\n ]\n\n for inp, expected in pairs:\n result = optimize_slices({\"y\": inp})\n assert result == {\"y\": expected}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_with_getitem_fusion_test_optimize_with_getitem_fusion.assert_len_result_len_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_with_getitem_fusion_test_optimize_with_getitem_fusion.assert_len_result_len_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", 
"category": "test", "start_line": 157, "end_line": 167, "span_ids": ["test_optimize_with_getitem_fusion"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_with_getitem_fusion():\n dsk = {\n \"a\": \"some-array\",\n \"b\": (getter, \"a\", (slice(10, 20), slice(100, 200))),\n \"c\": (getter, \"b\", (5, slice(50, 60))),\n }\n\n result = optimize(dsk, [\"c\"])\n expected_task = (getter, \"some-array\", (15, slice(150, 160)))\n assert any(v == expected_task for v in result.values())\n assert len(result) < len(dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_slicing_test_optimize_slicing.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_slicing_test_optimize_slicing.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 191, "span_ids": ["test_optimize_slicing"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_slicing():\n dsk = {\n \"a\": (range, 10),\n \"b\": (getter, \"a\", (slice(None, None, None),)),\n \"c\": (getter, \"b\", (slice(None, None, None),)),\n \"d\": (getter, \"c\", (slice(0, 5, None),)),\n \"e\": (getter, \"d\", (slice(None, None, None),)),\n }\n\n expected = {\"e\": (getter, (range, 10), (slice(0, 5, None),))}\n result = optimize_slices(fuse(dsk, [], rename_keys=False)[0])\n assert result == expected\n\n # protect output keys\n expected = {\n \"c\": (getter, (range, 10), (slice(0, None, None),)),\n \"d\": (getter, \"c\", (slice(0, 5, None),)),\n \"e\": (getter, \"d\", (slice(None, None, None),)),\n }\n result = optimize_slices(fuse(dsk, [\"c\", \"d\", \"e\"], rename_keys=False)[0])\n\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_test_fuse_slice.None_1.fuse_slice_None_np_array": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_test_fuse_slice.None_1.fuse_slice_None_np_array", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 194, 
"end_line": 218, "span_ids": ["test_fuse_slice"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_slice():\n assert fuse_slice(slice(10, 15), slice(0, 5, 2)) == slice(10, 15, 2)\n\n assert fuse_slice((slice(100, 200),), (None, slice(10, 20))) == (\n None,\n slice(110, 120),\n )\n assert fuse_slice((slice(100, 200),), (slice(10, 20), None)) == (\n slice(110, 120),\n None,\n )\n assert fuse_slice((1,), (None,)) == (1, None)\n assert fuse_slice((1, slice(10, 20)), (None, None, 3, None)) == (\n 1,\n None,\n None,\n 13,\n None,\n )\n\n with pytest.raises(NotImplementedError):\n fuse_slice(slice(10, 15, 2), -1)\n # Regression test for #3076\n with pytest.raises(NotImplementedError):\n fuse_slice(None, np.array([0, 0]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_with_lists_test_fuse_slice_with_lists.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slice_with_lists_test_fuse_slice_with_lists.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 232, "span_ids": ["test_fuse_slice_with_lists"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_slice_with_lists():\n assert fuse_slice(slice(10, 20, 2), [1, 2, 3]) == [12, 14, 16]\n assert fuse_slice([10, 20, 30, 40, 50], [3, 1, 2]) == [40, 20, 30]\n assert fuse_slice([10, 20, 30, 40, 50], 3) == 40\n assert fuse_slice([10, 20, 30, 40, 50], -1) == 50\n assert fuse_slice([10, 20, 30, 40, 50], slice(1, None, 2)) == [20, 40]\n assert fuse_slice(\n (slice(None), slice(0, 10), [1, 2, 3]), (slice(None), slice(1, 5), slice(None))\n ) == (slice(0, None), slice(1, 5), [1, 2, 3])\n assert fuse_slice(\n (slice(None), slice(None), [1, 2, 3]), (slice(None), slice(1, 5), 1)\n ) == (slice(0, None), slice(1, 5), 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_nonfusible_fancy_indexing_test_nonfusible_fancy_indexing.for_a_b_in_cases_.with_pytest_raises_NotImp.fuse_slice_a_b_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_nonfusible_fancy_indexing_test_nonfusible_fancy_indexing.for_a_b_in_cases_.with_pytest_raises_NotImp.fuse_slice_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 235, "end_line": 247, "span_ids": ["test_nonfusible_fancy_indexing"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonfusible_fancy_indexing():\n nil = slice(None)\n cases = [ # x[:, list, :][int, :, :]\n ((nil, [1, 2, 3], nil), (0, nil, nil)),\n # x[int, :, :][:, list, :]\n ((0, nil, nil), (nil, [1, 2, 3], nil)),\n # x[:, list, :, :][:, :, :, int]\n ((nil, [1, 2], nil, nil), (nil, nil, nil, 0)),\n ]\n\n for a, b in cases:\n with pytest.raises(NotImplementedError):\n fuse_slice(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_hard_fuse_slice_cases_test_dont_fuse_numpy_arrays.for_chunks_in_5_10_.assert_sum_isinstance_v_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_hard_fuse_slice_cases_test_dont_fuse_numpy_arrays.for_chunks_in_5_10_.assert_sum_isinstance_v_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 250, "end_line": 263, "span_ids": ["test_hard_fuse_slice_cases", "test_dont_fuse_numpy_arrays"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hard_fuse_slice_cases():\n dsk = {\n \"x\": (getter, (getter, \"x\", (None, slice(None, None))), (slice(None, None), 5))\n }\n assert optimize_slices(dsk) == {\"x\": (getter, \"x\", (None, 5))}\n\n\ndef test_dont_fuse_numpy_arrays():\n x = np.ones(10)\n for chunks in [(5,), (10,)]:\n y = da.from_array(x, chunks=(10,))\n\n dsk = y.__dask_optimize__(y.dask, y.__dask_keys__())\n assert sum(isinstance(v, np.ndarray) for v in dsk.values()) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slices_with_alias_test_fuse_slices_with_alias.assert_dsk2_fused_key_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_slices_with_alias_test_fuse_slices_with_alias.assert_dsk2_fused_key_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 286, "end_line": 297, "span_ids": ["test_fuse_slices_with_alias"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_slices_with_alias():\n dsk = {\n \"x\": np.arange(16).reshape((4, 4)),\n (\"dx\", 0, 0): (getter, \"x\", (slice(0, 4), slice(0, 4))),\n (\"alias\", 0, 0): (\"dx\", 0, 0),\n (\"dx2\", 0): (getitem, (\"alias\", 0, 0), (slice(None), 0)),\n }\n keys = [(\"dx2\", 0)]\n dsk2 = optimize(dsk, keys)\n assert len(dsk2) == 3\n fused_key = (dsk2.keys() - {\"x\", (\"dx2\", 0)}).pop()\n assert dsk2[fused_key] == (getter, \"x\", (slice(0, 4), 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_dont_fuse_fancy_indexing_in_getter_nofancy_test_dont_fuse_fancy_indexing_in_getter_nofancy.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_dont_fuse_fancy_indexing_in_getter_nofancy_test_dont_fuse_fancy_indexing_in_getter_nofancy.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 310, "span_ids": ["test_dont_fuse_fancy_indexing_in_getter_nofancy"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_fuse_fancy_indexing_in_getter_nofancy():\n dsk = {\n \"a\": (\n getitem,\n (getter_nofancy, \"x\", (slice(10, 20, None), slice(100, 200, None))),\n ([1, 3], slice(50, 60, None)),\n )\n }\n assert optimize_slices(dsk) == dsk\n\n dsk = {\"a\": (getitem, (getter_nofancy, \"x\", [1, 2, 3]), 0)}\n assert optimize_slices(dsk) == dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getter_with_asarray_test_fuse_getter_with_asarray.assert_eq_z_x_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_getter_with_asarray_test_fuse_getter_with_asarray.assert_eq_z_x_1_", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 313, "end_line": 331, "span_ids": ["test_fuse_getter_with_asarray"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [10, 5, 3])\ndef test_fuse_getter_with_asarray(chunks):\n x = np.ones(10) * 1234567890\n y = da.ones(10, chunks=chunks)\n z = x + y\n dsk = z.__dask_optimize__(z.dask, z.__dask_keys__())\n assert any(v is x for v in dsk.values())\n for v in dsk.values():\n s = str(v)\n assert s.count(\"getitem\") + s.count(\"getter\") <= 1\n if v is not x:\n assert \"1234567890\" not in s\n n_getters = len([v for v in dsk.values() if v[0] in (getitem, getter)])\n if y.npartitions > 1:\n assert n_getters == y.npartitions\n else:\n assert n_getters == 0\n\n assert_eq(z, x + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy.for_orig_final_in_opts_.assert_optimize_slices_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy_test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy.for_orig_final_in_opts_.assert_optimize_slices_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 357, "span_ids": ["test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"get,remove\", [(getter, False), (getter_nofancy, False), (getitem, True)]\n)\ndef test_remove_no_op_slices_if_get_is_not_getter_or_getter_nofancy(get, remove):\n # Test that no-op slices are removed as long as get is not getter or\n # getter_nofancy. 
This ensures that `get` calls are always made in all\n # tasks created by `from_array`, even after optimization\n null = slice(0, None)\n opts = [\n (\n (get, \"x\", null, False, False),\n \"x\" if remove else (get, \"x\", null, False, False),\n ),\n (\n (getitem, (get, \"x\", null, False, False), null),\n \"x\" if remove else (get, \"x\", null, False, False),\n ),\n (\n (getitem, (get, \"x\", (null, null), False, False), ()),\n \"x\" if remove else (get, \"x\", (null, null), False, False),\n ),\n ]\n for orig, final in opts:\n assert optimize_slices({\"a\": orig}) == {\"a\": final}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_turn_off_fusion_test_turn_off_fusion.assert_len_a_len_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_turn_off_fusion_test_turn_off_fusion.assert_len_a_len_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 360, "end_line": 371, "span_ids": ["test_turn_off_fusion"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"blockwise fusion does not respect this, which is ok\")\ndef test_turn_off_fusion():\n x = da.ones(10, chunks=(5,))\n y = da.sum(x + 1 + 2 + 3)\n\n a = y.__dask_optimize__(y.dask, y.__dask_keys__())\n\n with dask.config.set({\"optimization.fuse.ave-width\": 0}):\n b = y.__dask_optimize__(y.dask, y.__dask_keys__())\n\n assert dask.get(a, y.__dask_keys__()) == dask.get(b, y.__dask_keys__())\n assert len(a) < len(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_gh3937_test_gh3937.y_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_gh3937_test_gh3937.y_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 374, "end_line": 383, "span_ids": ["test_gh3937"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh3937():\n # test for github issue #3937\n x = da.from_array([1, 2, 3.0], (2,))\n x = da.concatenate((x, [x[-1]]))\n y = x.rechunk((2,))\n # This will produce Integral type indices that are not 
ints (np.int64), failing\n # the optimizer\n y = da.coarsen(np.sum, y, {0: 2})\n # How to trigger the optimizer explicitly?\n y.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_test_overlap_internal_asymmetric.assert_same_keys_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_test_overlap_internal_asymmetric.assert_same_keys_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 72, "end_line": 94, "span_ids": ["test_overlap_internal_asymmetric"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_internal_asymmetric():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n result = overlap_internal(d, {0: (2, 0), 1: (1, 0)})\n assert result.chunks == ((4, 6), (4, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 27, 28, 29, 30, 31],\n [16, 17, 18, 19, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 59, 60, 61, 62, 63],\n ]\n )\n assert_eq(result, expected)\n assert same_keys(overlap_internal(d, {0: (2, 0), 1: (1, 0)}), result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_small_test_overlap_internal_asymmetric_small.assert_same_keys_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_internal_asymmetric_small_test_overlap_internal_asymmetric_small.assert_same_keys_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 97, "end_line": 135, "span_ids": ["test_overlap_internal_asymmetric_small"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_internal_asymmetric_small():\n x = np.arange(32).reshape((2, 16))\n d = da.from_array(x, chunks=(2, 4))\n\n result = overlap_internal(d, {0: (0, 
0), 1: (1, 1)})\n assert result.chunks == ((2,), (5, 6, 6, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [\n 16,\n 17,\n 18,\n 19,\n 20,\n 19,\n 20,\n 21,\n 22,\n 23,\n 24,\n 23,\n 24,\n 25,\n 26,\n 27,\n 28,\n 27,\n 28,\n 29,\n 30,\n 31,\n ],\n ]\n )\n\n assert_eq(result, expected)\n assert same_keys(overlap_internal(d, {0: (0, 0), 1: (1, 1)}), result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 120, "end_line": 136, "span_ids": ["test_periodic", "test_trim_internal"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_trim_internal():\n d = da.ones((40, 60), chunks=(10, 10))\n e = trim_internal(d, axes={0: 1, 1: 2}, boundary=\"reflect\")\n\n assert e.chunks == ((8, 8, 8, 8), (6, 6, 6, 6, 6, 6))\n\n\ndef test_periodic():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n e = periodic(d, axis=0, depth=2)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], d[-1, :])\n assert_eq(e[0, :], d[-2, :])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_reflect_test_reflect.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_reflect_test_reflect.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 167, "span_ids": ["test_reflect"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reflect():\n x = np.arange(10)\n d = da.from_array(x, chunks=(5, 5))\n\n e = reflect(d, axis=0, depth=2)\n expected = np.array([1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8])\n assert_eq(e, expected)\n\n e = reflect(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_test_nearest.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_test_nearest.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 180, "span_ids": ["test_nearest"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nearest():\n x = np.arange(10)\n d = da.from_array(x, chunks=(5, 5))\n\n e = nearest(d, axis=0, depth=2)\n expected = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9])\n assert_eq(e, expected)\n\n e = nearest(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 192, "span_ids": ["test_constant"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_constant():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n e = constant(d, axis=0, depth=2, value=10)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], np.ones(8, dtype=x.dtype) * 10)\n assert_eq(e[-1, :], np.ones(8, dtype=x.dtype) * 10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_boundaries_test_boundaries.assert_eq_e_expected_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_boundaries_test_boundaries.assert_eq_e_expected_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", 
"start_line": 195, "end_line": 217, "span_ids": ["test_boundaries"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_boundaries():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n e = boundaries(d, {0: 2, 1: 1}, {0: 0, 1: \"periodic\"})\n\n expected = np.array(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [7, 0, 1, 2, 3, 4, 5, 6, 7, 0],\n [15, 8, 9, 10, 11, 12, 13, 14, 15, 8],\n [23, 16, 17, 18, 19, 20, 21, 22, 23, 16],\n [31, 24, 25, 26, 27, 28, 29, 30, 31, 24],\n [39, 32, 33, 34, 35, 36, 37, 38, 39, 32],\n [47, 40, 41, 42, 43, 44, 45, 46, 47, 40],\n [55, 48, 49, 50, 51, 52, 53, 54, 55, 48],\n [63, 56, 57, 58, 59, 60, 61, 62, 63, 56],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n assert_eq(e, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_asymmetric_overlap_boundary_exception_test_map_overlap.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_asymmetric_overlap_boundary_exception_test_map_overlap.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 268, "end_line": 320, "span_ids": ["test_map_overlap", "test_asymmetric_overlap_boundary_exception"], "tokens": 583}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_asymmetric_overlap_boundary_exception():\n x = da.arange(10, chunks=5)\n with pytest.raises(NotImplementedError):\n x.map_overlap(\n lambda x: x + len(x), depth={0: (0, 2)}, boundary=\"reflect\", dtype=x.dtype\n )\n\n\ndef test_map_overlap():\n x = da.arange(10, chunks=5)\n y = x.map_overlap(lambda x: x + len(x), depth=2, dtype=x.dtype, boundary=\"reflect\")\n assert_eq(y, np.arange(10) + 5 + 2 + 2)\n\n x = da.arange(10, chunks=5)\n y = x.map_overlap(\n lambda x: x + len(x), depth=np.int64(2), dtype=x.dtype, boundary=\"reflect\"\n )\n assert all([(type(s) is int) for s in y.shape])\n assert_eq(y, np.arange(10) + 5 + 2 + 2)\n\n x = da.ones((10, 10), chunks=(3, 4))\n z = x.map_overlap(lambda x: x, depth={1: 5}, boundary=\"reflect\")\n assert z.chunks[0] == x.chunks[0] # don't rechunk the first dimension\n\n x = np.arange(16).reshape((4, 4))\n d = da.from_array(x, chunks=(2, 2))\n exp1 = d.map_overlap(\n lambda x: x + x.size, depth=1, dtype=d.dtype, boundary=\"reflect\"\n )\n exp2 = d.map_overlap(\n lambda x: x + x.size,\n depth={0: 1, 1: 1},\n boundary={0: \"reflect\", 1: \"none\"},\n dtype=d.dtype,\n )\n exp3 = d.map_overlap(\n lambda x: x + x.size, depth={1: 1}, boundary={1: 
\"reflect\"}, dtype=d.dtype\n )\n exp4 = d.map_overlap(\n lambda x: x + x.size,\n depth={1: (1, 0)},\n boundary={0: \"none\", 1: \"none\"},\n dtype=d.dtype,\n )\n assert_eq(exp1, x + 16)\n assert_eq(exp2, x + 12)\n assert_eq(exp3, x + 8)\n assert_eq(\n exp4,\n np.block(\n [[x[0:2, 0:2] + 4, x[0:2, 2:4] + 6], [x[2:4, 0:2] + 4, x[2:4, 2:4] + 6]]\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_variadic_test_map_overlap_multiarray_variadic.assert_all_x_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_variadic_test_map_overlap_multiarray_variadic.assert_all_x_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 451, "end_line": 476, "span_ids": ["test_map_overlap_multiarray_variadic"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_variadic():\n # Test overlapping row slices from 3D arrays\n xs = [\n # Dim 0 will unify to chunks of size 4 for all:\n da.ones((12, 1, 1), chunks=((12,), 1, 1)),\n da.ones((12, 8, 1), chunks=((8, 4), 8, 1)),\n da.ones((12, 8, 4), chunks=((4, 8), 8, 4)),\n ]\n\n def func(*args):\n return np.array([sum(x.size for x in args)])\n\n x = da.map_overlap(\n func,\n *xs,\n chunks=(1,),\n depth=1,\n trim=False,\n drop_axis=[1, 2],\n boundary=\"reflect\",\n )\n\n # Each func call should get 4 rows from each array padded by 1 in each dimension\n size_per_slice = sum(np.pad(x[:4], 1, mode=\"constant\").size for x in xs)\n assert x.shape == (3,)\n assert all(x.compute() == size_per_slice)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_overlap_test_nearest_overlap.assert_array_almost_equal": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_nearest_overlap_test_nearest_overlap.assert_array_almost_equal", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 551, "end_line": 557, "span_ids": ["test_nearest_overlap"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nearest_overlap():\n a = 
np.arange(144).reshape(12, 12).astype(float)\n\n darr = da.from_array(a, chunks=(6, 6))\n garr = overlap(darr, depth={0: 5, 1: 5}, boundary={0: \"nearest\", 1: \"nearest\"})\n tarr = trim_internal(garr, {0: 5, 1: 5}, boundary=\"nearest\")\n assert_array_almost_equal(tarr, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_one_chunk_along_axis_test_constant_boundaries.assert_b_chunks_darr_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_one_chunk_along_axis_test_constant_boundaries.assert_b_chunks_darr_c", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 524, "end_line": 535, "span_ids": ["test_constant_boundaries", "test_one_chunk_along_axis"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_one_chunk_along_axis():\n a = np.arange(2 * 9).reshape(2, 9)\n darr = da.from_array(a, chunks=((2,), (2, 2, 2, 3)))\n g = overlap(darr, depth=0, boundary=0)\n assert a.shape == g.shape\n\n\ndef test_constant_boundaries():\n a = np.arange(1 * 9).reshape(1, 9)\n darr = da.from_array(a, chunks=((1,), (2, 2, 2, 3)))\n b = boundaries(darr, {0: 0, 1: 0}, {0: 0, 1: 0})\n assert b.chunks == darr.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_small_test_no_shared_keys_with_different_depths.da_compute_r_scheduler_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_small_test_no_shared_keys_with_different_depths.da_compute_r_scheduler_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 718, "end_line": 747, "span_ids": ["test_no_shared_keys_with_different_depths", "test_overlap_small"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_small():\n x = da.ones((10, 10), chunks=(5, 5))\n\n y = x.map_overlap(lambda x: x, depth=1, boundary=\"none\")\n assert len(y.dask) < 200\n\n y = x.map_overlap(lambda x: x, depth=1, boundary=\"none\")\n assert len(y.dask) < 100\n\n\ndef test_no_shared_keys_with_different_depths():\n da.random.seed(0)\n a = da.random.random((9, 9), chunks=(3, 3))\n\n def check(x):\n 
assert x.shape == (3, 3)\n return x\n\n r = [\n a.map_overlap(\n lambda a: a + 1,\n dtype=a.dtype,\n depth={j: int(i == j) for j in range(a.ndim)},\n boundary=\"none\",\n ).map_blocks(check, dtype=a.dtype)\n for i in range(a.ndim)\n ]\n\n assert set(r[0].dask) & set(r[1].dask) == set(a.dask)\n da.compute(*r, scheduler=\"single-threaded\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_small_test_overlap_few_dimensions_small.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_small_test_overlap_few_dimensions_small.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 750, "end_line": 766, "span_ids": ["test_overlap_few_dimensions_small"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_few_dimensions_small():\n x = da.ones((20, 20), chunks=(10, 10))\n\n a = x.map_overlap(lambda x: x, depth={0: 1}, boundary=\"none\")\n assert_eq(x, a)\n assert any(isinstance(k[1], float) for k in a.dask)\n assert all(isinstance(k[2], int) for k in a.dask)\n\n b = x.map_overlap(lambda x: x, depth={1: 1}, boundary=\"none\")\n assert_eq(x, b)\n assert all(isinstance(k[1], int) for k in b.dask)\n assert any(isinstance(k[2], float) for k in b.dask)\n\n c = x.map_overlap(lambda x: x, depth={0: 1, 1: 1}, boundary=\"none\")\n assert_eq(x, c)\n assert any(isinstance(k[1], float) for k in c.dask)\n assert any(isinstance(k[2], float) for k in c.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_test_overlap_few_dimensions.assert_len_c_dask_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_few_dimensions_test_overlap_few_dimensions.assert_len_c_dask_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 769, "end_line": 779, "span_ids": ["test_overlap_few_dimensions"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap_few_dimensions():\n x = da.ones((100, 100), chunks=(10, 10))\n\n a = x.map_overlap(lambda x: x, depth={0: 1}, 
boundary=\"none\")\n b = x.map_overlap(lambda x: x, depth={1: 1}, boundary=\"none\")\n c = x.map_overlap(lambda x: x, depth={0: 1, 1: 1}, boundary=\"none\")\n\n assert len(a.dask) == len(b.dask)\n assert len(a.dask) < len(c.dask)\n\n assert len(c.dask) < 10 * len(a.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentile_with_categoricals_test_percentile_with_categoricals.assert_same_keys_da_perce": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentile_with_categoricals_test_percentile_with_categoricals.assert_same_keys_da_perce", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 55, "end_line": 71, "span_ids": ["test_percentile_with_categoricals"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip\ndef test_percentile_with_categoricals():\n try:\n import pandas as pd\n except ImportError:\n return\n x0 = pd.Categorical([\"Alice\", \"Bob\", \"Charlie\", \"Dennis\", \"Alice\", \"Alice\"])\n x1 = pd.Categorical([\"Alice\", \"Bob\", \"Charlie\", \"Dennis\", \"Alice\", \"Alice\"])\n\n dsk = {(\"x\", 0): x0, (\"x\", 1): x1}\n\n x = da.Array(dsk, \"x\", chunks=((6, 6),))\n\n p = da.percentile(x, [50])\n assert (p.compute().categories == x0.categories).all()\n assert (p.compute().codes == [0]).all()\n assert same_keys(da.percentile(x, [50]), da.percentile(x, [50]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_unknown_chunk_sizes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_unknown_chunk_sizes_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 109, "end_line": 121, "span_ids": ["test_unknown_chunk_sizes"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@percentile_internal_methods\ndef test_unknown_chunk_sizes(internal_method):\n x = da.random.random(1000, chunks=(100,))\n x._chunks = ((np.nan,) * 10,)\n\n result = da.percentile(x, 50, internal_method=internal_method).compute()\n assert 0.1 < result < 0.9\n\n a, b = da.percentile(x, [40, 60], 
internal_method=internal_method).compute()\n assert 0.1 < a < 0.9\n assert 0.1 < b < 0.9\n assert a < b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_pytest_test_determinisim_through_dask_values.assert_eq_samples_1_samp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_pytest_test_determinisim_through_dask_values.assert_eq_samples_1_samp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 53, "span_ids": ["test_doc_randomstate", "imports", "test_RandomState", "test_serializability", "test_concurrency", "test_determinisim_through_dask_values"], "tokens": 399}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\n\nimport dask\nimport dask.array as da\nfrom dask.array.core import Array\nfrom dask.array.random import exponential, normal, random\nfrom dask.array.utils import assert_eq\nfrom dask.multiprocessing import _dumps, _loads\nfrom dask.utils import key_split\n\n\ndef test_RandomState():\n state = da.random.RandomState(5)\n x = state.normal(10, 1, size=10, chunks=5)\n assert_eq(x, x)\n\n state = da.random.RandomState(5)\n y = state.normal(10, 1, size=10, chunks=5)\n assert_eq(x, y)\n\n\ndef test_concurrency():\n state = da.random.RandomState(5)\n x = state.normal(10, 1, size=10, chunks=2)\n\n state = da.random.RandomState(5)\n y = state.normal(10, 1, size=10, chunks=2)\n assert (x.compute(scheduler=\"processes\") == y.compute(scheduler=\"processes\")).all()\n\n\ndef test_doc_randomstate():\n assert \"mean\" in da.random.RandomState(5).normal.__doc__\n\n\ndef test_serializability():\n state = da.random.RandomState(5)\n x = state.normal(10, 1, size=10, chunks=5)\n\n y = _loads(_dumps(x))\n\n assert_eq(x, y)\n\n\ndef test_determinisim_through_dask_values():\n samples_1 = da.random.RandomState(42).normal(size=1000, chunks=10)\n samples_2 = da.random.RandomState(42).normal(size=1000, chunks=10)\n\n assert set(samples_1.dask) == set(samples_2.dask)\n assert_eq(samples_1, samples_2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_randomstate_consistent_names_test_randomstate_consistent_names.assert_sorted_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_randomstate_consistent_names_test_randomstate_consistent_names.assert_sorted_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", 
"start_line": 56, "end_line": 64, "span_ids": ["test_randomstate_consistent_names"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_randomstate_consistent_names():\n state1 = da.random.RandomState(42)\n state2 = da.random.RandomState(42)\n assert sorted(state1.normal(size=(100, 100), chunks=(10, 10)).dask) == sorted(\n state2.normal(size=(100, 100), chunks=(10, 10)).dask\n )\n assert sorted(\n state1.normal(size=100, loc=4.5, scale=5.0, chunks=10).dask\n ) == sorted(state2.normal(size=100, loc=4.5, scale=5.0, chunks=10).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_test_parametrized_random_function.assert_len_y_90": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_test_parametrized_random_function.assert_len_y_90", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 67, "end_line": 90, "span_ids": ["test_parametrized_random_function", "test_random"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random():\n a = random((10, 10), chunks=(5, 5))\n assert isinstance(a, Array)\n assert isinstance(a.name, str) and a.name\n assert a.shape == (10, 10)\n assert a.chunks == ((5, 5), (5, 5))\n\n x = set(np.array(a).flat)\n\n assert len(x) > 90\n\n\ndef test_parametrized_random_function():\n a = exponential(1000, (10, 10), chunks=(5, 5))\n assert isinstance(a, Array)\n assert isinstance(a.name, str) and a.name\n assert a.shape == (10, 10)\n assert a.chunks == ((5, 5), (5, 5))\n\n x = np.array(a)\n assert 10 < x.mean() < 100000\n\n y = set(x.flat)\n assert len(y) > 90", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_kwargs_test_consistent_across_sizes.assert_eq_x1_x3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_kwargs_test_consistent_across_sizes.assert_eq_x1_x3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 135, "span_ids": ["test_consistent_across_sizes", "test_kwargs", "test_docs", "test_can_make_really_big_random_array", "test_random_seed", "test_unique_names"], "tokens": 
361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_kwargs():\n a = normal(loc=10.0, scale=0.1, size=(10, 10), chunks=(5, 5))\n assert isinstance(a, Array)\n x = np.array(a)\n assert 8 < x.mean() < 12\n\n\ndef test_unique_names():\n a = random((10, 10), chunks=(5, 5))\n b = random((10, 10), chunks=(5, 5))\n\n assert a.name != b.name\n\n\ndef test_docs():\n assert \"exponential\" in exponential.__doc__\n assert \"exponential\" in exponential.__name__\n assert \"# doctest: +SKIP\" in normal.__doc__\n\n\ndef test_can_make_really_big_random_array():\n normal(10, 1, (1000000, 1000000), chunks=(100000, 100000))\n\n\ndef test_random_seed():\n da.random.seed(123)\n x = da.random.normal(size=10, chunks=5)\n y = da.random.normal(size=10, chunks=5)\n\n da.random.seed(123)\n a = da.random.normal(size=10, chunks=5)\n b = da.random.normal(size=10, chunks=5)\n\n assert_eq(x, a)\n assert_eq(y, b)\n\n\ndef test_consistent_across_sizes():\n x1 = da.random.RandomState(123).random(20, chunks=20)\n x2 = da.random.RandomState(123).random(100, chunks=20)[:20]\n x3 = da.random.RandomState(123).random(200, chunks=20)[:20]\n assert_eq(x1, x2)\n assert_eq(x1, x3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_all_test_random_all.da_random_standard_t_2_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_random_all_test_random_all.da_random_standard_t_2_s", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 177, "span_ids": ["test_random_all"], "tokens": 666}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_all():\n da.random.beta(1, 2, size=5, chunks=3).compute()\n da.random.binomial(10, 0.5, size=5, chunks=3).compute()\n da.random.chisquare(1, size=5, chunks=3).compute()\n da.random.exponential(1, size=5, chunks=3).compute()\n da.random.f(1, 2, size=5, chunks=3).compute()\n da.random.gamma(5, 1, size=5, chunks=3).compute()\n da.random.geometric(1, size=5, chunks=3).compute()\n da.random.gumbel(1, size=5, chunks=3).compute()\n da.random.hypergeometric(1, 2, 3, size=5, chunks=3).compute()\n da.random.laplace(size=5, chunks=3).compute()\n da.random.logistic(size=5, chunks=3).compute()\n da.random.lognormal(size=5, chunks=3).compute()\n da.random.logseries(0.5, size=5, chunks=3).compute()\n da.random.multinomial(20, [1 / 6.0] * 6, size=5, chunks=3).compute()\n da.random.negative_binomial(5, 0.5, size=5, chunks=3).compute()\n da.random.noncentral_chisquare(2, 2, size=5, chunks=3).compute()\n\n da.random.noncentral_f(2, 2, 3, 
size=5, chunks=3).compute()\n da.random.normal(2, 2, size=5, chunks=3).compute()\n da.random.pareto(1, size=5, chunks=3).compute()\n da.random.poisson(size=5, chunks=3).compute()\n\n da.random.power(1, size=5, chunks=3).compute()\n da.random.rayleigh(size=5, chunks=3).compute()\n da.random.random_sample(size=5, chunks=3).compute()\n\n da.random.triangular(1, 2, 3, size=5, chunks=3).compute()\n da.random.uniform(size=5, chunks=3).compute()\n da.random.vonmises(2, 3, size=5, chunks=3).compute()\n da.random.wald(1, 2, size=5, chunks=3).compute()\n\n da.random.weibull(2, size=5, chunks=3).compute()\n da.random.zipf(2, size=5, chunks=3).compute()\n\n da.random.standard_cauchy(size=5, chunks=3).compute()\n da.random.standard_exponential(size=5, chunks=3).compute()\n da.random.standard_gamma(2, size=5, chunks=3).compute()\n da.random.standard_normal(size=5, chunks=3).compute()\n da.random.standard_t(2, size=5, chunks=3).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_array_broadcasting_test_multinomial.for_size_chunks_in_5_.assert_x_shape_y_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_array_broadcasting_test_multinomial.for_size_chunks_in_5_.assert_x_shape_y_shape", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 226, "span_ids": ["test_multinomial", "test_array_broadcasting"], "tokens": 580}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not hasattr(np, \"broadcast_to\"), reason='requires numpy 1.10 method \"broadcast_to\"'\n)\ndef test_array_broadcasting():\n arr = np.arange(6).reshape((2, 3))\n daones = da.ones((2, 3, 4), chunks=3)\n assert da.random.poisson(arr, chunks=3).compute().shape == (2, 3)\n\n for x in (arr, daones):\n y = da.random.normal(x, 2, chunks=3)\n assert y.shape == x.shape\n assert y.compute().shape == x.shape\n\n y = da.random.normal(daones, 2, chunks=3)\n assert set(daones.dask).issubset(set(y.dask))\n\n assert da.random.normal(\n np.ones((1, 4)), da.ones((2, 3, 4), chunks=(2, 3, 4)), chunks=(2, 3, 4)\n ).compute().shape == (2, 3, 4)\n assert da.random.normal(\n scale=np.ones((1, 4)),\n loc=da.ones((2, 3, 4), chunks=(2, 3, 4)),\n size=(2, 2, 3, 4),\n chunks=(2, 2, 3, 4),\n ).compute().shape == (2, 2, 3, 4)\n\n with pytest.raises(ValueError):\n da.random.normal(arr, np.ones((3, 1)), size=(2, 3, 4), chunks=3)\n\n for o in (np.ones(100), da.ones(100, chunks=(50,)), 1):\n a = da.random.normal(1000 * o, 0.01, chunks=(50,))\n assert 800 < a.mean().compute() < 1200\n\n # ensure that mis-matched chunks align well\n x = np.arange(10) ** 3\n y = da.from_array(x, chunks=(1,))\n z = da.random.normal(y, 0.01, chunks=(10,))\n\n assert 0.8 < z.mean().compute() / x.mean() < 1.2\n\n\ndef test_multinomial():\n for size, chunks in [(5, 3), ((5, 4), (2, 3))]:\n x = 
da.random.multinomial(20, [1 / 6.0] * 6, size=size, chunks=chunks)\n y = np.random.multinomial(20, [1 / 6.0] * 6, size=size)\n\n assert x.shape == y.shape == x.compute().shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_choice_test_choice.assert_len_res_len_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_choice_test_choice.assert_len_res_len_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 234, "end_line": 292, "span_ids": ["test_choice"], "tokens": 623}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_choice():\n np_dtype = np.random.choice(1, size=()).dtype\n size = (10, 3)\n chunks = 4\n x = da.random.choice(3, size=size, chunks=chunks)\n assert x.dtype == np_dtype\n assert x.shape == size\n res = x.compute()\n assert res.dtype == np_dtype\n assert res.shape == size\n\n py_a = [1, 3, 5, 7, 9]\n np_a = np.array(py_a, dtype=\"f8\")\n da_a = da.from_array(np_a, chunks=2)\n\n for a in [py_a, np_a, da_a]:\n x = da.random.choice(a, size=size, chunks=chunks)\n res = x.compute()\n expected_dtype = np.asarray(a).dtype\n assert x.dtype == expected_dtype\n assert res.dtype == expected_dtype\n assert set(np.unique(res)).issubset(np_a)\n\n np_p = np.array([0, 0.2, 0.2, 0.3, 0.3])\n da_p = da.from_array(np_p, chunks=2)\n\n for a, p in [(da_a, np_p), (np_a, da_p)]:\n x = da.random.choice(a, size=size, chunks=chunks, p=p)\n res = x.compute()\n assert x.dtype == np_a.dtype\n assert res.dtype == np_a.dtype\n assert set(np.unique(res)).issubset(np_a[1:])\n\n np_dtype = np.random.choice(1, size=(), p=np.array([1])).dtype\n x = da.random.choice(5, size=size, chunks=chunks, p=np_p)\n res = x.compute()\n assert x.dtype == np_dtype\n assert res.dtype == np_dtype\n\n errs = [\n (-1, None), # negative a\n (np_a[:, None], None), # a must be 1D\n (np_a, np_p[:, None]), # p must be 1D\n (np_a, np_p[:-2]), # a and p must match\n (3, np_p), # a and p must match\n (4, [0.2, 0.2, 0.3]),\n ] # p must sum to 1\n\n for (a, p) in errs:\n with pytest.raises(ValueError):\n da.random.choice(a, size=size, chunks=chunks, p=p)\n\n with pytest.raises(NotImplementedError):\n da.random.choice(da_a, size=size, chunks=chunks, replace=False)\n\n # Want to make sure replace=False works for a single-partition output array\n x = da.random.choice(da_a, size=da_a.shape[0], chunks=-1, replace=False)\n res = x.compute()\n assert len(res) == len(np.unique(res))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_create_with_auto_dimensions_test_permutation.assert_x_shape_100_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_create_with_auto_dimensions_test_permutation.assert_x_shape_100_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 295, "end_line": 327, "span_ids": ["test_create_with_auto_dimensions", "test_names", "test_permutation"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_create_with_auto_dimensions():\n with dask.config.set({\"array.chunk-size\": \"128MiB\"}):\n x = da.random.random((10000, 10000), chunks=(-1, \"auto\"))\n assert x.chunks == ((10000,), (1250,) * 8)\n\n y = da.random.random((10000, 10000), chunks=\"auto\")\n assert y.chunks == ((2500,) * 4, (2500,) * 4)\n\n\ndef test_names():\n name = da.random.normal(0, 1, size=(1000,), chunks=(500,)).name\n\n assert name.startswith(\"normal\")\n assert len(key_split(name)) < 10\n\n\ndef test_permutation():\n x = da.arange(12, chunks=3)\n y = da.random.permutation(x)\n\n assert y.shape == x.shape\n assert y.dtype == x.dtype\n\n y.compute() # smoke test\n\n a = da.random.RandomState(0)\n b = da.random.RandomState(0)\n r1 = a.permutation(x)\n r2 = b.permutation(x)\n assert_eq(r1, r2)\n\n x = da.random.permutation(100)\n assert x.shape == (100,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_external_randomstate_class_test_external_randomstate_class.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_external_randomstate_class_test_external_randomstate_class.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 330, "end_line": 350, "span_ids": ["test_external_randomstate_class"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_external_randomstate_class():\n randomgen = pytest.importorskip(\"randomgen\")\n\n rs = da.random.RandomState(\n RandomState=lambda seed: randomgen.RandomGenerator(randomgen.DSFMT(seed))\n )\n x = rs.normal(0, 1, size=10, chunks=(5,))\n assert_eq(x, x)\n\n rs = da.random.RandomState(\n RandomState=lambda seed: randomgen.RandomGenerator(randomgen.DSFMT(seed)),\n seed=123,\n )\n a = rs.normal(0, 1, size=10, chunks=(5,))\n rs = da.random.RandomState(\n RandomState=lambda seed: randomgen.RandomGenerator(randomgen.DSFMT(seed)),\n seed=123,\n )\n b = rs.normal(0, 1, size=10, chunks=(5,))\n assert a.name == b.name\n assert_eq(a, b)", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_auto_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_random.py_test_auto_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 348, "end_line": 374, "span_ids": ["test_randomstate_kwargs", "test_randint_dtype", "test_auto_chunks", "test_raises_bad_kwarg"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_auto_chunks():\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n x = da.random.random((10000, 10000))\n assert 4 < x.npartitions < 32\n\n\ndef test_randint_dtype():\n x = da.random.randint(0, 255, size=10, dtype=\"uint8\")\n assert_eq(x, x)\n assert x.dtype == \"uint8\"\n assert x.compute().dtype == \"uint8\"\n\n\ndef test_raises_bad_kwarg():\n with pytest.raises(Exception) as info:\n da.random.standard_normal(size=(10,), dtype=\"float64\")\n\n assert \"dtype\" in str(info.value)\n\n\ndef test_randomstate_kwargs():\n cupy = pytest.importorskip(\"cupy\")\n\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n x = rs.standard_normal((10, 5), dtype=np.float32)\n assert x.dtype == np.float32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_1_test_intersect_1.assert_answer_cross": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_1_test_intersect_1.assert_answer_cross", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 74, "span_ids": ["test_intersect_1"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_1():\n \"\"\"Convert 1 D chunks\"\"\"\n old = ((10, 10, 10, 10, 10),)\n new = ((25, 5, 20),)\n answer = [\n (((0, slice(0, 10)),), ((1, slice(0, 10)),), ((2, slice(0, 5)),)),\n (((2, slice(5, 10)),),),\n (((3, slice(0, 10)),), ((4, slice(0, 10)),)),\n ]\n cross = list(intersect_chunks(old_chunks=old, new_chunks=new))\n assert answer == cross", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_2_test_intersect_2.assert_answer_cross": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_2_test_intersect_2.assert_answer_cross", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 88, "span_ids": ["test_intersect_2"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_2():\n \"\"\"Convert 1 D chunks\"\"\"\n old = ((20, 20, 20, 20, 20),)\n new = ((58, 4, 20, 18),)\n answer = [\n (((0, slice(0, 20)),), ((1, slice(0, 20)),), ((2, slice(0, 18)),)),\n (((2, slice(18, 20)),), ((3, slice(0, 2)),)),\n (((3, slice(2, 20)),), ((4, slice(0, 2)),)),\n (((4, slice(2, 20)),),),\n ]\n cross = list(intersect_chunks(old_chunks=old, new_chunks=new))\n assert answer == cross", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_1d_test_rechunk_2d.assert_np_all_x2_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_1d_test_rechunk_2d.assert_np_all_x2_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 99, "span_ids": ["test_rechunk_2d", "test_rechunk_1d"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_1d():\n \"\"\"Try rechunking a random 1d matrix\"\"\"\n a = np.random.uniform(0, 1, 30)\n x = da.from_array(a, chunks=((10,) * 3,))\n new = ((5,) * 6,)\n x2 = rechunk(x, chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)\n\n\ndef test_rechunk_2d():\n \"\"\"Try rechunking a random 2d matrix\"\"\"\n a = np.random.uniform(0, 1, 300).reshape((10, 30))\n x = da.from_array(a, chunks=((1, 2, 3, 4), (5,) * 6))\n new = ((5, 5), (15,) * 2)\n x2 = rechunk(x, chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_4d_test_rechunk_expand.assert_np_all_y_compute_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_4d_test_rechunk_expand.assert_np_all_y_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 102, "end_line": 117, "span_ids": ["test_rechunk_expand", "test_rechunk_4d"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_4d():\n \"\"\"Try rechunking a random 4d matrix\"\"\"\n old = ((5, 5),) * 4\n a = np.random.uniform(0, 1, 10000).reshape((10,) * 4)\n x = da.from_array(a, chunks=old)\n new = ((10,),) * 4\n x2 = rechunk(x, chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)\n\n\ndef test_rechunk_expand():\n a = np.random.uniform(0, 1, 100).reshape((10, 10))\n x = da.from_array(a, chunks=(5, 5))\n y = x.rechunk(chunks=((3, 3, 3, 1), (3, 3, 3, 1)))\n assert np.all(y.compute() == a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_expand2_test_rechunk_expand2.for_off_off2_in_product_.if_a_off_off2_0_.assert_np_all_y_orig_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_expand2_test_rechunk_expand2.for_off_off2_in_product_.if_a_off_off2_0_.assert_np_all_y_orig_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 160, "end_line": 171, "span_ids": ["test_rechunk_expand2"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_expand2():\n (a, b) = (3, 2)\n orig = np.random.uniform(0, 1, a**b).reshape((a,) * b)\n for off, off2 in product(range(1, a - 1), range(1, a - 1)):\n old = ((a - off, off),) * b\n x = da.from_array(orig, chunks=old)\n new = ((a - off2, off2),) * b\n assert np.all(x.rechunk(chunks=new).compute() == orig)\n if a - off - off2 > 0:\n new = ((off, a - off2 - off, off2),) * b\n y = x.rechunk(chunks=new).compute()\n assert np.all(y == orig)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_method_test_rechunk_method.assert_np_all_x2_compute_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_method_test_rechunk_method.assert_np_all_x2_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 143, "end_line": 151, "span_ids": ["test_rechunk_method"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_method():\n \"\"\"Test rechunking can be done as a method of dask array.\"\"\"\n old = ((5, 2, 3),) * 4\n new = ((3, 3, 3, 1),) * 4\n a = np.random.uniform(0, 1, 10000).reshape((10,) * 4)\n x = da.from_array(a, chunks=old)\n x2 = x.rechunk(chunks=new)\n assert x2.chunks == new\n assert np.all(x2.compute() == a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_blockshape_test_dtype.assert_x_rechunk_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_blockshape_test_dtype.assert_x_rechunk_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 154, "end_line": 168, "span_ids": ["test_rechunk_blockshape", "test_dtype"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_blockshape():\n \"\"\"Test that blockshape can be used.\"\"\"\n new_shape, new_chunks = (10, 10), (4, 3)\n new_blockdims = normalize_chunks(new_chunks, new_shape)\n old_chunks = ((4, 4, 2), (3, 3, 3, 1))\n a = np.random.uniform(0, 1, 100).reshape((10, 10))\n x = da.from_array(a, chunks=old_chunks)\n check1 = rechunk(x, chunks=new_chunks)\n assert check1.chunks == new_blockdims\n assert np.all(check1.compute() == a)\n\n\ndef test_dtype():\n x = da.ones(5, chunks=(2,))\n assert x.rechunk(chunks=(1,)).dtype == x.dtype", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_dict_test_rechunk_with_dict.assert_y_chunks_24_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_dict_test_rechunk_with_dict.assert_y_chunks_24_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", 
"category": "test", "start_line": 162, "end_line": 173, "span_ids": ["test_rechunk_with_dict"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_with_dict():\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk(chunks={0: 12})\n assert y.chunks == ((12, 12), (8, 8, 8))\n\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk(chunks={0: (12, 12)})\n assert y.chunks == ((12, 12), (8, 8, 8))\n\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk(chunks={0: -1})\n assert y.chunks == ((24,), (8, 8, 8))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_empty_input_test_rechunk_intermediates.assert_len_y_dask_30": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_empty_input_test_rechunk_intermediates.assert_len_y_dask_30", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 176, "end_line": 254, "span_ids": ["test_rechunk_zero_dim_array_II", "test_rechunk_empty_array", "test_rechunk_with_null_dimensions", "test_rechunk_with_integer", "test_rechunk_with_empty_input", "test_rechunk_minus_one", "test_rechunk_with_zero_placeholders", "test_rechunk_same", "test_rechunk_0d", "test_rechunk_intermediates", "test_rechunk_zero_dim_array", "test_rechunk_empty"], "tokens": 717}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_with_empty_input():\n x = da.ones((24, 24), chunks=(4, 8))\n assert x.rechunk(chunks={}).chunks == x.chunks\n pytest.raises(ValueError, lambda: x.rechunk(chunks=()))\n\n\ndef test_rechunk_with_null_dimensions():\n x = da.from_array(np.ones((24, 24)), chunks=(4, 8))\n assert x.rechunk(chunks=(None, 4)).chunks == da.ones((24, 24), chunks=(4, 4)).chunks\n\n\ndef test_rechunk_with_integer():\n x = da.from_array(np.arange(5), chunks=4)\n y = x.rechunk(3)\n assert y.chunks == ((3, 2),)\n assert (x.compute() == y.compute()).all()\n\n\ndef test_rechunk_0d():\n a = np.array(42)\n x = da.from_array(a, chunks=())\n y = x.rechunk(())\n assert y.chunks == ()\n assert y.compute() == a\n\n\n@pytest.mark.parametrize(\n \"arr\", [da.array([]), da.array([[], []]), da.array([[[]], [[]]])]\n)\ndef test_rechunk_empty_array(arr):\n arr.rechunk()\n assert arr.size == 0\n\n\ndef test_rechunk_empty():\n x = da.ones((0, 10), chunks=(5, 5))\n y = x.rechunk((2, 2))\n assert y.chunks == ((0,), (2,) * 5)\n assert_eq(x, y)\n\n\ndef test_rechunk_zero_dim_array():\n x = da.zeros((4, 0), chunks=3)\n y = x.rechunk({0: 4})\n assert y.chunks == ((4,), (0,))\n assert_eq(x, y)\n\n\ndef 
test_rechunk_zero_dim_array_II():\n x = da.zeros((4, 0, 6, 10), chunks=3)\n y = x.rechunk({0: 4, 2: 2})\n assert y.chunks == ((4,), (0,), (2, 2, 2), (3, 3, 3, 1))\n assert_eq(x, y)\n\n\ndef test_rechunk_same():\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk(x.chunks)\n assert x is y\n\n\ndef test_rechunk_with_zero_placeholders():\n x = da.ones((24, 24), chunks=((12, 12), (24, 0)))\n y = da.ones((24, 24), chunks=((12, 12), (12, 12)))\n y = y.rechunk(((12, 12), (24, 0)))\n assert x.chunks == y.chunks\n\n\ndef test_rechunk_minus_one():\n x = da.ones((24, 24), chunks=(4, 8))\n y = x.rechunk((-1, 8))\n assert y.chunks == ((24,), (8, 8, 8))\n assert_eq(x, y)\n\n\ndef test_rechunk_intermediates():\n x = da.random.normal(10, 0.1, (10, 10), chunks=(10, 1))\n y = x.rechunk((1, 10))\n assert len(y.dask) > 30", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_divide_to_width_test_divide_to_width.assert_chunks_4_4_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_divide_to_width_test_divide_to_width.assert_chunks_4_4_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 257, "end_line": 262, "span_ids": ["test_divide_to_width"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divide_to_width():\n chunks = divide_to_width((8, 9, 10), 10)\n assert chunks == (8, 9, 10)\n chunks = divide_to_width((8, 2, 9, 10, 11, 12), 4)\n # Note how 9 gives (3, 3, 3), not (4, 4, 1) or whatever\n assert chunks == (4, 4, 2, 3, 3, 3, 3, 3, 4, 3, 4, 4, 4, 4, 4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_merge_to_number__assert_steps.assert_steps_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_merge_to_number__assert_steps.assert_steps_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 317, "span_ids": ["_assert_steps", "test_merge_to_number", "_plan"], "tokens": 700}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_to_number():\n chunks = merge_to_number((10,) * 4, 5)\n assert chunks == (10, 10, 10, 
10)\n chunks = merge_to_number((10,) * 4, 4)\n assert chunks == (10, 10, 10, 10)\n chunks = merge_to_number((10,) * 4, 3)\n assert chunks == (20, 10, 10)\n chunks = merge_to_number((10,) * 4, 2)\n assert chunks == (20, 20)\n chunks = merge_to_number((10,) * 4, 1)\n assert chunks == (40,)\n\n chunks = merge_to_number((10,) * 10, 2)\n assert chunks == (50,) * 2\n chunks = merge_to_number((10,) * 10, 3)\n assert chunks == (40, 30, 30)\n\n chunks = merge_to_number((5, 1, 1, 15, 10), 4)\n assert chunks == (5, 2, 15, 10)\n chunks = merge_to_number((5, 1, 1, 15, 10), 3)\n assert chunks == (7, 15, 10)\n chunks = merge_to_number((5, 1, 1, 15, 10), 2)\n assert chunks == (22, 10)\n chunks = merge_to_number((5, 1, 1, 15, 10), 1)\n assert chunks == (32,)\n\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 6)\n assert chunks == (2, 1, 1, 3, 1, 1)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 5)\n assert chunks == (2, 2, 3, 1, 1)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 4)\n assert chunks == (2, 2, 3, 2)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 3)\n assert chunks == (4, 3, 2)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 2)\n assert chunks == (4, 5)\n chunks = merge_to_number((1, 1, 1, 1, 3, 1, 1), 1)\n assert chunks == (9,)\n\n\ndef _plan(old_chunks, new_chunks, itemsize=1, block_size_limit=1e7, threshold=4):\n return plan_rechunk(\n old_chunks,\n new_chunks,\n itemsize=itemsize,\n block_size_limit=block_size_limit,\n threshold=threshold,\n )\n\n\ndef _assert_steps(steps, expected):\n assert len(steps) == len(expected)\n assert steps == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_test_plan_rechunk.for_i_in_range_len_steps_.assert_len_succ_1_le": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_test_plan_rechunk.for_i_in_range_len_steps_.assert_len_succ_1_le", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 386, "span_ids": ["test_plan_rechunk"], "tokens": 765}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk():\n c = (20,) * 2 # coarse\n f = (2,) * 20 # fine\n nc = (float(\"nan\"),) * 2 # nan-coarse\n nf = (float(\"nan\"),) * 20 # nan-fine\n\n # Trivial cases\n steps = _plan((), ())\n _assert_steps(steps, [()])\n steps = _plan((c, ()), (f, ()))\n _assert_steps(steps, [(f, ())])\n\n # No intermediate required\n steps = _plan((c,), (f,))\n _assert_steps(steps, [(f,)])\n steps = _plan((f,), (c,))\n _assert_steps(steps, [(c,)])\n steps = _plan((c, c), (f, f))\n _assert_steps(steps, [(f, f)])\n steps = _plan((f, f), (c, c))\n _assert_steps(steps, [(c, c)])\n steps = _plan((f, c), (c, c))\n _assert_steps(steps, [(c, c)])\n steps = _plan((c, c, c, c), (c, f, c, c))\n _assert_steps(steps, [(c, f, c, c)])\n\n # An intermediate is used to 
reduce graph size\n steps = _plan((f, c), (c, f))\n _assert_steps(steps, [(c, c), (c, f)])\n\n steps = _plan((c + c, c + f), (f + f, c + c))\n _assert_steps(steps, [(c + c, c + c), (f + f, c + c)])\n\n # Same, with unknown dim\n steps = _plan((nc + nf, c + c, c + f), (nc + nf, f + f, c + c))\n _assert_steps(steps, steps)\n\n # Regression test for #5908\n steps = _plan((c, c), (f, f), threshold=1)\n _assert_steps(steps, [(f, f)])\n\n # Just at the memory limit => an intermediate is used\n steps = _plan((f, c), (c, f), block_size_limit=400)\n _assert_steps(steps, [(c, c), (c, f)])\n\n # Hitting the memory limit => partial merge\n m = (10,) * 4 # mid\n\n steps = _plan((f, c), (c, f), block_size_limit=399)\n _assert_steps(steps, [(m, c), (c, f)])\n\n steps2 = _plan((f, c), (c, f), block_size_limit=3999, itemsize=10)\n _assert_steps(steps2, steps)\n\n # Larger problem size => more intermediates\n c = (1000,) * 2 # coarse\n f = (2,) * 1000 # fine\n\n steps = _plan((f, c), (c, f), block_size_limit=99999)\n assert len(steps) == 3\n assert steps[-1] == (c, f)\n for i in range(len(steps) - 1):\n prev = steps[i]\n succ = steps[i + 1]\n # Merging on the first dim, splitting on the second dim\n assert len(succ[0]) <= len(prev[0]) / 2.0\n assert len(succ[1]) >= len(prev[1]) * 2.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_5d_test_plan_rechunk_5d.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_5d_test_plan_rechunk_5d.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 389, "end_line": 400, "span_ids": ["test_plan_rechunk_5d"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk_5d():\n # 5d problem\n c = (10,) * 1 # coarse\n f = (1,) * 10 # fine\n\n steps = _plan((c, c, c, c, c), (f, f, f, f, f))\n _assert_steps(steps, [(f, f, f, f, f)])\n steps = _plan((f, f, f, f, c), (c, c, c, f, f))\n _assert_steps(steps, [(c, c, c, f, c), (c, c, c, f, f)])\n # Only 1 dim can be merged at first\n steps = _plan((c, c, f, f, c), (c, c, c, f, f), block_size_limit=2e4)\n _assert_steps(steps, [(c, c, c, f, c), (c, c, c, f, f)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_asymmetric_test_rechunk_warning.assert_not_w": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_asymmetric_test_rechunk_warning.assert_not_w", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 450, "span_ids": ["test_rechunk_warning", "test_plan_rechunk_asymmetric"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk_asymmetric():\n a = ((1,) * 1000, (80000000,))\n b = ((1000,), (80000,) * 1000)\n steps = plan_rechunk(a, b, itemsize=8)\n assert len(steps) > 1\n\n x = da.ones((1000, 80000000), chunks=(1, 80000000))\n y = x.rechunk((1000, x.shape[1] // 1000))\n assert len(y.dask) < 100000\n\n\ndef test_rechunk_warning():\n N = 20\n x = da.random.normal(size=(N, N, 100), chunks=(1, N, 100))\n with warnings.catch_warnings(record=True) as w:\n x = x.rechunk((N, 1, 100))\n\n assert not w", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_dont_concatenate_single_chunks_test_dont_concatenate_single_chunks.assert_not_any_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_dont_concatenate_single_chunks_test_dont_concatenate_single_chunks.assert_not_any_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 453, "end_line": 464, "span_ids": ["test_dont_concatenate_single_chunks"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape,chunks\", [[(4,), (2,)], [(4, 4), (2, 2)], [(4, 4), (4, 2)]]\n)\ndef test_dont_concatenate_single_chunks(shape, chunks):\n x = da.ones(shape, chunks=shape)\n y = x.rechunk(chunks)\n dsk = dict(y.dask)\n assert not any(\n funcname(task[0]).startswith(\"concat\")\n for task in dsk.values()\n if dask.istask(task)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_test_intersect_nan.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_test_intersect_nan.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 467, "end_line": 478, "span_ids": ["test_intersect_nan"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_nan():\n old_chunks = ((float(\"nan\"), float(\"nan\")), (8,))\n new_chunks = ((float(\"nan\"), float(\"nan\")), (4, 4))\n\n result = list(intersect_chunks(old_chunks, new_chunks))\n expected = [\n (((0, slice(0, None, None)), (0, slice(0, 4, None))),),\n (((0, slice(0, None, None)), (0, slice(4, 8, None))),),\n (((1, slice(0, None, None)), (0, slice(0, 4, None))),),\n (((1, slice(0, None, None)), (0, slice(4, 8, None))),),\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_single_test_intersect_nan_single.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_single_test_intersect_nan_single.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 481, "end_line": 490, "span_ids": ["test_intersect_nan_single"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_nan_single():\n old_chunks = ((float(\"nan\"),), (10,))\n new_chunks = ((float(\"nan\"),), (5, 5))\n\n result = list(intersect_chunks(old_chunks, new_chunks))\n expected = [\n (((0, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((0, slice(0, None, None)), (0, slice(5, 10, None))),),\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_long_test_intersect_nan_long.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_nan_long_test_intersect_nan_long.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 493, "end_line": 508, "span_ids": ["test_intersect_nan_long"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_nan_long():\n\n old_chunks = 
(tuple([float(\"nan\")] * 4), (10,))\n new_chunks = (tuple([float(\"nan\")] * 4), (5, 5))\n result = list(intersect_chunks(old_chunks, new_chunks))\n expected = [\n (((0, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((0, slice(0, None, None)), (0, slice(5, 10, None))),),\n (((1, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((1, slice(0, None, None)), (0, slice(5, 10, None))),),\n (((2, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((2, slice(0, None, None)), (0, slice(5, 10, None))),),\n (((3, slice(0, None, None)), (0, slice(0, 5, None))),),\n (((3, slice(0, None, None)), (0, slice(5, 10, None))),),\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_pandas_test_rechunk_unknown_from_pandas.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_pandas_test_rechunk_unknown_from_pandas.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 511, "end_line": 522, "span_ids": ["test_rechunk_unknown_from_pandas"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_unknown_from_pandas():\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n arr = np.random.randn(50, 10)\n x = dd.from_pandas(pd.DataFrame(arr), 2).values\n result = x.rechunk((None, (5, 5)))\n assert np.isnan(x.chunks[0]).all()\n assert np.isnan(result.chunks[0]).all()\n assert result.chunks[1] == (5, 5)\n expected = da.from_array(arr, chunks=((25, 25), (10,))).rechunk((None, (5, 5)))\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_array_test_rechunk_unknown_from_array.assert_eq_x_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_from_array_test_rechunk_unknown_from_array.assert_eq_x_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 525, "end_line": 534, "span_ids": ["test_rechunk_unknown_from_array"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_rechunk_unknown_from_array():\n dd = pytest.importorskip(\"dask.dataframe\")\n # pd = pytest.importorskip('pandas')\n x = dd.from_array(da.ones(shape=(4, 4), chunks=(2, 2))).values\n # result = x.rechunk({1: 5})\n result = x.rechunk((None, 4))\n assert np.isnan(x.chunks[0]).all()\n assert np.isnan(result.chunks[0]).all()\n assert x.chunks[1] == (4,)\n assert_eq(x, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_test_rechunk_unknown.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_test_rechunk_unknown.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 537, "end_line": 561, "span_ids": ["test_rechunk_unknown"], "tokens": 387}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"x, chunks\",\n [\n (da.ones(shape=(50, 10), chunks=(25, 10)), (None, 5)),\n (da.ones(shape=(50, 10), chunks=(25, 10)), {1: 5}),\n (da.ones(shape=(50, 10), chunks=(25, 10)), (None, (5, 5))),\n (da.ones(shape=(1000, 10), chunks=(5, 10)), (None, 5)),\n (da.ones(shape=(1000, 10), chunks=(5, 10)), {1: 5}),\n (da.ones(shape=(1000, 10), chunks=(5, 10)), (None, (5, 5))),\n (da.ones(shape=(10, 10), chunks=(10, 10)), (None, 5)),\n (da.ones(shape=(10, 10), chunks=(10, 10)), {1: 5}),\n (da.ones(shape=(10, 10), chunks=(10, 10)), (None, (5, 5))),\n (da.ones(shape=(10, 10), chunks=(10, 2)), (None, 5)),\n (da.ones(shape=(10, 10), chunks=(10, 2)), {1: 5}),\n (da.ones(shape=(10, 10), chunks=(10, 2)), (None, (5, 5))),\n ],\n)\ndef test_rechunk_unknown(x, chunks):\n dd = pytest.importorskip(\"dask.dataframe\")\n y = dd.from_array(x).values\n result = y.rechunk(chunks)\n expected = x.rechunk(chunks)\n\n assert_chunks_match(result.chunks, expected.chunks)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_explicit_test_rechunk_unknown_raises.with_pytest_raises_ValueE.x_rechunk_None_5_5_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_unknown_explicit_test_rechunk_unknown_raises.with_pytest_raises_ValueE.x_rechunk_None_5_5_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 564, "end_line": 587, "span_ids": ["test_rechunk_unknown_explicit", 
"assert_chunks_match", "test_rechunk_unknown_raises"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_unknown_explicit():\n dd = pytest.importorskip(\"dask.dataframe\")\n x = da.ones(shape=(10, 10), chunks=(5, 2))\n y = dd.from_array(x).values\n result = y.rechunk(((float(\"nan\"), float(\"nan\")), (5, 5)))\n expected = x.rechunk((None, (5, 5)))\n assert_chunks_match(result.chunks, expected.chunks)\n assert_eq(result, expected)\n\n\ndef assert_chunks_match(left, right):\n for x, y in zip(left, right):\n if np.isnan(x).any():\n assert np.isnan(x).all()\n else:\n assert x == y\n\n\ndef test_rechunk_unknown_raises():\n dd = pytest.importorskip(\"dask.dataframe\")\n\n x = dd.from_array(da.ones(shape=(10, 10), chunks=(5, 5))).values\n with pytest.raises(ValueError):\n x.rechunk((None, (5, 5, 5)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_single_test_old_to_new.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_single_test_old_to_new.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 590, "end_line": 612, "span_ids": ["test_old_to_new_single", "test_old_to_new"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_old_to_new_single():\n old = ((float(\"nan\"), float(\"nan\")), (8,))\n new = ((float(\"nan\"), float(\"nan\")), (4, 4))\n result = _old_to_new(old, new)\n\n expected = [\n [[(0, slice(0, None, None))], [(1, slice(0, None, None))]],\n [[(0, slice(0, 4, None))], [(0, slice(4, 8, None))]],\n ]\n\n assert result == expected\n\n\ndef test_old_to_new():\n old = ((float(\"nan\"),), (10,))\n new = ((float(\"nan\"),), (5, 5))\n result = _old_to_new(old, new)\n expected = [\n [[(0, slice(0, None, None))]],\n [[(0, slice(0, 5, None))], [(0, slice(5, 10, None))]],\n ]\n\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_large_test_old_to_new_large.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_large_test_old_to_new_large.assert_result_expected", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 615, "end_line": 629, "span_ids": ["test_old_to_new_large"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_old_to_new_large():\n old = (tuple([float(\"nan\")] * 4), (10,))\n new = (tuple([float(\"nan\")] * 4), (5, 5))\n\n result = _old_to_new(old, new)\n expected = [\n [\n [(0, slice(0, None, None))],\n [(1, slice(0, None, None))],\n [(2, slice(0, None, None))],\n [(3, slice(0, None, None))],\n ],\n [[(0, slice(0, 5, None))], [(0, slice(5, 10, None))]],\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_changing_raises_test_old_to_new_known.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_changing_raises_test_old_to_new_known.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 632, "end_line": 651, "span_ids": ["test_old_to_new_known", "test_changing_raises"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_changing_raises():\n nan = float(\"nan\")\n with pytest.raises(ValueError) as record:\n _old_to_new(((nan, nan), (4, 4)), ((nan, nan, nan), (4, 4)))\n\n assert \"unchanging\" in str(record.value)\n\n\ndef test_old_to_new_known():\n old = ((10, 10, 10, 10, 10),)\n new = ((25, 5, 20),)\n result = _old_to_new(old, new)\n expected = [\n [\n [(0, slice(0, 10, None)), (1, slice(0, 10, None)), (2, slice(0, 5, None))],\n [(2, slice(5, 10, None))],\n [(3, slice(0, 10, None)), (4, slice(0, 10, None))],\n ]\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_dim_test_rechunk_avoid_needless_chunking.assert_len_dsk_8_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_dim_test_rechunk_avoid_needless_chunking.assert_len_dsk_8_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 654, "end_line": 671, "span_ids": ["test_rechunk_zero_dim", 
"test_rechunk_empty_chunks", "test_rechunk_avoid_needless_chunking"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_zero_dim():\n da = pytest.importorskip(\"dask.array\")\n\n x = da.ones((0, 10, 100), chunks=(0, 10, 10)).rechunk((0, 10, 50))\n assert len(x.compute()) == 0\n\n\ndef test_rechunk_empty_chunks():\n x = da.zeros((7, 24), chunks=((7,), (10, 0, 0, 9, 0, 5)))\n y = x.rechunk((2, 3))\n assert_eq(x, y)\n\n\ndef test_rechunk_avoid_needless_chunking():\n x = da.ones(16, chunks=2)\n y = x.rechunk(8)\n dsk = y.__dask_graph__()\n assert len(dsk) <= 8 + 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_1d_test_rechunk_auto_1d.assert_y_chunks_expec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_1d_test_rechunk_auto_1d.assert_y_chunks_expec", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 674, "end_line": 687, "span_ids": ["test_rechunk_auto_1d"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape,chunks,bs,expected\",\n [\n (100, 1, 10, (10,) * 10),\n (100, 50, 10, (10,) * 10),\n (100, 100, 10, (10,) * 10),\n (20, 7, 10, (7, 7, 6)),\n (20, (1, 1, 1, 1, 6, 2, 1, 7), 5, (5, 5, 5, 5)),\n ],\n)\ndef test_rechunk_auto_1d(shape, chunks, bs, expected):\n x = da.ones(shape, chunks=(chunks,))\n y = x.rechunk({0: \"auto\"}, block_size_limit=bs * x.dtype.itemsize)\n assert y.chunks == (expected,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_2d_test_rechunk_auto_2d._limited_by_largest": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_2d_test_rechunk_auto_2d._limited_by_largest", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 690, "end_line": 707, "span_ids": ["test_rechunk_auto_2d"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_auto_2d():\n x = da.ones((20, 20), chunks=(2, 2))\n y = x.rechunk({0: -1, 1: \"auto\"}, block_size_limit=20 * x.dtype.itemsize)\n assert y.chunks == ((20,), (1,) * 20)\n\n x = da.ones((20, 20), chunks=(2, 2))\n y = x.rechunk((-1, \"auto\"), block_size_limit=80 * x.dtype.itemsize)\n assert y.chunks == ((20,), (4,) * 5)\n\n x = da.ones((20, 20), chunks=((2, 2)))\n y = x.rechunk({0: \"auto\"}, block_size_limit=20 * x.dtype.itemsize)\n assert y.chunks[1] == x.chunks[1]\n assert y.chunks[0] == (10, 10)\n\n x = da.ones((20, 20), chunks=((2,) * 10, (2, 2, 2, 2, 2, 5, 5)))\n y = x.rechunk({0: \"auto\"}, block_size_limit=20 * x.dtype.itemsize)\n assert y.chunks[1] == x.chunks[1]\n assert y.chunks[0] == (4, 4, 4, 4, 4) # limited by largest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_3d_test_rechunk_auto_3d._even_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_3d_test_rechunk_auto_3d._even_split", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 710, "end_line": 715, "span_ids": ["test_rechunk_auto_3d"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_auto_3d():\n x = da.ones((20, 20, 20), chunks=((2, 2, 2)))\n y = x.rechunk({0: \"auto\", 1: \"auto\"}, block_size_limit=200 * x.dtype.itemsize)\n assert y.chunks[2] == x.chunks[2]\n assert y.chunks[0] == (10, 10)\n assert y.chunks[1] == (10, 10) # even split", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_image_stack_test_rechunk_auto_image_stack.None_2.assert_z_chunks_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_auto_image_stack_test_rechunk_auto_image_stack.None_2.assert_z_chunks_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 718, "end_line": 733, "span_ids": ["test_rechunk_auto_image_stack"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", 
[100, 1000])\ndef test_rechunk_auto_image_stack(n):\n with dask.config.set({\"array.chunk-size\": \"10MiB\"}):\n x = da.ones((n, 1000, 1000), chunks=(1, 1000, 1000), dtype=\"uint8\")\n y = x.rechunk(\"auto\")\n assert y.chunks == ((10,) * (n // 10), (1000,), (1000,))\n assert y.rechunk(\"auto\").chunks == y.chunks # idempotent\n\n with dask.config.set({\"array.chunk-size\": \"7MiB\"}):\n z = x.rechunk(\"auto\")\n assert z.chunks == ((5,) * (n // 5), (1000,), (1000,))\n\n with dask.config.set({\"array.chunk-size\": \"1MiB\"}):\n x = da.ones((n, 1000, 1000), chunks=(1, 1000, 1000), dtype=\"float64\")\n z = x.rechunk(\"auto\")\n assert z.chunks == ((1,) * n, (250,) * 4, (250,) * 4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_down_test_rechunk_down.None_2.assert_z_chunks_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_down_test_rechunk_down.None_2.assert_z_chunks_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 736, "end_line": 751, "span_ids": ["test_rechunk_down"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_down():\n with dask.config.set({\"array.chunk-size\": \"10MiB\"}):\n x = da.ones((100, 1000, 1000), chunks=(1, 1000, 1000), dtype=\"uint8\")\n y = x.rechunk(\"auto\")\n assert y.chunks == ((10,) * 10, (1000,), (1000,))\n\n with dask.config.set({\"array.chunk-size\": \"1MiB\"}):\n z = y.rechunk(\"auto\")\n assert z.chunks == ((5,) * 20, (250,) * 4, (250,) * 4)\n\n with dask.config.set({\"array.chunk-size\": \"1MiB\"}):\n z = y.rechunk({0: \"auto\"})\n assert z.chunks == ((1,) * 100, (1000,), (1000,))\n\n z = y.rechunk({1: \"auto\"})\n assert z.chunks == ((10,) * 10, (100,) * 10, (1000,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_1d_test_reduction_1d_test.if_split_every_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_1d_test_reduction_1d_test.if_split_every_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 72, "span_ids": ["reduction_1d_test"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):\n assert_eq(da_func(darr), np_func(narr))\n assert_eq(\n da_func(narr), np_func(narr)\n ) # Ensure Dask reductions work with NumPy arrays\n assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))\n assert_eq(da_func(darr, axis=()), np_func(narr, axis=()))\n assert same_keys(da_func(darr), da_func(darr))\n assert same_keys(da_func(darr, keepdims=True), da_func(darr, keepdims=True))\n if use_dtype:\n assert_eq(da_func(darr, dtype=\"f8\"), np_func(narr, dtype=\"f8\"))\n assert_eq(da_func(darr, dtype=\"i8\"), np_func(narr, dtype=\"i8\"))\n assert same_keys(da_func(darr, dtype=\"i8\"), da_func(darr, dtype=\"i8\"))\n if split_every:\n a1 = da_func(darr, split_every=2)\n a2 = da_func(darr, split_every={0: 2})\n assert same_keys(a1, a2)\n assert_eq(a1, np_func(narr))\n assert_eq(a2, np_func(narr))\n assert_eq(\n da_func(darr, keepdims=True, split_every=2), np_func(narr, keepdims=True)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_1D_test_reductions_1D.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_1D_test_reductions_1D.None_15", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 96, "span_ids": ["test_reductions_1D"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [\"f4\", \"i4\"])\ndef test_reductions_1D(dtype):\n x = np.arange(5).astype(dtype)\n a = da.from_array(x, chunks=(2,))\n\n reduction_1d_test(da.sum, a, np.sum, x)\n reduction_1d_test(da.prod, a, np.prod, x)\n reduction_1d_test(da.mean, a, np.mean, x)\n reduction_1d_test(da.var, a, np.var, x)\n reduction_1d_test(da.std, a, np.std, x)\n reduction_1d_test(da.min, a, np.min, x, False)\n reduction_1d_test(da.max, a, np.max, x, False)\n reduction_1d_test(da.any, a, np.any, x, False)\n reduction_1d_test(da.all, a, np.all, x, False)\n\n reduction_1d_test(da.nansum, a, np.nansum, x)\n reduction_1d_test(da.nanprod, a, np.nanprod, x)\n reduction_1d_test(da.nanmean, a, np.mean, x)\n reduction_1d_test(da.nanvar, a, np.var, x)\n reduction_1d_test(da.nanstd, a, np.std, x)\n reduction_1d_test(da.nanmin, a, np.nanmin, x, False)\n reduction_1d_test(da.nanmax, a, np.nanmax, x, False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reduction_errors_test_reductions_2D.None_15": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reduction_errors_test_reductions_2D.None_15", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 217, "span_ids": ["test_reductions_2D", "test_reduction_errors"], "tokens": 479}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction_errors():\n x = da.ones((5, 5), chunks=(3, 3))\n with pytest.raises(ValueError):\n x.sum(axis=2)\n with pytest.raises(ValueError):\n x.sum(axis=-3)\n\n\n@pytest.mark.slow\n@pytest.mark.filterwarnings(\"ignore:overflow encountered in reduce:RuntimeWarning\")\n@pytest.mark.parametrize(\"dtype\", [\"f4\", \"i4\"])\ndef test_reductions_2D(dtype):\n x = np.arange(1, 122).reshape((11, 11)).astype(dtype)\n a = da.from_array(x, chunks=(4, 4))\n\n b = a.sum(keepdims=True)\n assert b.__dask_keys__() == [[(b.name, 0, 0)]]\n\n reduction_2d_test(da.sum, a, np.sum, x)\n reduction_2d_test(da.prod, a, np.prod, x)\n reduction_2d_test(da.mean, a, np.mean, x)\n reduction_2d_test(da.var, a, np.var, x, False) # Difference in dtype algo\n reduction_2d_test(da.std, a, np.std, x, False) # Difference in dtype algo\n reduction_2d_test(da.min, a, np.min, x, False)\n reduction_2d_test(da.max, a, np.max, x, False)\n reduction_2d_test(da.any, a, np.any, x, False)\n reduction_2d_test(da.all, a, np.all, x, False)\n\n reduction_2d_test(da.nansum, a, np.nansum, x)\n reduction_2d_test(da.nanprod, a, np.nanprod, x)\n reduction_2d_test(da.nanmean, a, np.mean, x)\n reduction_2d_test(da.nanvar, a, np.nanvar, x, False) # Difference in dtype algo\n reduction_2d_test(da.nanstd, a, np.nanstd, x, False) # Difference in dtype algo\n reduction_2d_test(da.nanmin, a, np.nanmin, x, False)\n reduction_2d_test(da.nanmax, a, np.nanmax, x, False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_test_arg_reductions.assert_eq_dfunc_a2_0_sp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_test_arg_reductions.assert_eq_dfunc_a2_0_sp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 188, "end_line": 218, "span_ids": ["test_arg_reductions"], "tokens": 335}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n [\"dfunc\", \"func\"],\n [\n (da.argmin, np.argmin),\n (da.argmax, np.argmax),\n (da.nanargmin, np.nanargmin),\n 
(da.nanargmax, np.nanargmax),\n ],\n)\ndef test_arg_reductions(dfunc, func):\n x = np.random.random((10, 10, 10))\n a = da.from_array(x, chunks=(3, 4, 5))\n\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n with config.set(split_every=2):\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n\n pytest.raises(ValueError, lambda: dfunc(a, 3))\n pytest.raises(TypeError, lambda: dfunc(a, (0, 1)))\n\n x2 = np.arange(10)\n a2 = da.from_array(x2, chunks=3)\n assert_eq(dfunc(a2), func(x2))\n assert_eq(dfunc(a2, 0), func(x2, 0))\n assert_eq(dfunc(a2, 0, split_every=2), func(x2, 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_unknown_chunksize_test_arg_reductions_unknown_single_chunksize.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_arg_reductions_unknown_chunksize_test_arg_reductions_unknown_single_chunksize.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 246, "end_line": 274, "span_ids": ["test_arg_reductions_unknown_chunksize_2d", "test_arg_reductions_unknown_chunksize", "test_arg_reductions_unknown_single_chunksize"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"argmax\", \"nanargmax\"])\ndef test_arg_reductions_unknown_chunksize(func):\n x = da.arange(10, chunks=5)\n x = x[x > 1]\n\n with pytest.raises(ValueError) as info:\n getattr(da, func)(x)\n\n assert \"unknown chunksize\" in str(info.value)\n\n\n@pytest.mark.parametrize(\"func\", [\"argmax\", \"nanargmax\"])\ndef test_arg_reductions_unknown_chunksize_2d(func):\n x = da.ones((10, 10), chunks=(5, 5))\n x = x[x[0, :] > 0, :] # unknown chunks in first dimension only\n\n with pytest.raises(ValueError):\n getattr(da, func)(x, axis=0)\n\n getattr(da, func)(x, axis=1).compute()\n\n\n@pytest.mark.parametrize(\"func\", [\"argmax\", \"nanargmax\"])\ndef test_arg_reductions_unknown_single_chunksize(func):\n x = da.ones((10, 10), chunks=(10, 10))\n x = x[x[0, :] > 0, :] # unknown chunks in first dimension only\n\n getattr(da, func)(x, axis=0).compute()\n getattr(da, func)(x, axis=1).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_moment_test_moment.None_7": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_moment_test_moment.None_7", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 341, "end_line": 362, "span_ids": ["test_moment"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_moment():\n def moment(x, n, axis=None):\n return ((x - x.mean(axis=axis, keepdims=True)) ** n).sum(\n axis=axis\n ) / np.ones_like(x).sum(axis=axis)\n\n # Poorly conditioned\n x = np.array([1.0, 2.0, 3.0] * 10).reshape((3, 10)) + 1e8\n a = da.from_array(x, chunks=5)\n assert_eq(a.moment(2), moment(x, 2))\n assert_eq(a.moment(3), moment(x, 3))\n assert_eq(a.moment(4), moment(x, 4))\n\n x = np.arange(1, 122).reshape((11, 11)).astype(\"f8\")\n a = da.from_array(x, chunks=(4, 4))\n assert_eq(a.moment(4, axis=1), moment(x, 4, axis=1))\n assert_eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))\n\n # Tree reduction\n assert_eq(a.moment(order=4, split_every=4), moment(x, 4))\n assert_eq(a.moment(order=4, axis=0, split_every=4), moment(x, 4, axis=0))\n assert_eq(a.moment(order=4, axis=1, split_every=4), moment(x, 4, axis=1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_negative_axes_test_reductions_with_negative_axes.assert_eq_a_sum_axis_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_negative_axes_test_reductions_with_negative_axes.assert_eq_a_sum_axis_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 365, "end_line": 373, "span_ids": ["test_reductions_with_negative_axes"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_with_negative_axes():\n x = np.random.random((4, 4, 4))\n a = da.from_array(x, chunks=2)\n\n assert_eq(a.argmin(axis=-1), x.argmin(axis=-1))\n assert_eq(a.argmin(axis=-1, split_every=2), x.argmin(axis=-1))\n\n assert_eq(a.sum(axis=-1), x.sum(axis=-1))\n assert_eq(a.sum(axis=(0, -1)), x.sum(axis=(0, -1)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_test_nan.assert_eq_np_nanprod_x_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_test_nan.assert_eq_np_nanprod_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 376, "end_line": 389, "span_ids": ["test_nan"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nan():\n x = np.array([[1, np.nan, 3, 4], [5, 6, 7, np.nan], [9, 10, 11, 12]])\n d = da.from_array(x, chunks=(2, 2))\n\n assert_eq(np.nansum(x), da.nansum(d))\n assert_eq(np.nansum(x, axis=0), da.nansum(d, axis=0))\n assert_eq(np.nanmean(x, axis=1), da.nanmean(d, axis=1))\n assert_eq(np.nanmin(x, axis=1), da.nanmin(d, axis=1))\n assert_eq(np.nanmax(x, axis=(0, 1)), da.nanmax(d, axis=(0, 1)))\n assert_eq(np.nanvar(x), da.nanvar(d))\n assert_eq(np.nanstd(x, axis=0), da.nanstd(d, axis=0))\n assert_eq(np.nanargmin(x, axis=0), da.nanargmin(d, axis=0))\n assert_eq(np.nanargmax(x, axis=0), da.nanargmax(d, axis=0))\n assert_eq(np.nanprod(x), da.nanprod(d))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_0d_array_test_reduction_on_scalar.assert_x_x_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_0d_array_test_reduction_on_scalar.assert_x_x_all_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 423, "end_line": 436, "span_ids": ["test_0d_array", "test_reduction_on_scalar"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_0d_array():\n x = da.mean(da.ones(4, chunks=4), axis=()).compute()\n x = da.mean(da.ones(4, chunks=4), axis=0).compute()\n y = np.mean(np.ones(4))\n assert type(x) == type(y)\n\n x = da.sum(da.zeros(4, chunks=1)).compute()\n y = np.sum(np.zeros(4))\n assert type(x) == type(y)\n\n\ndef test_reduction_on_scalar():\n x = da.from_array(np.array(1.0), chunks=())\n assert (x == x).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_empty_array_assert_max_deps.if_eq_.else_.assert_max_map_len_depen": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_with_empty_array_assert_max_deps.if_eq_.else_.assert_max_map_len_depen", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 452, "end_line": 473, "span_ids": ["assert_max_deps", "test_reductions_with_empty_array"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_with_empty_array():\n dx1 = da.ones((10, 0, 5), chunks=4)\n x1 = dx1.compute()\n dx2 = da.ones((0, 0, 0), chunks=4)\n x2 = dx2.compute()\n\n for dx, x in [(dx1, x1), (dx2, x2)]:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning) # Mean of empty slice\n assert_eq(dx.mean(), x.mean())\n assert_eq(dx.mean(axis=()), x.mean(axis=()))\n assert_eq(dx.mean(axis=0), x.mean(axis=0))\n assert_eq(dx.mean(axis=1), x.mean(axis=1))\n assert_eq(dx.mean(axis=2), x.mean(axis=2))\n\n\ndef assert_max_deps(x, n, eq=True):\n dependencies, dependents = get_deps(x.dask)\n if eq:\n assert max(map(len, dependencies.values())) == n\n else:\n assert max(map(len, dependencies.values())) <= n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_depth_test_tree_reduce_depth.None_26": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_depth_test_tree_reduce_depth.None_26", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 462, "end_line": 496, "span_ids": ["test_tree_reduce_depth"], "tokens": 646}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tree_reduce_depth():\n # 2D\n x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))\n thresh = {0: 2, 1: 3}\n assert_max_deps(x.sum(split_every=thresh), 2 * 3)\n assert_max_deps(x.sum(axis=(), split_every=thresh), 1)\n assert_max_deps(x.sum(axis=0, split_every=thresh), 2)\n assert_max_deps(x.sum(axis=1, split_every=thresh), 3)\n assert_max_deps(x.sum(split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(), split_every=20), 1)\n assert_max_deps(x.sum(axis=0, split_every=20), 4)\n assert_max_deps(x.sum(axis=1, split_every=20), 6)\n\n # 3D\n x = da.from_array(np.arange(11 * 22 * 29).reshape((11, 22, 29)), chunks=(3, 4, 5))\n thresh = {0: 2, 1: 3, 2: 4}\n assert_max_deps(x.sum(split_every=thresh), 2 * 3 * 4)\n assert_max_deps(x.sum(axis=(), split_every=thresh), 1)\n assert_max_deps(x.sum(axis=0, 
split_every=thresh), 2)\n assert_max_deps(x.sum(axis=1, split_every=thresh), 3)\n assert_max_deps(x.sum(axis=2, split_every=thresh), 4)\n assert_max_deps(x.sum(axis=(0, 1), split_every=thresh), 2 * 3)\n assert_max_deps(x.sum(axis=(0, 2), split_every=thresh), 2 * 4)\n assert_max_deps(x.sum(axis=(1, 2), split_every=thresh), 3 * 4)\n assert_max_deps(x.sum(split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(), split_every=20), 1)\n assert_max_deps(x.sum(axis=0, split_every=20), 4)\n assert_max_deps(x.sum(axis=1, split_every=20), 6)\n assert_max_deps(x.sum(axis=2, split_every=20), 6)\n assert_max_deps(x.sum(axis=(0, 1), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(0, 2), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(1, 2), split_every=20), 20, False)\n assert_max_deps(x.sum(axis=(0, 1), split_every=40), 4 * 6)\n assert_max_deps(x.sum(axis=(0, 2), split_every=40), 4 * 6)\n assert_max_deps(x.sum(axis=(1, 2), split_every=40), 6 * 6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_set_options_test_array_reduction_out.assert_eq_x_func_np_ones": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_tree_reduce_set_options_test_array_reduction_out.assert_eq_x_func_np_ones", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 533, "end_line": 566, "span_ids": ["test_array_reduction_out", "test_tree_reduce_set_options", "test_general_reduction_names", "test_reduction_names"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tree_reduce_set_options():\n x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))\n with config.set(split_every={0: 2, 1: 3}):\n assert_max_deps(x.sum(), 2 * 3)\n assert_max_deps(x.sum(axis=()), 1)\n assert_max_deps(x.sum(axis=0), 2)\n\n\ndef test_reduction_names():\n x = da.ones(5, chunks=(2,))\n assert x.sum().name.startswith(\"sum\")\n assert \"max\" in x.max().name.split(\"-\")[0]\n assert x.var().name.startswith(\"var\")\n assert x.all().name.startswith(\"all\")\n assert any(k[0].startswith(\"nansum\") for k in da.nansum(x).dask)\n assert x.mean().name.startswith(\"mean\")\n\n\ndef test_general_reduction_names():\n dtype = int\n a = da.reduction(\n da.ones(10, dtype, chunks=2), np.sum, np.sum, dtype=dtype, name=\"foo\"\n )\n names, tokens = list(zip_longest(*[key[0].rsplit(\"-\", 1) for key in a.dask]))\n assert set(names) == {\"ones_like\", \"foo\", \"foo-partial\", \"foo-aggregate\"}\n assert all(tokens)\n\n\n@pytest.mark.parametrize(\"func\", [np.sum, np.argmax])\ndef test_array_reduction_out(func):\n x = da.arange(10, chunks=(5,))\n y = da.ones((10, 10), chunks=(4, 4))\n func(y, axis=0, out=x)\n assert_eq(x, func(np.ones((10, 10)), axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_array_cumreduction_axis_test_array_cumreduction_out.assert_eq_x_func_np_ones": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_array_cumreduction_axis_test_array_cumreduction_out.assert_eq_x_func_np_ones", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 536, "end_line": 564, "span_ids": ["test_array_cumreduction_axis", "test_array_cumreduction_out"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"cumsum\", \"cumprod\", \"nancumsum\", \"nancumprod\"])\n@pytest.mark.parametrize(\"use_nan\", [False, True])\n@pytest.mark.parametrize(\"axis\", [None, 0, 1, -1])\n@pytest.mark.parametrize(\"method\", [\"sequential\", \"blelloch\"])\ndef test_array_cumreduction_axis(func, use_nan, axis, method):\n np_func = getattr(np, func)\n da_func = getattr(da, func)\n\n s = (10, 11, 12)\n a = np.arange(np.prod(s), dtype=float).reshape(s)\n if use_nan:\n a[1] = np.nan\n d = da.from_array(a, chunks=(4, 5, 6))\n if func in [\"cumprod\", \"nancumprod\"] and method == \"blelloch\" and axis is None:\n with pytest.warns(RuntimeWarning):\n da_func(d, axis=axis, method=method).compute()\n return\n\n a_r = np_func(a, axis=axis)\n d_r = da_func(d, axis=axis, method=method)\n\n assert_eq(a_r, d_r)\n\n\n@pytest.mark.parametrize(\"func\", [np.cumsum, np.cumprod])\ndef test_array_cumreduction_out(func):\n x = da.ones((10, 10), chunks=(4, 4))\n func(x, axis=0, out=x)\n assert_eq(x, func(np.ones((10, 10)), axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk1_test_topk_argtopk1.None_1.daskfunc_b_k_axis_3_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk1_test_topk_argtopk1.None_1.daskfunc_b_k_axis_3_s", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 613, "span_ids": ["test_topk_argtopk1"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npfunc,daskfunc\", [(np.sort, da.topk), (np.argsort, 
da.argtopk)]\n)\n@pytest.mark.parametrize(\"split_every\", [None, 2, 4, 8])\ndef test_topk_argtopk1(npfunc, daskfunc, split_every):\n # Test data\n k = 5\n # Test at least 3 levels of aggregation when split_every=2\n # to stress the different chunk, combine, aggregate kernels\n npa = np.random.random(800)\n npb = np.random.random((10, 20, 30))\n\n a = da.from_array(npa, chunks=((120, 80, 100, 200, 300),))\n b = da.from_array(npb, chunks=(4, 8, 8))\n\n # 1-dimensional arrays\n # top 5 elements, sorted descending\n assert_eq(npfunc(npa)[-k:][::-1], daskfunc(a, k, split_every=split_every))\n # bottom 5 elements, sorted ascending\n assert_eq(npfunc(npa)[:k], daskfunc(a, -k, split_every=split_every))\n\n # n-dimensional arrays\n # also testing when k > chunk\n # top 5 elements, sorted descending\n assert_eq(\n npfunc(npb, axis=0)[-k:, :, :][::-1, :, :],\n daskfunc(b, k, axis=0, split_every=split_every),\n )\n assert_eq(\n npfunc(npb, axis=1)[:, -k:, :][:, ::-1, :],\n daskfunc(b, k, axis=1, split_every=split_every),\n )\n assert_eq(\n npfunc(npb, axis=-1)[:, :, -k:][:, :, ::-1],\n daskfunc(b, k, axis=-1, split_every=split_every),\n )\n with pytest.raises(ValueError):\n daskfunc(b, k, axis=3, split_every=split_every)\n\n # bottom 5 elements, sorted ascending\n assert_eq(\n npfunc(npb, axis=0)[:k, :, :], daskfunc(b, -k, axis=0, split_every=split_every)\n )\n assert_eq(\n npfunc(npb, axis=1)[:, :k, :], daskfunc(b, -k, axis=1, split_every=split_every)\n )\n assert_eq(\n npfunc(npb, axis=-1)[:, :, :k],\n daskfunc(b, -k, axis=-1, split_every=split_every),\n )\n with pytest.raises(ValueError):\n daskfunc(b, -k, axis=3, split_every=split_every)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk2_test_topk_argtopk2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk2_test_topk_argtopk2.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 616, "end_line": 630, "span_ids": ["test_topk_argtopk2"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npfunc,daskfunc\", [(np.sort, da.topk), (np.argsort, da.argtopk)]\n)\n@pytest.mark.parametrize(\"split_every\", [None, 2, 3, 4])\n@pytest.mark.parametrize(\"chunksize\", [1, 2, 3, 4, 5, 10])\ndef test_topk_argtopk2(npfunc, daskfunc, split_every, chunksize):\n \"\"\"Fine test use cases when k is larger than chunk size\"\"\"\n npa = np.random.random((10,))\n a = da.from_array(npa, chunks=chunksize)\n k = 5\n\n # top 5 elements, sorted descending\n assert_eq(npfunc(npa)[-k:][::-1], daskfunc(a, k, split_every=split_every))\n # bottom 5 elements, sorted ascending\n assert_eq(npfunc(npa)[:k], daskfunc(a, -k, split_every=split_every))", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk3_test_topk_argtopk3.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_topk_argtopk3_test_topk_argtopk3.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 633, "end_line": 640, "span_ids": ["test_topk_argtopk3"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_topk_argtopk3():\n a = da.random.random((10, 20, 30), chunks=(4, 8, 8))\n\n # As Array methods\n assert_eq(a.topk(5, axis=1, split_every=2), da.topk(a, 5, axis=1, split_every=2))\n assert_eq(\n a.argtopk(5, axis=1, split_every=2), da.argtopk(a, 5, axis=1, split_every=2)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_regres_3940_test_regres_3940.if_func_not_in_da_cumsum.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_regres_3940_test_regres_3940.if_func_not_in_da_cumsum.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 644, "end_line": 660, "span_ids": ["test_regres_3940"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [da.cumsum, da.cumprod, da.argmin, da.argmax, da.min, da.max, da.nansum, da.nanmax],\n)\n@pytest.mark.parametrize(\"method\", [\"sequential\", \"blelloch\"])\ndef test_regres_3940(func, method):\n if func in {da.cumsum, da.cumprod}:\n kwargs = {\"method\": method}\n else:\n kwargs = {}\n a = da.ones((5, 2), chunks=(2, 2))\n assert func(a, **kwargs).name != func(a + 1, **kwargs).name\n assert func(a, axis=0, **kwargs).name != func(a, **kwargs).name\n assert func(a, axis=0, **kwargs).name != func(a, axis=1, **kwargs).name\n if func not in {da.cumsum, da.cumprod, da.argmin, da.argmax}:\n assert func(a, axis=()).name != func(a).name\n assert func(a, axis=()).name != func(a, axis=0).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_trace_test_trace.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_trace_test_trace.None_13", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 657, "end_line": 679, "span_ids": ["test_trace"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_trace():\n def _assert(a, b, *args, **kwargs):\n return assert_eq(a.trace(*args, **kwargs), b.trace(*args, **kwargs))\n\n b = np.arange(12).reshape((3, 4))\n a = da.from_array(b, 1)\n _assert(a, b)\n _assert(a, b, 0)\n _assert(a, b, 1)\n _assert(a, b, -1)\n\n b = np.arange(8).reshape((2, 2, 2))\n a = da.from_array(b, 2)\n _assert(a, b)\n _assert(a, b, 0)\n _assert(a, b, 1)\n _assert(a, b, -1)\n _assert(a, b, 0, 0, 1)\n _assert(a, b, 0, 0, 2)\n _assert(a, b, 0, 1, 2, int)\n _assert(a, b, 0, 1, 2, float)\n _assert(a, b, offset=1, axis1=0, axis2=2, dtype=int)\n _assert(a, b, offset=1, axis1=0, axis2=2, dtype=float)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_expand_tuple_test_expand_tuple.assert_expand_tuple_7_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_expand_tuple_test_expand_tuple.assert_expand_tuple_7_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 54, "end_line": 58, "span_ids": ["test_expand_tuple"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_expand_tuple():\n assert expand_tuple((2, 4), 2) == (1, 1, 2, 2)\n assert expand_tuple((2, 4), 3) == (1, 1, 1, 1, 2)\n assert expand_tuple((3, 4), 2) == (1, 2, 2, 2)\n assert expand_tuple((7, 4), 3) == (2, 2, 3, 1, 1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_contract_tuple_test_contract_tuple.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_contract_tuple_test_contract_tuple.None_3", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 65, "span_ids": ["test_contract_tuple"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_contract_tuple():\n assert contract_tuple((1, 1, 2, 3, 1), 2) == (2, 2, 2, 2)\n assert contract_tuple((1, 1, 2, 5, 1), 2) == (2, 2, 4, 2)\n assert contract_tuple((2, 4), 2) == (2, 4)\n assert contract_tuple((2, 4), 3) == (6,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_array_return_type_test_atleast_nd_no_args.assert_np_r_n_da_r_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_array_return_type_test_atleast_nd_no_args.assert_np_r_n_da_r_n", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 47, "span_ids": ["test_array_return_type", "test_atleast_nd_no_args", "test_derived_docstrings"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_return_type():\n # Regression test for https://github.com/dask/dask/issues/5426\n x = [0, 1, 2, 3]\n dx = da.array(x)\n assert isinstance(dx, da.Array)\n assert_eq(x, dx)\n\n\ndef test_derived_docstrings():\n assert \"This docstring was copied from numpy.array\" in da.routines.array.__doc__\n assert \"Create an array.\" in da.routines.array.__doc__\n\n\n@pytest.mark.parametrize(\"funcname\", [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"])\ndef test_atleast_nd_no_args(funcname):\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n np_r_n = np_func()\n da_r_n = da_func()\n\n assert np_r_n == da_r_n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_one_arg_test_atleast_nd_one_arg.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_one_arg_test_atleast_nd_one_arg.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 50, "end_line": 71, "span_ids": ["test_atleast_nd_one_arg"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [\n (tuple(), tuple()),\n ((4,), (2,)),\n ((4, 6), (2, 3)),\n ((4, 6, 8), (2, 3, 4)),\n ((4, 6, 8, 10), (2, 3, 4, 5)),\n ],\n)\ndef test_atleast_nd_one_arg(funcname, shape, chunks):\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n np_r = np_func(np_a)\n da_r = da_func(da_a)\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_two_args_test_atleast_nd_two_args.for_np_r_da_r_in_zip_np_.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_atleast_nd_two_args_test_atleast_nd_two_args.for_np_r_da_r_in_zip_np_.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 74, "end_line": 104, "span_ids": ["test_atleast_nd_two_args"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"atleast_1d\", \"atleast_2d\", \"atleast_3d\"])\n@pytest.mark.parametrize(\n \"shape1, shape2\",\n list(\n itertools.combinations_with_replacement(\n [tuple(), (4,), (4, 6), (4, 6, 8), (4, 6, 8, 10)], 2\n )\n ),\n)\ndef test_atleast_nd_two_args(funcname, shape1, shape2):\n np_a_1 = np.random.random(shape1)\n da_a_1 = da.from_array(np_a_1, chunks=tuple(c // 2 for c in shape1))\n\n np_a_2 = np.random.random(shape2)\n da_a_2 = da.from_array(np_a_2, chunks=tuple(c // 2 for c in shape2))\n\n np_a_n = [np_a_1, np_a_2]\n da_a_n = [da_a_1, da_a_2]\n\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n np_r_n = np_func(*np_a_n)\n da_r_n = da_func(*da_a_n)\n\n assert type(np_r_n) is type(da_r_n)\n\n assert len(np_r_n) == len(da_r_n)\n\n for np_r, da_r in zip(np_r_n, da_r_n):\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_test_transpose.None_1.d_transpose_1_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_test_transpose.None_1.d_transpose_1_2_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 107, "end_line": 121, "span_ids": ["test_transpose"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_transpose():\n x = np.arange(240).reshape((4, 6, 10))\n d = da.from_array(x, (2, 3, 4))\n\n assert_eq(d.transpose((2, 0, 1)), x.transpose((2, 0, 1)))\n assert same_keys(d.transpose((2, 0, 1)), d.transpose((2, 0, 1)))\n\n assert_eq(d.transpose(2, 0, 1), x.transpose(2, 0, 1))\n assert same_keys(d.transpose(2, 0, 1), d.transpose(2, 0, 1))\n\n with pytest.raises(ValueError):\n d.transpose(1, 2)\n\n with pytest.raises(ValueError):\n d.transpose((1, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_negative_axes_test_transpose_skip_when_possible.assert_x_transpose_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_transpose_negative_axes_test_transpose_skip_when_possible.assert_x_transpose_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 134, "span_ids": ["test_transpose_negative_axes", "test_transpose_skip_when_possible"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_transpose_negative_axes():\n x = np.ones((2, 3, 4, 5))\n y = da.ones((2, 3, 4, 5), chunks=3)\n\n assert_eq(x.transpose([-1, -2, 0, 1]), y.transpose([-1, -2, 0, 1]))\n\n\ndef test_transpose_skip_when_possible():\n x = da.ones((2, 3, 4), chunks=3)\n assert x.transpose((0, 1, 2)) is x\n assert x.transpose((-3, -2, -1)) is x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_swapaxes_test_swapaxes.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_swapaxes_test_swapaxes.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 150, "span_ids": ["test_swapaxes"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_swapaxes():\n x = np.random.normal(0, 10, size=(10, 12, 7))\n d = da.from_array(x, chunks=(4, 5, 2))\n\n assert_eq(np.swapaxes(x, 0, 1), da.swapaxes(d, 0, 1))\n assert_eq(np.swapaxes(x, 2, 1), da.swapaxes(d, 2, 1))\n assert_eq(x.swapaxes(2, 1), d.swapaxes(2, 1))\n assert_eq(x.swapaxes(0, 0), d.swapaxes(0, 0))\n assert_eq(x.swapaxes(1, 2), d.swapaxes(1, 2))\n assert_eq(x.swapaxes(0, -1), d.swapaxes(0, -1))\n assert_eq(x.swapaxes(-1, 1), d.swapaxes(-1, 1))\n\n assert d.swapaxes(0, 1).name == d.swapaxes(0, 1).name\n assert d.swapaxes(0, 1).name != d.swapaxes(1, 0).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_test_moveaxis_rollaxis.for_axis1_in_range_x_ndi.for_axis2_in_range_x_ndi.assert_eq_np_func_x_axis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_test_moveaxis_rollaxis.for_axis1_in_range_x_ndi.for_axis2_in_range_x_ndi.assert_eq_np_func_x_axis", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 163, "span_ids": ["test_moveaxis_rollaxis"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"funcname\", [\"moveaxis\", \"rollaxis\"])\n@pytest.mark.parametrize(\"shape\", [(), (5,), (3, 5, 7, 3)])\ndef test_moveaxis_rollaxis(funcname, shape):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=(len(shape) * (2,)))\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n for axis1 in range(-x.ndim, x.ndim):\n assert isinstance(da_func(d, 0, axis1), da.Array)\n for axis2 in range(-x.ndim, x.ndim):\n assert_eq(np_func(x, axis1, axis2), da_func(d, axis1, axis2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_keyword_test_moveaxis_rollaxis_numpy_api.assert_eq_result_np_roll": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_moveaxis_rollaxis_keyword_test_moveaxis_rollaxis_numpy_api.assert_eq_result_np_roll", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 166, "end_line": 185, "span_ids": ["test_moveaxis_rollaxis_keyword", "test_moveaxis_rollaxis_numpy_api"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_moveaxis_rollaxis_keyword():\n x = np.random.random((10, 12, 7))\n d = da.from_array(x, chunks=(4, 5, 2))\n assert_eq(\n np.moveaxis(x, destination=1, source=0), da.moveaxis(d, destination=1, source=0)\n )\n assert_eq(np.rollaxis(x, 2), da.rollaxis(d, 2))\n assert isinstance(da.rollaxis(d, 1), da.Array)\n assert_eq(np.rollaxis(x, start=1, axis=2), da.rollaxis(d, start=1, axis=2))\n\n\ndef test_moveaxis_rollaxis_numpy_api():\n a = da.random.random((4, 4, 4), chunks=2)\n result = np.moveaxis(a, 2, 0)\n assert isinstance(result, da.Array)\n assert_eq(result, np.moveaxis(a.compute(), 2, 0))\n\n result = np.rollaxis(a, 2, 0)\n assert isinstance(result, da.Array)\n assert_eq(result, np.rollaxis(a.compute(), 2, 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_flip_test_flip.try_.else_.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_flip_test_flip.try_.else_.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 188, "end_line": 230, "span_ids": ["test_flip"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"funcname, kwargs\",\n [\n (\"flipud\", {}),\n (\"fliplr\", {}),\n (\"flip\", {}),\n (\"flip\", {\"axis\": 0}),\n (\"flip\", {\"axis\": 1}),\n (\"flip\", {\"axis\": 2}),\n (\"flip\", {\"axis\": -1}),\n (\"flip\", {\"axis\": (0, 2)}),\n ],\n)\n@pytest.mark.parametrize(\"shape\", [tuple(), (4,), (4, 6), (4, 6, 8), (4, 6, 8, 10)])\ndef test_flip(funcname, kwargs, shape):\n axis = kwargs.get(\"axis\")\n if axis is None:\n if funcname == \"flipud\":\n axis = (0,)\n elif funcname == \"fliplr\":\n axis = (1,)\n elif funcname == \"flip\":\n axis = range(len(shape))\n elif not isinstance(axis, tuple):\n axis = (axis,)\n\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=1)\n\n np_func = getattr(np, funcname)\n da_func = getattr(da, funcname)\n\n try:\n for ax in axis:\n range(np_a.ndim)[ax]\n except IndexError:\n with pytest.raises(ValueError):\n da_func(da_a, **kwargs)\n else:\n np_r = np_func(np_a, **kwargs)\n da_r = da_func(da_a, **kwargs)\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_matmul_test_matmul.for_d1_d2_in_itertools_p.if_x_ndim_0_or_y_ndim_.else_.assert_eq_expected_da_ma": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_matmul_test_matmul.for_d1_d2_in_itertools_p.if_x_ndim_0_or_y_ndim_.else_.assert_eq_expected_da_ma", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 282, "span_ids": ["test_matmul"], "tokens": 839}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"x_shape, y_shape, x_chunks, y_chunks\",\n [\n [(), (), (), ()],\n [(), (7,), (), ()],\n [(), (7, 11), (), ()],\n [(), (7, 11, 15), (), ()],\n [(), (7, 11, 15, 19), (), ()],\n [(7,), (), (), ()],\n [(7,), (7,), (), ()],\n [(11,), (11, 7), (), ()],\n [(15,), (7, 15, 11), (), ()],\n [(19,), (7, 11, 19, 15), (), ()],\n [(7, 11), (), (), ()],\n [(7, 11), (11,), (), ()],\n [(7, 11), (11, 7), (), ()],\n [(11, 15), (7, 15, 11), (), ()],\n [(15, 19), (7, 11, 19, 15), (), ()],\n [(7, 11, 15), (), (), ()],\n [(7, 11, 15), (15,), (), ()],\n [(7, 11, 15), (15, 7), (), ()],\n [(7, 11, 15), (7, 15, 11), (), ()],\n [(11, 15, 19), (7, 11, 19, 15), (), ()],\n [(7, 11, 15, 19), (), (), ()],\n [(7, 11, 15, 19), (19,), (), ()],\n [(7, 11, 15, 19), (19, 7), (), ()],\n [(7, 11, 15, 19), (11, 19, 13), (), ()],\n [(7, 11, 15, 19), (7, 11, 19, 15), (), ()],\n # These tests use explicitly special/disparate chunk sizes:\n [(), (7,), (), (5,)],\n [(), (7, 11, 15, 19), (), (1, 3, 5, 19)],\n [(7, 11), (11, 7), (1, 1), (1, 1)],\n [(7, 11), (11, 7), (3, 5), (4, 2)],\n [(7, 11), (11, 7), (7, 11), (11, 7)],\n [(11, 15, 19), (7, 11, 19, 15), (7, 7, 7), (3, 9, 9, 9)],\n [(3, 3, 20, 30), (3, 3, 30, 20), (1, 3, 2, 6), (1, 3, 5, 10)],\n ],\n)\ndef test_matmul(x_shape, y_shape, x_chunks, y_chunks):\n np.random.seed(3732)\n\n x = np.random.random(x_shape)[()]\n y = np.random.random(y_shape)[()]\n\n a = da.from_array(x, chunks=x_chunks or tuple((i // 2) for i in x.shape))\n b = da.from_array(y, chunks=y_chunks or tuple((i // 2) for i in y.shape))\n\n expected = None\n try:\n expected = np.matmul(x, y)\n except ValueError:\n pass\n\n for d1, d2 in itertools.product([a, x], [b, y]):\n if x.ndim == 0 or y.ndim == 0:\n with pytest.raises(ValueError):\n da.matmul(d1, d2)\n else:\n assert_eq(expected, da.matmul(d1, d2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_test_tensordot.with_pytest_warns_da_Perf.assert_not_same_keys_da_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_test_tensordot.with_pytest_warns_da_Perf.assert_not_same_keys_da_t", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 294, "span_ids": ["test_tensordot"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot():\n x = np.arange(400).reshape((20, 20))\n a = da.from_array(x, chunks=(5, 4))\n y = np.arange(200).reshape((20, 10))\n b = da.from_array(y, chunks=(4, 5))\n\n for axes in [1, (1, 0)]:\n assert_eq(da.tensordot(a, b, axes=axes), np.tensordot(x, y, axes=axes))\n assert_eq(da.tensordot(x, b, axes=axes), np.tensordot(x, y, axes=axes))\n assert_eq(da.tensordot(a, y, axes=axes), np.tensordot(x, y, axes=axes))\n\n assert same_keys(da.tensordot(a, b, axes=(1, 0)), da.tensordot(a, b, axes=(1, 0)))\n\n # Increasing number of chunks warning\n with pytest.warns(da.PerformanceWarning):\n assert not same_keys(da.tensordot(a, b, axes=0), da.tensordot(a, b, axes=1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_2_test_tensordot_2.assert_eq_da_tensordot_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_2_test_tensordot_2.assert_eq_da_tensordot_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 297, "end_line": 304, "span_ids": ["test_tensordot_2"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"axes\", [0, 1, (0, 1), (1, 0), ((1, 0), (2, 1)), ((1, 2), (2, 0)), ((2, 0), (1, 2))]\n)\ndef test_tensordot_2(axes):\n x = np.arange(4 * 4 * 4).reshape((4, 4, 4))\n y = da.from_array(x, chunks=2)\n\n assert_eq(da.tensordot(y, y, axes=axes), np.tensordot(x, x, axes=axes))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_neq2_test_tensordot_double_contraction_neq2.assert_eq_da_tensordot_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_neq2_test_tensordot_double_contraction_neq2.assert_eq_da_tensordot_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 307, "end_line": 312, 
"span_ids": ["test_tensordot_double_contraction_neq2"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [\"auto\", (4, 6), (2, 3), (4, 3), (2, 6)])\ndef test_tensordot_double_contraction_neq2(chunks):\n # Regression test for https://github.com/dask/dask/issues/5472\n x = np.arange(24).reshape(4, 6)\n y = da.from_array(x, chunks=chunks)\n assert_eq(da.tensordot(y, y, axes=2), np.tensordot(x, x, axes=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_ngt2_test_tensordot_double_contraction_ngt2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_double_contraction_ngt2_test_tensordot_double_contraction_ngt2.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 329, "span_ids": ["test_tensordot_double_contraction_ngt2"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot_double_contraction_ngt2():\n # Regression test for https://github.com/dask/dask/issues/5472\n x = np.arange(60.0).reshape(3, 4, 5)\n y = np.arange(60.0).reshape(4, 5, 3)\n u = da.from_array(x)\n v = da.from_array(y)\n\n assert_eq(da.tensordot(u, v, axes=2), np.tensordot(x, y, axes=2))\n\n x = np.arange(60.0).reshape(3, 4, 5)\n y = np.arange(60.0).reshape(4, 5, 3)\n u = da.from_array(x, chunks=3)\n v = da.from_array(y)\n\n assert_eq(da.tensordot(u, v, axes=2), np.tensordot(x, y, axes=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_more_than_26_dims_test_dot_method.assert_eq_a_dot_b_x_dot": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tensordot_more_than_26_dims_test_dot_method.assert_eq_a_dot_b_x_dot", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 379, "end_line": 392, "span_ids": ["test_tensordot_more_than_26_dims", "test_dot_method"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tensordot_more_than_26_dims():\n ndim = 27\n x = np.broadcast_to(1, [2] * ndim)\n dx = da.from_array(x, chunks=-1)\n assert_eq(da.tensordot(dx, dx, ndim), np.array(2**ndim))\n\n\ndef test_dot_method():\n x = np.arange(400).reshape((20, 20))\n a = da.from_array(x, chunks=(5, 5))\n y = np.arange(200).reshape((20, 10))\n b = da.from_array(y, chunks=(5, 5))\n\n assert_eq(a.dot(b), x.dot(y))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vdot_test_vdot.assert_eq_da_vdot_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vdot_test_vdot.assert_eq_da_vdot_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 348, "end_line": 363, "span_ids": ["test_vdot"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape, chunks\", [((20,), (6,)), ((4, 5), (2, 3))])\ndef test_vdot(shape, chunks):\n np.random.seed(1337)\n\n x = 2 * np.random.random((2,) + shape) - 1\n x = x[0] + 1j * x[1]\n\n y = 2 * np.random.random((2,) + shape) - 1\n y = y[0] + 1j * y[1]\n\n a = da.from_array(x, chunks=chunks)\n b = da.from_array(y, chunks=chunks)\n\n assert_eq(np.vdot(x, y), da.vdot(a, b))\n assert_eq(np.vdot(y, x), da.vdot(b, a))\n assert_eq(da.vdot(a, b), da.vdot(b, a).conj())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_outer_test_outer.assert_eq_np_outer_y_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_outer_test_outer.assert_eq_np_outer_y_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 366, "end_line": 377, "span_ids": ["test_outer"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape1, shape2\", [((20,), (6,)), ((4, 5), (2, 3))])\ndef test_outer(shape1, shape2):\n np.random.seed(1337)\n\n x = 2 * np.random.random(shape1) - 1\n y = 2 * np.random.random(shape2) - 1\n\n a = 
da.from_array(x, chunks=3)\n b = da.from_array(y, chunks=3)\n\n assert_eq(np.outer(x, y), da.outer(a, b))\n assert_eq(np.outer(y, x), da.outer(b, a))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_along_axis_test_apply_along_axis.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_along_axis_test_apply_along_axis.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 380, "end_line": 412, "span_ids": ["test_apply_along_axis"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func1d_name, func1d, specify_output_props\",\n [\n [\"ndim\", lambda x: x.ndim, False],\n [\"sum\", lambda x: x.sum(), False],\n [\"range\", lambda x: [x.min(), x.max()], False],\n [\"range2\", lambda x: [[x.min(), x.max()], [x.max(), x.min()]], False],\n [\"cumsum\", lambda x: np.cumsum(x), True],\n ],\n)\n@pytest.mark.parametrize(\n \"input_shape, axis\",\n [[(10, 15, 20), 0], [(10, 15, 20), 1], [(10, 15, 20), 2], [(10, 15, 20), -1]],\n)\ndef test_apply_along_axis(func1d_name, func1d, specify_output_props, input_shape, axis):\n a = np.random.randint(0, 10, input_shape)\n d = da.from_array(a, chunks=(len(input_shape) * (5,)))\n\n output_shape = None\n output_dtype = None\n\n if specify_output_props:\n slices = [0] * a.ndim\n slices[axis] = slice(None)\n slices = tuple(slices)\n sample = np.array(func1d(a[slices]))\n output_shape = sample.shape\n output_dtype = sample.dtype\n\n assert_eq(\n da.apply_along_axis(func1d, axis, d, dtype=output_dtype, shape=output_shape),\n np.apply_along_axis(func1d, axis, a),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_over_axes_test_apply_over_axes.assert_eq_da_apply_over_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_apply_over_axes_test_apply_over_axes.assert_eq_da_apply_over_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 415, "end_line": 443, "span_ids": ["test_apply_over_axes"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\n \"func_name, func\",\n [\n [\"sum0\", lambda x, axis: x.sum(axis=axis)],\n [\"sum1\", lambda x, axis: x.sum(axis=axis, keepdims=True)],\n [\n \"range\",\n lambda x, axis: np.concatenate(\n [x.min(axis=axis, keepdims=True), x.max(axis=axis, keepdims=True)],\n axis=axis,\n ),\n ],\n ],\n)\n@pytest.mark.parametrize(\n \"shape, axes\",\n [\n [(10, 15, 20), tuple()],\n [(10, 15, 20), 0],\n [(10, 15, 20), (1,)],\n [(10, 15, 20), (-1, 1)],\n [(10, 15, 20), (2, 0, 1)],\n ],\n)\ndef test_apply_over_axes(func_name, func, shape, axes):\n a = np.random.randint(0, 10, shape)\n d = da.from_array(a, chunks=(len(shape) * (5,)))\n\n assert_eq(da.apply_over_axes(func, d, axes), np.apply_over_axes(func, a, axes))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ptp_test_ptp.assert_eq_da_ptp_d_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ptp_test_ptp.assert_eq_da_ptp_d_axis_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 446, "end_line": 460, "span_ids": ["test_ptp"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, axis\",\n [\n [(10, 15, 20), None],\n [(10, 15, 20), 0],\n [(10, 15, 20), 1],\n [(10, 15, 20), 2],\n [(10, 15, 20), -1],\n ],\n)\ndef test_ptp(shape, axis):\n a = np.random.randint(0, 10, shape)\n d = da.from_array(a, chunks=(len(shape) * (5,)))\n\n assert_eq(da.ptp(d, axis), np.ptp(a, axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 463, "end_line": 472, "span_ids": ["test_diff"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, axis\",\n [[(10, 15, 20), 0], [(10, 15, 20), 1], [(10, 15, 20), 2], [(10, 15, 20), -1]],\n)\n@pytest.mark.parametrize(\"n\", [0, 1, 2])\ndef test_diff(shape, n, axis):\n x = np.random.randint(0, 10, 
shape)\n a = da.from_array(x, chunks=(len(shape) * (5,)))\n\n assert_eq(da.diff(a, n, axis), np.diff(x, n, axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_gradient_test_gradient.if_isinstance_axis_Numbe.else_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_gradient_test_gradient.if_isinstance_axis_Numbe.else_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 484, "end_line": 518, "span_ids": ["test_gradient"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, varargs, axis\",\n [\n [(10, 15, 20), (), None],\n [(10, 15, 20), (2,), None],\n [(10, 15, 20), (1.0, 1.5, 2.0), None],\n [(10, 15, 20), (), 0],\n [(10, 15, 20), (), 1],\n [(10, 15, 20), (), 2],\n [(10, 15, 20), (), -1],\n [(10, 15, 20), (), (0, 2)],\n [(10, 15, 20), (np.exp(np.arange(10)), np.exp(np.arange(20))), (0, 2)],\n [(10, 15, 20), (0.5, np.exp(np.arange(20))), (0, 2)],\n [(10, 15, 20), (np.exp(np.arange(20)),), -1],\n ],\n)\n@pytest.mark.parametrize(\"edge_order\", [1, 2])\ndef test_gradient(shape, varargs, axis, edge_order):\n a = np.random.randint(0, 10, shape)\n d_a = da.from_array(a, chunks=(len(shape) * (5,)))\n\n r_a = np.gradient(a, *varargs, axis=axis, edge_order=edge_order)\n r_d_a = da.gradient(d_a, *varargs, axis=axis, edge_order=edge_order)\n\n if isinstance(axis, Number):\n assert_eq(r_d_a, r_a)\n else:\n assert len(r_d_a) == len(r_a)\n\n for e_r_d_a, e_r_a in zip(r_d_a, r_a):\n assert_eq(e_r_d_a, e_r_a)\n\n assert_eq(\n da.sqrt(sum(map(da.square, r_d_a))), np.sqrt(sum(map(np.square, r_a)))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_with_weights_test_bincount_unspecified_minlength._shape_is_nan_so_must": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_with_weights_test_bincount_unspecified_minlength._shape_is_nan_so_must", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 538, "end_line": 561, "span_ids": ["test_bincount_with_weights", "test_bincount_unspecified_minlength"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"weights\",\n [\n np.array([1, 2, 1, 0.5, 1], dtype=np.float32),\n np.array([1, 2, 1, 0, 1], dtype=np.int32),\n ],\n)\ndef test_bincount_with_weights(weights):\n x = np.array([2, 1, 5, 2, 1])\n d = da.from_array(x, chunks=2)\n\n dweights = da.from_array(weights, chunks=2)\n e = da.bincount(d, weights=dweights, minlength=6)\n assert_eq(e, np.bincount(x, weights=dweights.compute(), minlength=6))\n assert same_keys(da.bincount(d, weights=dweights, minlength=6), e)\n\n\ndef test_bincount_unspecified_minlength():\n x = np.array([1, 1, 3, 7, 0])\n d = da.from_array(x, chunks=2)\n e = da.bincount(d)\n assert_eq(e, np.bincount(x))\n assert same_keys(da.bincount(d), e)\n assert len(e.compute()) == 8 # shape is (nan,) so must compute for len()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 552, "end_line": 570, "span_ids": ["test_digitize"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_digitize():\n x = np.array([2, 4, 5, 6, 1])\n bins = np.array([1, 2, 3, 4, 5])\n for chunks in [2, 4]:\n for right in [False, True]:\n d = da.from_array(x, chunks=chunks)\n assert_eq(\n da.digitize(d, bins, right=right), np.digitize(x, bins, right=right)\n )\n\n x = np.random.random(size=(100, 100))\n bins = np.random.random(size=13)\n bins.sort()\n for chunks in [(10, 10), (10, 20), (13, 17), (87, 54)]:\n for right in [False, True]:\n d = da.from_array(x, chunks=chunks)\n assert_eq(\n da.digitize(d, bins, right=right), np.digitize(x, bins, right=right)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_alternative_bins_range_test_histogram_return_type.assert_eq_da_histogram_v_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_alternative_bins_range_test_histogram_return_type.assert_eq_da_histogram_v_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 737, "end_line": 759, "span_ids": ["test_histogram_return_type", 
"test_histogram_bins_range_with_nan_array", "test_histogram_alternative_bins_range"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram_alternative_bins_range():\n v = da.random.random(100, chunks=10)\n (a1, b1) = da.histogram(v, bins=10, range=(0, 1))\n (a2, b2) = np.histogram(v, bins=10, range=(0, 1))\n assert_eq(a1, a2)\n assert_eq(b1, b2)\n\n\ndef test_histogram_bins_range_with_nan_array():\n # Regression test for issue #3977\n v = da.from_array(np.array([-2, np.nan, 2]), chunks=1)\n (a1, b1) = da.histogram(v, bins=10, range=(-3, 3))\n (a2, b2) = np.histogram(v, bins=10, range=(-3, 3))\n assert_eq(a1, a2)\n assert_eq(b1, b2)\n\n\ndef test_histogram_return_type():\n v = da.random.random(100, chunks=10)\n bins = np.arange(0, 1.01, 0.01)\n # Check if return type is same as hist\n bins = np.arange(0, 11, 1, dtype=\"i4\")\n assert_eq(da.histogram(v * 10, bins=bins)[0], np.histogram(v * 10, bins=bins)[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_extra_args_and_shapes_test_histogram_extra_args_and_shapes.for_v_bins_w_in_data_.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_extra_args_and_shapes_test_histogram_extra_args_and_shapes.for_v_bins_w_in_data_.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 614, "end_line": 639, "span_ids": ["test_histogram_extra_args_and_shapes"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram_extra_args_and_shapes():\n # Check for extra args and shapes\n bins = np.arange(0, 1.01, 0.01)\n v = da.random.random(100, chunks=10)\n data = [\n (v, bins, da.ones(100, chunks=v.chunks) * 5),\n (da.random.random((50, 50), chunks=10), bins, da.ones((50, 50), chunks=10) * 5),\n ]\n\n for v, bins, w in data:\n # density\n assert_eq(\n da.histogram(v, bins=bins, density=True)[0],\n np.histogram(v, bins=bins, density=True)[0],\n )\n\n # weights\n assert_eq(\n da.histogram(v, bins=bins, weights=w)[0],\n np.histogram(v, bins=bins, weights=w)[0],\n )\n\n assert_eq(\n da.histogram(v, bins=bins, weights=w, density=True)[0],\n da.histogram(v, bins=bins, weights=w, density=True)[0],\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_normed_deprecation_test_histogram_bin_range_raises.assert_bins_in_err_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_normed_deprecation_test_histogram_bin_range_raises.assert_bins_in_err_msg_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 642, "end_line": 672, "span_ids": ["test_histogram_normed_deprecation", "test_histogram_bin_range_raises"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogram_normed_deprecation():\n x = da.arange(10)\n with pytest.raises(ValueError) as info:\n da.histogram(x, bins=[1, 2, 3], normed=True)\n\n assert \"density\" in str(info.value)\n assert \"deprecated\" in str(info.value).lower()\n\n\n@pytest.mark.parametrize(\n \"bins, hist_range\",\n [\n (None, None),\n (10, None),\n (10, 1),\n (None, (1, 10)),\n (10, [0, 1, 2]),\n (10, [0]),\n (10, np.array([[0, 1]])),\n (10, da.array([[0, 1]])),\n ([[0, 1, 2]], None),\n (np.array([[0, 1, 2]]), None),\n (da.array([[0, 1, 2]]), None),\n ],\n)\ndef test_histogram_bin_range_raises(bins, hist_range):\n data = da.random.random(10, chunks=2)\n with pytest.raises((ValueError, TypeError)) as info:\n da.histogram(data, bins=bins, range=hist_range)\n err_msg = str(info.value)\n assert \"bins\" in err_msg or \"range\" in err_msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_range_test_histogram_delayed_range.assert_eq_bins_d_bins_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_range_test_histogram_delayed_range.assert_eq_bins_d_bins_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 708, "span_ids": ["test_histogram_delayed_range"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"density\", [True, False])\n@pytest.mark.parametrize(\"weighted\", [True, False])\n@pytest.mark.parametrize(\"non_delayed_i\", [None, 0, 1])\n@pytest.mark.parametrize(\"delay_n_bins\", [False, True])\ndef test_histogram_delayed_range(density, weighted, non_delayed_i, delay_n_bins):\n n = 100\n v = np.random.random(n)\n vd = da.from_array(v, chunks=10)\n\n if weighted:\n weights = np.random.random(n)\n 
weights_d = da.from_array(weights, chunks=vd.chunks)\n\n d_range = [vd.min(), vd.max()]\n if non_delayed_i is not None:\n d_range[non_delayed_i] = d_range[non_delayed_i].compute()\n hist_d, bins_d = da.histogram(\n vd,\n bins=da.array(n) if delay_n_bins and not density else n,\n range=d_range,\n density=density,\n weights=weights_d if weighted else None,\n )\n\n hist, bins = np.histogram(\n v,\n bins=n,\n range=[v.min(), v.max()],\n density=density,\n weights=weights if weighted else None,\n )\n\n assert_eq(hist_d, hist)\n assert_eq(bins_d, bins)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_corrcoef_test_round.assert_eq_d_round_2_da_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_corrcoef_test_round.assert_eq_d_round_2_da_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 775, "end_line": 797, "span_ids": ["test_round", "test_corrcoef"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_corrcoef():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n assert_eq(da.corrcoef(d), np.corrcoef(x))\n assert_eq(da.corrcoef(d, rowvar=0), np.corrcoef(x, rowvar=0))\n assert_eq(da.corrcoef(d, d), np.corrcoef(x, x))\n\n y = np.arange(8)\n e = da.from_array(y, chunks=(4,))\n\n assert_eq(da.corrcoef(d, e), np.corrcoef(x, y))\n assert_eq(da.corrcoef(e, d), np.corrcoef(y, x))\n\n\ndef test_round():\n x = np.random.random(10)\n d = da.from_array(x, chunks=4)\n\n for i in (0, 1, 4, 5):\n assert_eq(x.round(i), d.round(i))\n\n assert_eq(d.round(2), da.round(d, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_kwargs_test_unique_kwargs.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_kwargs_test_unique_kwargs.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 800, "end_line": 830, "span_ids": ["test_unique_kwargs"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"return_index\", [False, True])\n@pytest.mark.parametrize(\"return_inverse\", [False, True])\n@pytest.mark.parametrize(\"return_counts\", [False, True])\ndef test_unique_kwargs(return_index, return_inverse, return_counts):\n kwargs = dict(\n return_index=return_index,\n return_inverse=return_inverse,\n return_counts=return_counts,\n )\n\n a = np.array([1, 2, 4, 4, 5, 2])\n d = da.from_array(a, chunks=(3,))\n\n r_a = np.unique(a, **kwargs)\n r_d = da.unique(d, **kwargs)\n\n if not any([return_index, return_inverse, return_counts]):\n assert isinstance(r_a, np.ndarray)\n assert isinstance(r_d, da.Array)\n\n r_a = (r_a,)\n r_d = (r_d,)\n\n assert len(r_a) == len(r_d)\n\n if return_inverse:\n i = 1 + int(return_index)\n assert (d.size,) == r_d[i].shape\n\n for e_r_a, e_r_d in zip(r_a, r_d):\n assert_eq(e_r_d, e_r_a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_rand_test_unique_rand.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unique_rand_test_unique_rand.for_e_r_a_e_r_d_in_zip_r.assert_eq_e_r_d_e_r_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 833, "end_line": 855, "span_ids": ["test_unique_rand"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"seed\", [23, 796])\n@pytest.mark.parametrize(\"low, high\", [[0, 10]])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [[(10,), (5,)], [(10,), (3,)], [(4, 5), (3, 2)], [(20, 20), (4, 5)]],\n)\ndef test_unique_rand(seed, low, high, shape, chunks):\n np.random.seed(seed)\n\n a = np.random.randint(low, high, size=shape)\n d = da.from_array(a, chunks=chunks)\n\n kwargs = dict(return_index=True, return_inverse=True, return_counts=True)\n\n r_a = np.unique(a, **kwargs)\n r_d = da.unique(d, **kwargs)\n\n assert len(r_a) == len(r_d)\n\n assert (d.size,) == r_d[2].shape\n\n for e_r_a, e_r_d in zip(r_a, r_d):\n assert_eq(e_r_d, e_r_a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_rand_test_isin_rand.assert_eq_r_a_r_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_rand_test_isin_rand.assert_eq_r_a_r_d_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1274, "end_line": 1300, "span_ids": ["test_isin_rand"], "tokens": 307}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"seed\", [23, 796])\n@pytest.mark.parametrize(\"low, high\", [[0, 10]])\n@pytest.mark.parametrize(\n \"elements_shape, elements_chunks\",\n [[(10,), (5,)], [(10,), (3,)], [(4, 5), (3, 2)], [(20, 20), (4, 5)]],\n)\n@pytest.mark.parametrize(\n \"test_shape, test_chunks\",\n [[(10,), (5,)], [(10,), (3,)], [(4, 5), (3, 2)], [(20, 20), (4, 5)]],\n)\n@pytest.mark.parametrize(\"invert\", [True, False])\ndef test_isin_rand(\n seed, low, high, elements_shape, elements_chunks, test_shape, test_chunks, invert\n):\n rng = np.random.RandomState(seed)\n\n a1 = rng.randint(low, high, size=elements_shape)\n d1 = da.from_array(a1, chunks=elements_chunks)\n\n a2 = rng.randint(low, high, size=test_shape) - 5\n d2 = da.from_array(a2, chunks=test_chunks)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=da.PerformanceWarning)\n r_a = np.isin(a1, a2, invert=invert)\n r_d = da.isin(d1, d2, invert=invert)\n assert_eq(r_a, r_d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_assume_unique__maybe_len.try_.except_TypeError_.return.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isin_assume_unique__maybe_len.try_.except_TypeError_.return.0", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 886, "end_line": 901, "span_ids": ["_maybe_len", "test_isin_assume_unique"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"assume_unique\", [True, False])\ndef test_isin_assume_unique(assume_unique):\n a1 = np.arange(10)\n d1 = da.from_array(a1, chunks=(5,))\n\n test_elements = np.arange(0, 10, 2)\n r_a = np.isin(a1, test_elements, assume_unique=assume_unique)\n r_d = da.isin(d1, test_elements, assume_unique=assume_unique)\n assert_eq(r_a, r_d)\n\n\ndef _maybe_len(l):\n try:\n return len(l)\n except TypeError:\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_squeeze_test_squeeze.assert_d_s_chunks_exp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_squeeze_test_squeeze.assert_d_s_chunks_exp_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 986, "end_line": 1010, "span_ids": ["test_squeeze"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"is_func\", [True, False])\n@pytest.mark.parametrize(\"axis\", [None, 0, -1, (0, -1)])\ndef test_squeeze(is_func, axis):\n a = np.arange(10)[None, :, None, None]\n d = da.from_array(a, chunks=(1, 3, 1, 1))\n\n if is_func:\n a_s = np.squeeze(a, axis=axis)\n d_s = da.squeeze(d, axis=axis)\n else:\n a_s = a.squeeze(axis=axis)\n d_s = d.squeeze(axis=axis)\n\n assert_eq(d_s, a_s)\n assert same_keys(d_s, da.squeeze(d, axis=axis))\n\n if axis is None:\n axis = tuple(range(a.ndim))\n else:\n axis = axis if isinstance(axis, tuple) else (axis,)\n axis = tuple(i % a.ndim for i in axis)\n axis = tuple(i for i, c in enumerate(d.chunks) if i in axis and len(c) == 1)\n\n exp_d_s_chunks = tuple(c for i, c in enumerate(d.chunks) if i not in axis)\n assert d_s.chunks == exp_d_s_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vstack_test_hstack.assert_eq_np_hstack_x_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_vstack_test_hstack.assert_eq_np_hstack_x_y", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1013, "end_line": 1030, "span_ids": ["test_vstack", "test_hstack"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_vstack():\n x = np.arange(5)\n y = np.ones(5)\n a = da.arange(5, chunks=2)\n b = da.ones(5, chunks=2)\n\n assert_eq(np.vstack((x, y)), da.vstack((a, b)))\n assert_eq(np.vstack((x, y[None, :])), da.vstack((a, b[None, :])))\n\n\ndef test_hstack():\n x = np.arange(5)\n y = np.ones(5)\n a = da.arange(5, chunks=2)\n b = da.ones(5, chunks=2)\n\n assert_eq(np.hstack((x[None, :], y[None, :])), da.hstack((a[None, :], b[None, :])))\n assert_eq(np.hstack((x, y)), da.hstack((a, b)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_dstack_test_dstack.assert_eq_np_dstack_x_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_dstack_test_dstack.assert_eq_np_dstack_x_y", "embedding": 
null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1033, "end_line": 1044, "span_ids": ["test_dstack"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dstack():\n x = np.arange(5)\n y = np.ones(5)\n a = da.arange(5, chunks=2)\n b = da.ones(5, chunks=2)\n\n assert_eq(\n np.dstack((x[None, None, :], y[None, None, :])),\n da.dstack((a[None, None, :], b[None, None, :])),\n )\n assert_eq(np.dstack((x[None, :], y[None, :])), da.dstack((a[None, :], b[None, :])))\n assert_eq(np.dstack((x, y)), da.dstack((a, b)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_stack_unknown_chunk_sizes_test_stack_unknown_chunk_sizes.assert_eq_np_stacked_dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_stack_unknown_chunk_sizes_test_stack_unknown_chunk_sizes.assert_eq_np_stacked_dsk", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1047, "end_line": 1065, "span_ids": ["test_stack_unknown_chunk_sizes"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"np_func,dsk_func,nan_chunk\",\n [(np.hstack, da.hstack, 0), (np.dstack, da.dstack, 1), (np.vstack, da.vstack, 2)],\n)\ndef test_stack_unknown_chunk_sizes(np_func, dsk_func, nan_chunk):\n shape = (100, 100, 100)\n x = da.ones(shape, chunks=(50, 50, 50))\n y = np.ones(shape)\n\n tmp = list(x._chunks)\n tmp[nan_chunk] = (np.nan,) * 2\n x._chunks = tuple(tmp)\n\n with pytest.raises(ValueError):\n dsk_func((x, x))\n\n np_stacked = np_func((y, y))\n dsk_stacked = dsk_func((x, x), allow_unknown_chunksizes=True)\n assert_eq(np_stacked, dsk_stacked)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_test_take.assert_same_keys_da_take_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_test_take.assert_same_keys_da_take_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1068, "end_line": 1078, "span_ids": ["test_take"], "tokens": 152}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take():\n x = np.arange(400).reshape((20, 20))\n a = da.from_array(x, chunks=(5, 5))\n\n assert_eq(np.take(x, 3, axis=0), da.take(a, 3, axis=0))\n assert_eq(np.take(x, [3, 4, 5], axis=-1), da.take(a, [3, 4, 5], axis=-1))\n\n with pytest.raises(ValueError):\n da.take(a, 3, axis=2)\n\n assert same_keys(da.take(a, [3, 4, 5], axis=-1), da.take(a, [3, 4, 5], axis=-1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_dask_from_numpy_test_take_dask_from_numpy.assert_eq_z_np_array_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_take_dask_from_numpy_test_take_dask_from_numpy.assert_eq_z_np_array_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1081, "end_line": 1088, "span_ids": ["test_take_dask_from_numpy"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_dask_from_numpy():\n x = np.arange(5).astype(\"f8\")\n y = da.from_array(np.array([1, 2, 3, 3, 2, 1]), chunks=3)\n\n z = da.take(x * 2, y)\n\n assert z.chunks == y.chunks\n assert_eq(z, np.array([2.0, 4.0, 6.0, 6.0, 4.0, 2.0]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_compress_test_compress.None_2.da_compress_True_Fal": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_compress_test_compress.None_2.da_compress_True_Fal", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1091, "end_line": 1123, "span_ids": ["test_compress"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compress():\n x = np.arange(25).reshape((5, 5))\n a = da.from_array(x, chunks=(2, 2))\n\n c1 = np.array([True, False, True, False, True])\n c2 = np.array([True, False])\n c3 = [True, False]\n dc1 = da.from_array(c1, chunks=3)\n 
dc2 = da.from_array(c2, chunks=2)\n\n for c, dc in [(c1, c1), (c2, c2), (c3, c3), (c1, dc1), (c2, dc2), (c3, dc2)]:\n for axis in [None, 0, 1]:\n res = da.compress(dc, a, axis=axis)\n assert_eq(np.compress(c, x, axis=axis), res)\n if isinstance(dc, da.Array):\n # If condition is a dask array then we expect the shape of the\n # compressed array to be nan, because we won't know that until\n # the result is computed.\n axis = axis or 0\n assert np.isnan(res.shape[axis]).all()\n assert np.isnan(res.chunks[axis]).all()\n else:\n # If condition is a not a dask array then we expect the shape of the\n # compressed axis to be known, i.e., not nan.\n axis = axis or 0\n assert np.count_nonzero(dc) == res.shape[axis]\n assert not np.isnan(res.chunks[axis]).any()\n\n with pytest.raises(ValueError):\n da.compress([True, False], a, axis=100)\n\n with pytest.raises(ValueError):\n da.compress([[True], [False]], a, axis=100)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_extract_test_extract.for_c_dc_in_c1_c1_.if_isinstance_dc_da_Arra.assert_np_isnan_res_chunk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_extract_test_extract.for_c_dc_in_c1_c1_.if_isinstance_dc_da_Arra.assert_np_isnan_res_chunk", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1126, "end_line": 1141, "span_ids": ["test_extract"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_extract():\n x = np.arange(25).reshape((5, 5))\n a = da.from_array(x, chunks=(2, 2))\n\n c1 = np.array([True, False, True, False, True])\n c2 = np.array([[True, False], [True, False]])\n c3 = np.array([True, False])\n dc1 = da.from_array(c1, chunks=3)\n dc2 = da.from_array(c2, chunks=(2, 1))\n dc3 = da.from_array(c3, chunks=2)\n\n for c, dc in [(c1, c1), (c2, c2), (c3, c3), (c1, dc1), (c2, dc2), (c3, dc3)]:\n res = da.extract(dc, a)\n assert_eq(np.extract(c, x), res)\n if isinstance(dc, da.Array):\n assert np.isnan(res.chunks[0]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isnull_test_isclose.assert_eq_da_isclose_a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_isnull_test_isclose.assert_eq_da_isclose_a_b", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1448, "end_line": 1469, "span_ids": ["test_isnull_result_is_an_array", "test_isnull", "test_isclose"], 
"tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_isnull():\n x = np.array([1, np.nan])\n a = da.from_array(x, chunks=(2,))\n with contextlib.suppress(ImportError):\n assert_eq(da.isnull(a), np.isnan(x))\n assert_eq(da.notnull(a), ~(np.isnan(x)))\n\n\ndef test_isnull_result_is_an_array():\n # regression test for https://github.com/dask/dask/issues/3822\n arr = da.from_array(np.arange(3, dtype=np.int64), chunks=-1)\n with contextlib.suppress(ImportError):\n result = da.isnull(arr[0]).compute()\n assert type(result) is np.ndarray\n\n\ndef test_isclose():\n x = np.array([0, np.nan, 1, 1.5])\n y = np.array([1e-9, np.nan, 1, 2])\n a = da.from_array(x, chunks=(2,))\n b = da.from_array(y, chunks=(2,))\n assert_eq(da.isclose(a, b, equal_nan=True), np.isclose(x, y, equal_nan=True))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_allclose_test_allclose.assert_eq_np_array_n_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_allclose_test_allclose.assert_eq_np_array_n_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1168, "end_line": 1178, "span_ids": ["test_allclose"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_allclose():\n n_a = np.array([0, np.nan, 1, 1.5])\n n_b = np.array([1e-9, np.nan, 1, 2])\n\n d_a = da.from_array(n_a, chunks=(2,))\n d_b = da.from_array(n_b, chunks=(2,))\n\n n_r = np.allclose(n_a, n_b, equal_nan=True)\n d_r = da.allclose(d_a, d_b, equal_nan=True)\n\n assert_eq(np.array(n_r)[()], d_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_choose_test_choose.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_choose_test_choose.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1181, "end_line": 1193, "span_ids": ["test_choose"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_choose():\n # test choose function\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n\n assert_eq(da.choose(d > 5, [0, d]), np.choose(x > 5, [0, x]))\n assert_eq(da.choose(d > 5, [-d, d]), np.choose(x > 5, [-x, x]))\n\n # test choose method\n index_dask = d > 5\n index_numpy = x > 5\n assert_eq(index_dask.choose([0, d]), index_numpy.choose([0, x]))\n assert_eq(index_dask.choose([-d, d]), index_numpy.choose([-x, x]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_test_piecewise.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_test_piecewise.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1196, "end_line": 1205, "span_ids": ["test_piecewise"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_piecewise():\n np.random.seed(1337)\n\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n\n assert_eq(\n np.piecewise(x, [x < 5, x >= 5], [lambda e, v, k: e + 1, 5], 1, k=2),\n da.piecewise(d, [d < 5, d >= 5], [lambda e, v, k: e + 1, 5], 1, k=2),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_otherwise_test_piecewise_otherwise.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_piecewise_otherwise_test_piecewise_otherwise.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1208, "end_line": 1229, "span_ids": ["test_piecewise_otherwise"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_piecewise_otherwise():\n np.random.seed(1337)\n\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n\n assert_eq(\n np.piecewise(\n x,\n [x > 5, x <= 2],\n [lambda e, v, k: e + 1, lambda e, v, k: v * e, lambda e, v, k: 0],\n 1,\n k=2,\n ),\n da.piecewise(\n d,\n [d > 5, d <= 2],\n [lambda e, v, k: e + 1, lambda e, v, k: v * e, lambda e, v, k: 0],\n 
1,\n k=2,\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_argwhere_test_argwhere_str.assert_eq_d_nz_x_nz_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_argwhere_test_argwhere_str.assert_eq_d_nz_x_nz_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1232, "end_line": 1263, "span_ids": ["test_argwhere", "test_argwhere_str", "test_argwhere_obj"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_argwhere():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_nz = np.argwhere(x)\n d_nz = da.argwhere(d)\n\n assert_eq(d_nz, x_nz)\n\n\ndef test_argwhere_obj():\n x = np.random.randint(10, size=(15, 16)).astype(object)\n d = da.from_array(x, chunks=(4, 5))\n\n x_nz = np.argwhere(x)\n d_nz = da.argwhere(d)\n\n assert_eq(d_nz, x_nz)\n\n\ndef test_argwhere_str():\n # We may have behavior differences with NumPy for strings\n # with just spaces, depending on the version of NumPy.\n # https://github.com/numpy/numpy/issues/9875\n x = np.array(list(\"Hello world\"))\n d = da.from_array(x, chunks=(4,))\n\n x_nz = np.argwhere(x)\n d_nz = da.argwhere(d)\n\n assert_eq(d_nz, x_nz)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_test_where.for_c1_c2_in_.for_b1_b2_in_0_0_.assert_eq_w1_w2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_test_where.for_c1_c2_in_.for_b1_b2_in_0_0_.assert_eq_w1_w2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1266, "end_line": 1287, "span_ids": ["test_where"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where():\n x = np.random.randint(10, size=(15, 14))\n x[5, 5] = x[4, 4] = 0 # Ensure some false elements\n d = da.from_array(x, chunks=(4, 5))\n y = np.random.randint(10, size=15).astype(np.uint8)\n e = da.from_array(y, chunks=(4,))\n\n for c1, c2 in [\n (d > 5, x > 5),\n (d, x),\n (1, 1),\n (0, 0),\n (5, 5),\n (True, True),\n (np.True_, np.True_),\n 
(False, False),\n (np.False_, np.False_),\n ]:\n for b1, b2 in [(0, 0), (-e[:, None], -y[:, None]), (e[:14], y[:14])]:\n w1 = da.where(c1, d, b1)\n w2 = np.where(c2, x, b2)\n assert_eq(w1, w2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_scalar_dtype_test_where_scalar_dtype.assert_eq_w3_w4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_scalar_dtype_test_where_scalar_dtype.assert_eq_w3_w4_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1290, "end_line": 1302, "span_ids": ["test_where_scalar_dtype"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_scalar_dtype():\n x = np.int32(3)\n y1 = np.array([4, 5, 6], dtype=np.int16)\n c1 = np.array([1, 0, 1])\n y2 = da.from_array(y1, chunks=2)\n c2 = da.from_array(c1, chunks=2)\n w1 = np.where(c1, x, y1)\n w2 = da.where(c2, x, y2)\n assert_eq(w1, w2)\n # Test again for the bool optimization\n w3 = np.where(True, x, y1)\n w4 = da.where(True, x, y1)\n assert_eq(w3, w4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_bool_optimization_test_where_bool_optimization.for_c_in_True_False_np.assert_w1_is_ex_w1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_bool_optimization_test_where_bool_optimization.for_c_in_True_False_np.assert_w1_is_ex_w1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1305, "end_line": 1319, "span_ids": ["test_where_bool_optimization"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_bool_optimization():\n x = np.random.randint(10, size=(15, 16))\n d = da.from_array(x, chunks=(4, 5))\n y = np.random.randint(10, size=(15, 16))\n e = da.from_array(y, chunks=(4, 5))\n\n for c in [True, False, np.True_, np.False_, 1, 0]:\n w1 = da.where(c, d, e)\n w2 = np.where(c, x, y)\n\n assert_eq(w1, w2)\n\n ex_w1 = d if c else e\n\n assert w1 is ex_w1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_nonzero_test_where_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_w_.assert_eq_d_w_i_x_w_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_nonzero_test_where_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_w_.assert_eq_d_w_i_x_w_i_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1322, "end_line": 1334, "span_ids": ["test_where_nonzero"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_nonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_w = np.where(x)\n d_w = da.where(d)\n\n assert isinstance(d_w, type(x_w))\n assert len(d_w) == len(x_w)\n\n for i in range(len(x_w)):\n assert_eq(d_w[i], x_w[i])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_incorrect_args_test_count_nonzero.for_shape_chunks_in_0_.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_where_incorrect_args_test_count_nonzero.for_shape_chunks_in_0_.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1337, "end_line": 1359, "span_ids": ["test_count_nonzero", "test_where_incorrect_args"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_incorrect_args():\n a = da.ones(5, chunks=3)\n\n for kwd in [\"x\", \"y\"]:\n kwargs = {kwd: a}\n try:\n da.where(a > 0, **kwargs)\n except ValueError as e:\n assert \"either both or neither of x and y should be given\" in str(e)\n\n\ndef test_count_nonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_c = np.count_nonzero(x)\n d_c = da.count_nonzero(d)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n assert_eq(x_c, d_c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_axis_test_count_nonzero_obj.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_axis_test_count_nonzero_obj.if_d_c_shape_tuple_.else_.assert_eq_x_c_d_c_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1362, "end_line": 1387, "span_ids": ["test_count_nonzero_axis", "test_count_nonzero_obj"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [None, 0, (1,), (0, 1)])\ndef test_count_nonzero_axis(axis):\n for shape, chunks in [((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_c = np.count_nonzero(x, axis)\n d_c = da.count_nonzero(d, axis)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n assert_eq(x_c, d_c)\n\n\ndef test_count_nonzero_obj():\n x = np.random.randint(10, size=(15, 16)).astype(object)\n d = da.from_array(x, chunks=(4, 5))\n\n x_c = np.count_nonzero(x)\n d_c = da.count_nonzero(d)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n assert_eq(x_c, d_c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_obj_axis_test_count_nonzero_obj_axis.if_d_c_shape_tuple_.else_.assert_eq_x_c_astype_np_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_obj_axis_test_count_nonzero_obj_axis.if_d_c_shape_tuple_.else_.assert_eq_x_c_astype_np_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1390, "end_line": 1406, "span_ids": ["test_count_nonzero_obj_axis"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [None, 0, (1,), (0, 1)])\ndef test_count_nonzero_obj_axis(axis):\n x = np.random.randint(10, size=(15, 16)).astype(object)\n d = da.from_array(x, chunks=(4, 5))\n\n x_c = np.count_nonzero(x, axis)\n d_c = da.count_nonzero(d, axis)\n\n if d_c.shape == tuple():\n assert x_c == d_c.compute()\n else:\n #######################################################\n # Workaround oddness with Windows and object arrays. 
#\n # #\n # xref: https://github.com/numpy/numpy/issues/9468 #\n #######################################################\n assert_eq(x_c.astype(np.intp), d_c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_str_test_flatnonzero.for_shape_chunks_in_0_.assert_eq_d_fnz_x_fnz_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_count_nonzero_str_test_flatnonzero.for_shape_chunks_in_0_.assert_eq_d_fnz_x_fnz_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1409, "end_line": 1430, "span_ids": ["test_count_nonzero_str", "test_flatnonzero"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_count_nonzero_str():\n # We may have behavior differences with NumPy for strings\n # with just spaces, depending on the version of NumPy.\n # https://github.com/numpy/numpy/issues/9875\n x = np.array(list(\"Hellow orld\"))\n d = da.from_array(x, chunks=(4,))\n\n x_c = np.count_nonzero(x)\n d_c = da.count_nonzero(d)\n\n assert x_c == d_c.compute()\n\n\ndef test_flatnonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_fnz = np.flatnonzero(x)\n d_fnz = da.flatnonzero(d)\n\n assert_eq(d_fnz, x_fnz)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_test_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_test_nonzero.for_shape_chunks_in_0_.for_i_in_range_len_x_nz_.assert_eq_d_nz_i_x_nz_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1433, "end_line": 1445, "span_ids": ["test_nonzero"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonzero():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_nz = np.nonzero(x)\n d_nz = da.nonzero(d)\n\n assert isinstance(d_nz, type(x_nz))\n assert len(d_nz) == 
len(x_nz)\n\n for i in range(len(x_nz)):\n assert_eq(d_nz[i], x_nz[i])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_test_unravel_index.for_nindices_shape_orde.assert_eq_darr_vindex_d_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_unravel_index_test_unravel_index.for_nindices_shape_orde.assert_eq_darr_vindex_d_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1479, "end_line": 1503, "span_ids": ["test_unravel_index"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unravel_index():\n for nindices, shape, order in [\n (0, (15,), \"C\"),\n (1, (15,), \"C\"),\n (3, (15,), \"C\"),\n (3, (15,), \"F\"),\n (2, (15, 16), \"C\"),\n (2, (15, 16), \"F\"),\n ]:\n arr = np.random.random(shape)\n darr = da.from_array(arr, chunks=1)\n\n findices = np.random.randint(np.prod(shape, dtype=int), size=nindices)\n d_findices = da.from_array(findices, chunks=1)\n\n indices = np.unravel_index(findices, shape, order)\n d_indices = da.unravel_index(d_findices, shape, order)\n\n assert isinstance(d_indices, type(indices))\n assert len(d_indices) == len(indices)\n\n for i in range(len(indices)):\n assert_eq(d_indices[i], indices[i])\n\n assert_eq(darr.vindex[d_indices], arr[indices])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_test_coarsen.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_test_coarsen.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1506, "end_line": 1519, "span_ids": ["test_coarsen"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_coarsen():\n x = np.random.randint(10, size=(24, 24))\n d = da.from_array(x, chunks=(4, 8))\n\n assert_eq(\n da.chunk.coarsen(np.sum, x, {0: 2, 1: 4}), da.coarsen(np.sum, d, {0: 2, 1: 4})\n )\n assert_eq(\n da.chunk.coarsen(np.sum, x, {0: 2, 1: 4}), da.coarsen(da.sum, d, {0: 2, 1: 4})\n )\n assert_eq(\n da.chunk.coarsen(np.mean, x, {0: 2, 1: 4}, dtype=\"float32\"),\n da.coarsen(da.mean, d, {0: 2, 1: 4}, 
dtype=\"float32\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_insert_test_insert.None_2.da_insert_a_3_1_axi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_insert_test_insert.None_2.da_insert_a_3_1_axi", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2144, "end_line": 2180, "span_ids": ["test_insert"], "tokens": 571}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_insert():\n x = np.random.randint(10, size=(10, 10))\n a = da.from_array(x, chunks=(5, 5))\n y = np.random.randint(10, size=(5, 10))\n b = da.from_array(y, chunks=(4, 4))\n\n assert_eq(np.insert(x, 0, -1, axis=0), da.insert(a, 0, -1, axis=0))\n assert_eq(np.insert(x, 3, -1, axis=-1), da.insert(a, 3, -1, axis=-1))\n assert_eq(np.insert(x, 5, -1, axis=1), da.insert(a, 5, -1, axis=1))\n assert_eq(np.insert(x, -1, -1, axis=-2), da.insert(a, -1, -1, axis=-2))\n assert_eq(np.insert(x, [2, 3, 3], -1, axis=1), da.insert(a, [2, 3, 3], -1, axis=1))\n assert_eq(\n np.insert(x, [2, 3, 8, 8, -2, -2], -1, axis=0),\n da.insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),\n )\n assert_eq(\n np.insert(x, slice(1, 4), -1, axis=1), da.insert(a, slice(1, 4), -1, axis=1)\n )\n assert_eq(\n np.insert(x, [2] * 3 + [5] * 2, y, axis=0),\n da.insert(a, [2] * 3 + [5] * 2, b, axis=0),\n )\n assert_eq(np.insert(x, 0, y[0], axis=1), da.insert(a, 0, b[0], axis=1))\n\n assert same_keys(\n da.insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),\n da.insert(a, [2, 3, 8, 8, -2, -2], -1, axis=0),\n )\n\n with pytest.raises(NotImplementedError):\n da.insert(a, [4, 2], -1, axis=0)\n\n with pytest.raises(np.AxisError):\n da.insert(a, [3], -1, axis=2)\n\n with pytest.raises(np.AxisError):\n da.insert(a, [3], -1, axis=-3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py__numpy_and_dask_inputs__numpy_and_dask_inputs.return.np_inputs_da_inputs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py__numpy_and_dask_inputs__numpy_and_dask_inputs.return.np_inputs_da_inputs", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1618, "end_line": 1657, "span_ids": ["_numpy_and_dask_inputs"], "tokens": 313}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _numpy_and_dask_inputs(input_sigs):\n # einsum label dimensions\n _dimensions = {\n \"a\": 5,\n \"b\": 6,\n \"c\": 7,\n \"d\": 5,\n \"e\": 6,\n \"f\": 10,\n \"g\": 1,\n \"h\": 2,\n \"*\": 11,\n }\n\n # dimension chunks sizes\n _chunks = {\n \"a\": (2, 3),\n \"b\": (2, 3, 1),\n \"c\": (2, 3, 2),\n \"d\": (4, 1),\n \"e\": (2, 4),\n \"f\": (1, 2, 3, 4),\n \"g\": 1,\n \"h\": (1, 1),\n \"*\": 11,\n }\n\n def _shape_from_string(s):\n return tuple(_dimensions[c] for c in s)\n\n def _chunks_from_string(s):\n return tuple(_chunks[c] for c in s)\n\n shapes = [_shape_from_string(s) for s in input_sigs]\n chunks = [_chunks_from_string(s) for s in input_sigs]\n\n np_inputs = [np.random.random(s) for s in shapes]\n da_inputs = [da.from_array(i, chunks=c) for i, c in zip(np_inputs, chunks)]\n\n return np_inputs, da_inputs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_optimize_test_einsum_optimize.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_optimize_test_einsum_optimize.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1706, "end_line": 1724, "span_ids": ["test_einsum_optimize"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"optimize_opts\", [(True, False), (\"greedy\", False), (\"optimal\", False)]\n)\ndef test_einsum_optimize(optimize_opts):\n sig = \"ea,fb,abcd,gc,hd->efgh\"\n input_sigs = sig.split(\"->\")[0].split(\",\")\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n opt1, opt2 = optimize_opts\n\n assert_eq(\n np.einsum(sig, *np_inputs, optimize=opt1),\n da.einsum(sig, *np_inputs, optimize=opt2),\n )\n\n assert_eq(\n np.einsum(sig, *np_inputs, optimize=opt2),\n da.einsum(sig, *np_inputs, optimize=opt1),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_order_test_einsum_order.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_order_test_einsum_order.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1727, "end_line": 1735, "span_ids": ["test_einsum_order"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"order\", [\"C\", \"F\", \"A\", \"K\"])\ndef test_einsum_order(order):\n sig = \"ea,fb,abcd,gc,hd->efgh\"\n input_sigs = sig.split(\"->\")[0].split(\",\")\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n assert_eq(\n np.einsum(sig, *np_inputs, order=order), da.einsum(sig, *np_inputs, order=order)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_casting_test_einsum_casting.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_casting_test_einsum_casting.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1738, "end_line": 1747, "span_ids": ["test_einsum_casting"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"casting\", [\"no\", \"equiv\", \"safe\", \"same_kind\", \"unsafe\"])\ndef test_einsum_casting(casting):\n sig = \"ea,fb,abcd,gc,hd->efgh\"\n input_sigs = sig.split(\"->\")[0].split(\",\")\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n assert_eq(\n np.einsum(sig, *np_inputs, casting=casting),\n da.einsum(sig, *np_inputs, casting=casting),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_split_every_test_einsum_invalid_args.with_pytest_raises_TypeEr.da_einsum_a_da_inputs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_split_every_test_einsum_invalid_args.with_pytest_raises_TypeEr.da_einsum_a_da_inputs", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1750, "end_line": 1761, "span_ids": ["test_einsum_invalid_args", "test_einsum_split_every"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [None, 2])\ndef test_einsum_split_every(split_every):\n np_inputs, da_inputs = 
_numpy_and_dask_inputs(\"a\")\n assert_eq(\n np.einsum(\"a\", *np_inputs), da.einsum(\"a\", *da_inputs, split_every=split_every)\n )\n\n\ndef test_einsum_invalid_args():\n _, da_inputs = _numpy_and_dask_inputs(\"a\")\n with pytest.raises(TypeError):\n da.einsum(\"a\", *da_inputs, foo=1, bar=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction_test_einsum_broadcasting_contraction.assert_eq_np_res_mul_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction_test_einsum_broadcasting_contraction.assert_eq_np_res_mul_res", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1764, "end_line": 1784, "span_ids": ["test_einsum_broadcasting_contraction"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_einsum_broadcasting_contraction():\n a = np.random.rand(1, 5, 4)\n b = np.random.rand(4, 6)\n c = np.random.rand(5, 6)\n d = np.random.rand(10)\n\n d_a = da.from_array(a, chunks=(1, (2, 3), (2, 2)))\n d_b = da.from_array(b, chunks=((2, 2), (4, 2)))\n d_c = da.from_array(c, chunks=((2, 3), (4, 2)))\n d_d = da.from_array(d, chunks=((7, 3)))\n\n np_res = np.einsum(\"ijk,kl,jl\", a, b, c)\n da_res = da.einsum(\"ijk,kl,jl\", d_a, d_b, d_c)\n assert_eq(np_res, da_res)\n\n mul_res = da_res * d\n\n np_res = np.einsum(\"ijk,kl,jl,i->i\", a, b, c, d)\n da_res = da.einsum(\"ijk,kl,jl,i->i\", d_a, d_b, d_c, d_d)\n assert_eq(np_res, da_res)\n assert_eq(np_res, mul_res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction2_test_einsum_broadcasting_contraction2.assert_eq_np_res_mul_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction2_test_einsum_broadcasting_contraction2.assert_eq_np_res_mul_res", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1787, "end_line": 1807, "span_ids": ["test_einsum_broadcasting_contraction2"], "tokens": 296}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_einsum_broadcasting_contraction2():\n a 
= np.random.rand(1, 1, 5, 4)\n b = np.random.rand(4, 6)\n c = np.random.rand(5, 6)\n d = np.random.rand(7, 7)\n\n d_a = da.from_array(a, chunks=(1, 1, (2, 3), (2, 2)))\n d_b = da.from_array(b, chunks=((2, 2), (4, 2)))\n d_c = da.from_array(c, chunks=((2, 3), (4, 2)))\n d_d = da.from_array(d, chunks=((7, 3)))\n\n np_res = np.einsum(\"abjk,kl,jl\", a, b, c)\n da_res = da.einsum(\"abjk,kl,jl\", d_a, d_b, d_c)\n assert_eq(np_res, da_res)\n\n mul_res = da_res * d\n\n np_res = np.einsum(\"abjk,kl,jl,ab->ab\", a, b, c, d)\n da_res = da.einsum(\"abjk,kl,jl,ab->ab\", d_a, d_b, d_c, d_d)\n assert_eq(np_res, da_res)\n assert_eq(np_res, mul_res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction3_test_einsum_broadcasting_contraction3.assert_eq_np_res_da_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_broadcasting_contraction3_test_einsum_broadcasting_contraction3.assert_eq_np_res_da_res_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1810, "end_line": 1823, "span_ids": ["test_einsum_broadcasting_contraction3"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_einsum_broadcasting_contraction3():\n a = np.random.rand(1, 5, 4)\n b = np.random.rand(4, 1, 6)\n c = np.random.rand(5, 6)\n d = np.random.rand(7, 7)\n\n d_a = da.from_array(a, chunks=(1, (2, 3), (2, 2)))\n d_b = da.from_array(b, chunks=((2, 2), 1, (4, 2)))\n d_c = da.from_array(c, chunks=((2, 3), (4, 2)))\n d_d = da.from_array(d, chunks=((7, 3)))\n\n np_res = np.einsum(\"ajk,kbl,jl,ab->ab\", a, b, c, d)\n da_res = da.einsum(\"ajk,kbl,jl,ab->ab\", d_a, d_b, d_c, d_d)\n assert_eq(np_res, da_res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d_test_slice_1d._x_1_8_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d_test_slice_1d._x_1_8_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 109, "span_ids": ["test_slice_1d"], "tokens": 784}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_slice_1d():\n expected = {0: slice(10, 25, 1), 1: slice(None, None, None), 2: slice(0, 1, 1)}\n result = _slice_1d(100, [25] * 4, slice(10, 51, None))\n assert expected == result\n\n # x[100:12:-3]\n expected = {\n 0: slice(-2, -8, -3),\n 1: slice(-1, -21, -3),\n 2: slice(-3, -21, -3),\n 3: slice(-2, -21, -3),\n 4: slice(-1, -21, -3),\n }\n result = _slice_1d(100, [20] * 5, slice(100, 12, -3))\n assert expected == result\n\n # x[102::-3]\n expected = {\n 0: slice(-2, -21, -3),\n 1: slice(-1, -21, -3),\n 2: slice(-3, -21, -3),\n 3: slice(-2, -21, -3),\n 4: slice(-1, -21, -3),\n }\n result = _slice_1d(100, [20] * 5, slice(102, None, -3))\n assert expected == result\n\n # x[::-4]\n expected = {\n 0: slice(-1, -21, -4),\n 1: slice(-1, -21, -4),\n 2: slice(-1, -21, -4),\n 3: slice(-1, -21, -4),\n 4: slice(-1, -21, -4),\n }\n result = _slice_1d(100, [20] * 5, slice(None, None, -4))\n assert expected == result\n\n # x[::-7]\n expected = {\n 0: slice(-5, -21, -7),\n 1: slice(-4, -21, -7),\n 2: slice(-3, -21, -7),\n 3: slice(-2, -21, -7),\n 4: slice(-1, -21, -7),\n }\n result = _slice_1d(100, [20] * 5, slice(None, None, -7))\n assert expected == result\n\n # x=range(115)\n # x[::-7]\n expected = {\n 0: slice(-7, -24, -7),\n 1: slice(-2, -24, -7),\n 2: slice(-4, -24, -7),\n 3: slice(-6, -24, -7),\n 4: slice(-1, -24, -7),\n }\n result = _slice_1d(115, [23] * 5, slice(None, None, -7))\n assert expected == result\n\n # x[79::-3]\n expected = {\n 0: slice(-1, -21, -3),\n 1: slice(-3, -21, -3),\n 2: slice(-2, -21, -3),\n 3: slice(-1, -21, -3),\n }\n result = _slice_1d(100, [20] * 5, slice(79, None, -3))\n assert expected == result\n\n # x[-1:-8:-1]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d.expected_14_test_slice_1d.None_14": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_1d.expected_14_test_slice_1d.None_14", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 179, "span_ids": ["test_slice_1d"], "tokens": 850}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_1d():\n # ... 
other code\n expected = {4: slice(-1, -8, -1)}\n result = _slice_1d(100, [20, 20, 20, 20, 20], slice(-1, 92, -1))\n assert expected == result\n\n # x[20:0:-1]\n expected = {0: slice(-1, -20, -1), 1: slice(-20, -21, -1)}\n result = _slice_1d(100, [20, 20, 20, 20, 20], slice(20, 0, -1))\n assert expected == result\n\n # x[:0]\n expected = {}\n result = _slice_1d(100, [20, 20, 20, 20, 20], slice(0))\n assert result\n\n # x=range(99)\n expected = {\n 0: slice(-3, -21, -3),\n 1: slice(-2, -21, -3),\n 2: slice(-1, -21, -3),\n 3: slice(-2, -20, -3),\n 4: slice(-1, -21, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(99, [20, 20, 20, 19, 20], slice(100, None, -3))\n assert expected == result\n\n # x=range(104)\n # x[::-3]\n expected = {\n 0: slice(-1, -21, -3),\n 1: slice(-3, -24, -3),\n 2: slice(-3, -28, -3),\n 3: slice(-1, -14, -3),\n 4: slice(-1, -22, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, None, -3))\n assert expected == result\n\n # x=range(104)\n # x[:27:-3]\n expected = {\n 1: slice(-3, -16, -3),\n 2: slice(-3, -28, -3),\n 3: slice(-1, -14, -3),\n 4: slice(-1, -22, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, 27, -3))\n assert expected == result\n\n # x=range(104)\n # x[100:27:-3]\n expected = {\n 1: slice(-3, -16, -3),\n 2: slice(-3, -28, -3),\n 3: slice(-1, -14, -3),\n 4: slice(-4, -22, -3),\n }\n # This array has non-uniformly sized blocks\n result = _slice_1d(104, [20, 23, 27, 13, 21], slice(100, 27, -3))\n assert expected == result\n\n # x=range(1000000000000)\n # x[1000:]\n expected = {0: slice(1000, 1000000000, 1)}\n expected.update({ii: slice(None, None, None) for ii in range(1, 1000)})\n # This array is large\n result = _slice_1d(1000000000000, [1000000000] * 1000, slice(1000, None, None))\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_singleton_value_on_boundary_test_slice_array_1d.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_singleton_value_on_boundary_test_slice_array_1d.None_7", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 182, "end_line": 228, "span_ids": ["test_slice_array_1d", "test_slice_singleton_value_on_boundary"], "tokens": 688}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_singleton_value_on_boundary():\n assert _slice_1d(15, [5, 5, 5], 10) == {2: 0}\n assert _slice_1d(30, (5, 5, 5, 5, 5, 5), 10) == {2: 0}\n\n\ndef test_slice_array_1d():\n # x[24::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), (slice(24, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 3): (getitem, 
(\"x\", 3), (slice(1, 25, 2),)),\n }\n result, chunks = slice_array(\"y\", \"x\", [[25] * 4], [slice(24, None, 2)], 8)\n\n assert expected == result\n\n # x[26::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 3), (slice(1, 25, 2),)),\n }\n\n result, chunks = slice_array(\"y\", \"x\", [[25] * 4], [slice(26, None, 2)], 8)\n assert expected == result\n\n # x[24::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), (slice(24, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 3): (getitem, (\"x\", 3), (slice(1, 25, 2),)),\n }\n result, chunks = slice_array(\"y\", \"x\", [(25,) * 4], (slice(24, None, 2),), 8)\n\n assert expected == result\n\n # x[26::2]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 1), (slice(1, 25, 2),)),\n (\"y\", 1): (getitem, (\"x\", 2), (slice(0, 25, 2),)),\n (\"y\", 2): (getitem, (\"x\", 3), (slice(1, 25, 2),)),\n }\n\n result, chunks = slice_array(\"y\", \"x\", [(25,) * 4], (slice(26, None, 2),), 8)\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_2d_test_slice_array_2d.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_2d_test_slice_array_2d.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 268, "span_ids": ["test_slice_array_2d"], "tokens": 392}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_array_2d():\n # 2d slices: x[13::2,10::1]\n expected = {\n (\"y\", 0, 0): (getitem, (\"x\", 0, 0), (slice(13, 20, 2), slice(10, 20, 1))),\n (\"y\", 0, 1): (\n getitem,\n (\"x\", 0, 1),\n (slice(13, 20, 2), slice(None, None, None)),\n ),\n (\"y\", 0, 2): (\n getitem,\n (\"x\", 0, 2),\n (slice(13, 20, 2), slice(None, None, None)),\n ),\n }\n\n result, chunks = slice_array(\n \"y\",\n \"x\",\n [[20], [20, 20, 5]],\n [slice(13, None, 2), slice(10, None, 1)],\n itemsize=8,\n )\n\n assert expected == result\n\n # 2d slices with one dimension: x[5,10::1]\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0, 0), (5, slice(10, 20, 1))),\n (\"y\", 1): (getitem, (\"x\", 0, 1), (5, slice(None, None, None))),\n (\"y\", 2): (getitem, (\"x\", 0, 2), (5, slice(None, None, None))),\n }\n\n result, chunks = slice_array(\n \"y\", \"x\", ([20], [20, 20, 5]), [5, slice(10, None, 1)], 8\n )\n\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_optimizations_test_slicing_with_singleton_indices.assert_expected_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_optimizations_test_slicing_with_singleton_indices.assert_expected_result", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 271, "end_line": 296, "span_ids": ["test_slice_optimizations", "test_slicing_with_singleton_indices"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_optimizations():\n # bar[:]\n expected = {(\"foo\", 0): (\"bar\", 0)}\n result, chunks = slice_array(\"foo\", \"bar\", [[100]], (slice(None, None, None),), 8)\n assert expected == result\n\n # bar[:,:,:]\n expected = {(\"foo\", 0): (\"bar\", 0), (\"foo\", 1): (\"bar\", 1), (\"foo\", 2): (\"bar\", 2)}\n result, chunks = slice_array(\n \"foo\",\n \"bar\",\n [(100, 1000, 10000)],\n (slice(None, None, None), slice(None, None, None), slice(None, None, None)),\n itemsize=8,\n )\n assert expected == result\n\n\ndef test_slicing_with_singleton_indices():\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (slice(0, 5), 8), itemsize=8\n )\n\n expected = {(\"y\", 0): (getitem, (\"x\", 0, 1), (slice(None, None, None), 3))}\n\n assert expected == result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_newaxis_test_slicing_with_newaxis.assert_chunks_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_newaxis_test_slicing_with_newaxis.assert_chunks_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 322, "span_ids": ["test_slicing_with_newaxis"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_with_newaxis():\n result, chunks = slice_array(\n \"y\",\n \"x\",\n ([5, 5], [5, 5]),\n (slice(0, 3), None, slice(None, None, None)),\n itemsize=8,\n )\n\n expected = {\n (\"y\", 0, 0, 0): (\n getitem,\n (\"x\", 0, 0),\n (slice(0, 3, 1), None, slice(None, None, None)),\n ),\n (\"y\", 0, 0, 1): (\n getitem,\n (\"x\", 0, 1),\n (slice(0, 3, 1), None, slice(None, None, None)),\n ),\n }\n\n assert expected == result\n assert chunks == ((3,), (1,), (5, 5))", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_test_take.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_test_take.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 325, "end_line": 355, "span_ids": ["test_take"], "tokens": 479}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take():\n chunks, dsk = take(\"y\", \"x\", [(20, 20, 20, 20)], [5, 1, 47, 3], itemsize=8, axis=0)\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), (np.array([5, 1]),)),\n (\"y\", 1): (getitem, (\"x\", 2), (np.array([7]),)),\n (\"y\", 2): (getitem, (\"x\", 0), (np.array([3]),)),\n }\n np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))\n assert chunks == ((2, 1, 1),)\n\n chunks, dsk = take(\n \"y\", \"x\", [(20, 20, 20, 20), (20, 20)], [5, 1, 47, 3], itemsize=8, axis=0\n )\n expected = {\n (\"y\", 0, 0): (\n getitem,\n (\"x\", 0, 0),\n (np.array([5, 1]), slice(None, None, None)),\n ),\n (\"y\", 0, 1): (\n getitem,\n (\"x\", 0, 1),\n (np.array([5, 1]), slice(None, None, None)),\n ),\n (\"y\", 1, 0): (getitem, (\"x\", 2, 0), (np.array([7]), slice(None, None, None))),\n (\"y\", 1, 1): (getitem, (\"x\", 2, 1), (np.array([7]), slice(None, None, None))),\n (\"y\", 2, 0): (getitem, (\"x\", 0, 0), (np.array([3]), slice(None, None, None))),\n (\"y\", 2, 1): (getitem, (\"x\", 0, 1), (np.array([3]), slice(None, None, None))),\n }\n np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))\n assert chunks == ((2, 1, 1), (20, 20))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_sorted_test_take_sorted.assert_chunks_20_20": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_sorted_test_take_sorted.assert_chunks_20_20", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 349, "end_line": 372, "span_ids": ["test_take_sorted"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_sorted():\n chunks, dsk = take(\"y\", \"x\", [(20, 20, 20, 20)], [1, 3, 5, 47], itemsize=8, axis=0)\n expected = {\n (\"y\", 0): (getitem, (\"x\", 0), ([1, 3, 5],)),\n 
(\"y\", 1): (getitem, (\"x\", 2), ([7],)),\n }\n np.testing.assert_equal(dsk, expected)\n assert chunks == ((3, 1),)\n\n chunks, dsk = take(\n \"y\", \"x\", [(20, 20, 20, 20), (20, 20)], [1, 3, 5, 37], itemsize=8, axis=1\n )\n expected = merge(\n {\n (\"y\", i, 0): (getitem, (\"x\", i, 0), (slice(None, None, None), [1, 3, 5]))\n for i in range(4)\n },\n {\n (\"y\", i, 1): (getitem, (\"x\", i, 1), (slice(None, None, None), [17]))\n for i in range(4)\n },\n )\n np.testing.assert_equal(dsk, expected)\n assert chunks == ((20, 20, 20, 20), (3, 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_chunks_test_slicing_chunks.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_chunks_test_slicing_chunks.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 384, "end_line": 398, "span_ids": ["test_slicing_chunks"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_chunks():\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (1, np.array([2, 0, 3])), itemsize=8\n )\n assert chunks == ((3,),)\n\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (slice(0, 7), np.array([2, 0, 3])), itemsize=8\n )\n assert chunks == ((5, 2), (3,))\n\n result, chunks = slice_array(\n \"y\", \"x\", ([5, 5], [5, 5]), (slice(0, 7), 1), itemsize=8\n )\n assert chunks == ((5, 2),)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_numpy_arrays_test_slicing_with_numpy_arrays.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_numpy_arrays_test_slicing_with_numpy_arrays.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 401, "end_line": 425, "span_ids": ["test_slicing_with_numpy_arrays"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_with_numpy_arrays():\n a, bd1 = slice_array(\n \"y\",\n \"x\",\n ((3, 3, 3, 1), (3, 3, 3, 1)),\n (np.array([1, 2, 9]), slice(None, None, None)),\n itemsize=8,\n )\n b, bd2 = slice_array(\n \"y\",\n \"x\",\n ((3, 
3, 3, 1), (3, 3, 3, 1)),\n (np.array([1, 2, 9]), slice(None, None, None)),\n itemsize=8,\n )\n\n assert bd1 == bd2\n np.testing.assert_equal(a, b)\n\n i = [False, True, True, False, False, False, False, False, False, True]\n index = (i, slice(None, None, None))\n index = normalize_index(index, (10, 10))\n c, bd3 = slice_array(\"y\", \"x\", ((3, 3, 3, 1), (3, 3, 3, 1)), index, itemsize=8)\n assert bd1 == bd3\n np.testing.assert_equal(a, c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_and_chunks_test_slicing_identities.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_and_chunks_test_slicing_identities.None_9", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 428, "end_line": 446, "span_ids": ["test_slicing_and_chunks", "test_slicing_identities"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_and_chunks():\n o = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))\n t = o[4:-4, 2:-2]\n assert t.chunks == ((8, 8), (6, 6))\n\n\ndef test_slicing_identities():\n a = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))\n\n assert a is a[slice(None)]\n assert a is a[:]\n assert a is a[::]\n assert a is a[...]\n assert a is a[0:]\n assert a is a[0::]\n assert a is a[::1]\n assert a is a[0 : len(a)]\n assert a is a[0::1]\n assert a is a[0 : len(a) : 1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_stop_0_ReturnItem.__getitem__.return.key": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_stop_0_ReturnItem.__getitem__.return.key", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 449, "end_line": 465, "span_ids": ["test_slice_stop_0", "ReturnItem.__getitem__", "ReturnItem", "test_slice_list_then_None"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_stop_0():\n # from gh-125\n a = da.ones(10, chunks=(10,))[:0].compute()\n b = np.ones(10)[:0]\n assert_eq(a, b)\n\n\ndef test_slice_list_then_None():\n x = da.zeros(shape=(5, 5), chunks=(3, 3))\n y = x[[2, 
1]][None]\n\n assert_eq(y, np.zeros((1, 2, 5)))\n\n\nclass ReturnItem:\n def __getitem__(self, key):\n return key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_exhaustively_test_slicing_exhaustively.for_i_in_first_indexers_.for_j_in_second_indexers_.assert_eq_x_i_j_a_i_j": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_exhaustively_test_slicing_exhaustively.for_i_in_first_indexers_.for_j_in_second_indexers_.assert_eq_x_i_j_a_i_j", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 468, "end_line": 489, "span_ids": ["test_slicing_exhaustively"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip(reason=\"really long test\")\ndef test_slicing_exhaustively():\n x = np.random.rand(6, 7, 8)\n a = da.from_array(x, chunks=(3, 3, 3))\n I = ReturnItem()\n\n # independent indexing along different axes\n indexers = [0, -2, I[:], I[:5], [0, 1], [0, 1, 2], [4, 2], I[::-1], None, I[:0], []]\n for i in indexers:\n assert_eq(x[i], a[i]), i\n for j in indexers:\n assert_eq(x[i][:, j], a[i][:, j]), (i, j)\n assert_eq(x[:, i][j], a[:, i][j]), (i, j)\n for k in indexers:\n assert_eq(x[..., i][:, j][k], a[..., i][:, j][k]), (i, j, k)\n\n # repeated indexing along the first axis\n first_indexers = [I[:], I[:5], np.arange(5), [3, 1, 4, 5, 0], np.arange(6) < 6]\n second_indexers = [0, -1, 3, I[:], I[:3], I[2:-1], [2, 4], [], I[:0]]\n for i in first_indexers:\n for j in second_indexers:\n assert_eq(x[i][j], a[i][j]), (i, j)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_negative_step_flops_keys_test_slicing_with_negative_step_flops_keys.assert_y_dask_y_name_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_with_negative_step_flops_keys_test_slicing_with_negative_step_flops_keys.assert_y_dask_y_name_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 492, "end_line": 503, "span_ids": ["test_slicing_with_negative_step_flops_keys"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_slicing_with_negative_step_flops_keys():\n x = da.arange(10, chunks=5)\n y = x[:1:-1]\n assert (x.name, 1) in y.dask[(y.name, 0)]\n assert (x.name, 0) in y.dask[(y.name, 1)]\n\n assert_eq(y, np.arange(10)[:1:-1])\n\n assert y.chunks == ((5, 3),)\n\n assert y.dask[(y.name, 0)] == (getitem, (x.name, 1), (slice(-1, -6, -1),))\n assert y.dask[(y.name, 1)] == (getitem, (x.name, 0), (slice(-1, -4, -1),))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_slice_test_multiple_list_slicing.assert_eq_x_0_1_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_slice_test_multiple_list_slicing.assert_eq_x_0_1_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 506, "end_line": 516, "span_ids": ["test_empty_slice", "test_multiple_list_slicing"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_slice():\n x = da.ones((5, 5), chunks=(2, 2), dtype=\"i4\")\n y = x[:0]\n\n assert_eq(y, np.ones((5, 5), dtype=\"i4\")[:0])\n\n\ndef test_multiple_list_slicing():\n x = np.random.rand(6, 7, 8)\n a = da.from_array(x, chunks=(3, 3, 3))\n assert_eq(x[:, [0, 1, 2]][[0, 1]], a[:, [0, 1, 2]][[0, 1]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_list_slicing_test_boolean_list_slicing.assert_eq_da_asarray_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_list_slicing_test_boolean_list_slicing.assert_eq_da_asarray_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 519, "end_line": 529, "span_ids": ["test_boolean_list_slicing"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_boolean_list_slicing():\n with pytest.raises(IndexError):\n da.asarray(range(2))[[True]]\n with pytest.raises(IndexError):\n da.asarray(range(2))[[False, False, False]]\n x = np.arange(5)\n ind = [True, False, False, False, True]\n assert_eq(da.asarray(x)[ind], x[ind])\n # https://github.com/dask/dask/issues/3706\n ind = [True]\n assert_eq(da.asarray([0])[ind], np.arange(1)[ind])", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_numpy_array_slicing_test_boolean_numpy_array_slicing.assert_eq_da_asarray_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_boolean_numpy_array_slicing_test_boolean_numpy_array_slicing.assert_eq_da_asarray_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 532, "end_line": 542, "span_ids": ["test_boolean_numpy_array_slicing"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_boolean_numpy_array_slicing():\n with pytest.raises(IndexError):\n da.asarray(range(2))[np.array([True])]\n with pytest.raises(IndexError):\n da.asarray(range(2))[np.array([False, False, False])]\n x = np.arange(5)\n ind = np.array([True, False, False, False, True])\n assert_eq(da.asarray(x)[ind], x[ind])\n # https://github.com/dask/dask/issues/3706\n ind = np.array([True])\n assert_eq(da.asarray([0])[ind], np.arange(1)[ind])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_list_test_new_blockdim.assert_new_blockdim_20_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_empty_list_test_new_blockdim.assert_new_blockdim_20_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 545, "end_line": 559, "span_ids": ["test_new_blockdim", "test_empty_list", "test_uneven_chunks"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_list():\n x = np.ones((5, 5, 5), dtype=\"i4\")\n dx = da.from_array(x, chunks=2)\n\n assert_eq(dx[[], :3, :2], x[[], :3, :2])\n assert_eq(dx[:3, [], :2], x[:3, [], :2])\n assert_eq(dx[:3, :2, []], x[:3, :2, []])\n\n\ndef test_uneven_chunks():\n assert da.ones(20, chunks=5)[::2].chunks == ((3, 2, 3, 2),)\n\n\ndef test_new_blockdim():\n assert new_blockdim(20, [5, 5, 5, 5], slice(0, None, 2)) == [3, 2, 3, 2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_test_slicing_consistent_names.assert_same_keys_a_0_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_test_slicing_consistent_names.assert_same_keys_a_0_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 574, "span_ids": ["test_slicing_consistent_names"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_consistent_names():\n x = np.arange(100).reshape((10, 10))\n a = da.from_array(x, chunks=(5, 5))\n assert same_keys(a[0], a[0])\n assert same_keys(a[:, [1, 2, 3]], a[:, [1, 2, 3]])\n assert same_keys(a[:, 5:2:-1], a[:, 5:2:-1])\n assert same_keys(a[0, ...], a[0, ...])\n assert same_keys(a[...], a[...])\n assert same_keys(a[[1, 3, 5]], a[[1, 3, 5]])\n assert same_keys(a[-11:11], a[:])\n assert same_keys(a[-11:-9], a[:1])\n assert same_keys(a[-1], a[9])\n assert same_keys(a[0::-1], a[0:-11:-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_after_normalization_test_sanitize_index.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_consistent_names_after_normalization_test_sanitize_index.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 577, "end_line": 596, "span_ids": ["test_slicing_consistent_names_after_normalization", "test_sanitize_index_element", "test_sanitize_index"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slicing_consistent_names_after_normalization():\n x = da.zeros(10, chunks=(5,))\n assert same_keys(x[0:], x[:10])\n assert same_keys(x[0:], x[0:10])\n assert same_keys(x[0:], x[0:10:1])\n assert same_keys(x[:], x[0:10:1])\n\n\ndef test_sanitize_index_element():\n with pytest.raises(TypeError):\n _sanitize_index_element(\"Hello!\")\n\n\ndef test_sanitize_index():\n pd = pytest.importorskip(\"pandas\")\n with pytest.raises(TypeError):\n sanitize_index(\"Hello!\")\n\n np.testing.assert_equal(sanitize_index(pd.Series([1, 2, 3])), [1, 2, 3])\n np.testing.assert_equal(sanitize_index((1, 2, 3)), [1, 2, 3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_uneven_blockdims_test_uneven_blockdims.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_uneven_blockdims_test_uneven_blockdims.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 599, "end_line": 622, "span_ids": ["test_uneven_blockdims"], "tokens": 553}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_uneven_blockdims():\n blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30), (100,))\n index = (slice(240, 270), slice(None))\n dsk_out, bd_out = slice_array(\"in\", \"out\", blockdims, index, itemsize=8)\n sol = {\n (\"in\", 0, 0): (getitem, (\"out\", 7, 0), (slice(28, 31, 1), slice(None))),\n (\"in\", 1, 0): (getitem, (\"out\", 8, 0), (slice(0, 27, 1), slice(None))),\n }\n assert dsk_out == sol\n assert bd_out == ((3, 27), (100,))\n\n blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30),) * 2\n index = (slice(240, 270), slice(180, 230))\n dsk_out, bd_out = slice_array(\"in\", \"out\", blockdims, index, itemsize=8)\n sol = {\n (\"in\", 0, 0): (getitem, (\"out\", 7, 5), (slice(28, 31, 1), slice(29, 30, 1))),\n (\"in\", 0, 1): (getitem, (\"out\", 7, 6), (slice(28, 31, 1), slice(None))),\n (\"in\", 0, 2): (getitem, (\"out\", 7, 7), (slice(28, 31, 1), slice(0, 18, 1))),\n (\"in\", 1, 0): (getitem, (\"out\", 8, 5), (slice(0, 27, 1), slice(29, 30, 1))),\n (\"in\", 1, 1): (getitem, (\"out\", 8, 6), (slice(0, 27, 1), slice(None))),\n (\"in\", 1, 2): (getitem, (\"out\", 8, 7), (slice(0, 27, 1), slice(0, 18, 1))),\n }\n assert dsk_out == sol\n assert bd_out == ((3, 27), (1, 31, 18))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_oob_check_test_index_with_int_dask_array.assert_eq_x_T_idx_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_oob_check_test_index_with_int_dask_array.assert_eq_x_T_idx_ex", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 625, "end_line": 658, "span_ids": ["test_oob_check", "test_index_with_int_dask_array"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_oob_check():\n x = da.ones(5, chunks=(2,))\n with pytest.raises(IndexError):\n x[6]\n with 
pytest.raises(IndexError):\n x[[6]]\n with pytest.raises(IndexError):\n x[-10]\n with pytest.raises(IndexError):\n x[[-10]]\n with pytest.raises(IndexError):\n x[0, 0]\n\n\n@pytest.mark.parametrize(\"idx_chunks\", [None, 3, 2, 1])\n@pytest.mark.parametrize(\"x_chunks\", [None, (3, 5), (2, 3), (1, 2), (1, 1)])\ndef test_index_with_int_dask_array(x_chunks, idx_chunks):\n # test data is crafted to stress use cases:\n # - pick from different chunks of x out of order\n # - a chunk of x contains no matches\n # - only one chunk of x\n x = np.array(\n [[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]\n )\n idx = np.array([3, 0, 1])\n expect = np.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])\n\n if x_chunks is not None:\n x = da.from_array(x, chunks=x_chunks)\n if idx_chunks is not None:\n idx = da.from_array(idx, chunks=idx_chunks)\n\n assert_eq(x[:, idx], expect)\n assert_eq(x.T[idx, :], expect.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.assert_eq_x_idx0_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.assert_eq_x_idx0_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 661, "end_line": 667, "span_ids": ["test_index_with_int_dask_array_0d"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [1, 2, 3])\ndef test_index_with_int_dask_array_0d(chunks):\n # Slice by 0-dimensional array\n x = da.from_array([[10, 20, 30], [40, 50, 60]], chunks=chunks)\n idx0 = da.from_array(1, chunks=1)\n assert_eq(x[idx0, :], x[1, :])\n assert_eq(x[:, idx0], x[:, 1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 670, "end_line": 677, "span_ids": ["test_index_with_int_dask_array_nanchunks"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [1, 2, 3, 4, 5])\ndef test_index_with_int_dask_array_nanchunks(chunks):\n # Slice by array with nan-sized chunks\n a = da.arange(-2, 3, chunks=chunks)\n assert_eq(a[a.nonzero()], np.array([-2, -1, 1, 2]))\n # Edge case: the nan-sized chunks resolve to size 0\n a = da.zeros(5, chunks=chunks)\n assert_eq(a[a.nonzero()], np.array([]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_indexerror.None_1.a_idx_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_indexerror.None_1.a_idx_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 680, "end_line": 695, "span_ids": ["test_index_with_int_dask_array_negindex", "test_index_with_int_dask_array_indexerror"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [2, 4])\ndef test_index_with_int_dask_array_negindex(chunks):\n a = da.arange(4, chunks=chunks)\n idx = da.from_array([-1, -4], chunks=1)\n assert_eq(a[idx], np.array([3, 0]))\n\n\n@pytest.mark.parametrize(\"chunks\", [2, 4])\ndef test_index_with_int_dask_array_indexerror(chunks):\n a = da.arange(4, chunks=chunks)\n idx = da.from_array([4], chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()\n idx = da.from_array([-5], chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_nocompute.with_pytest_raises_NotImp.result_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_nocompute.with_pytest_raises_NotImp.result_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 698, "end_line": 719, "span_ids": ["test_index_with_int_dask_array_dtypes", "test_index_with_int_dask_array_nocompute"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dtype\", [\"int8\", \"int16\", \"int32\", \"int64\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"]\n)\ndef test_index_with_int_dask_array_dtypes(dtype):\n a = da.from_array([10, 20, 30, 40], chunks=-1)\n idx = da.from_array(np.array([1, 2]).astype(dtype), chunks=1)\n assert_eq(a[idx], np.array([20, 30]))\n\n\ndef test_index_with_int_dask_array_nocompute():\n \"\"\"Test that when the indices are a dask array\n they are not accidentally computed\n \"\"\"\n\n def crash():\n raise NotImplementedError()\n\n x = da.arange(5, chunks=-1)\n idx = da.Array({(\"x\", 0): (crash,)}, name=\"x\", chunks=((2,),), dtype=np.int64)\n result = x[idx]\n with pytest.raises(NotImplementedError):\n result.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_test_index_with_bool_dask_array.for_index_in_ind_slice.assert_eq_x_x_index_d_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_test_index_with_bool_dask_array.for_index_in_ind_slice.assert_eq_x_x_index_d_i", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 722, "end_line": 729, "span_ids": ["test_index_with_bool_dask_array"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_with_bool_dask_array():\n x = np.arange(36).reshape((6, 6))\n d = da.from_array(x, chunks=(3, 3))\n ind = np.asarray([True, True, False, True, False, False], dtype=bool)\n ind = da.from_array(ind, chunks=2)\n for index in [ind, (slice(1, 9, 2), ind), (ind, slice(2, 8, 1))]:\n x_index = dask.compute(index)[0]\n assert_eq(x[x_index], d[index])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_2_test_index_with_bool_dask_array_2.for_i_in_range_x_ndim_.assert_eq_x_tuple_index3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_index_with_bool_dask_array_2_test_index_with_bool_dask_array_2.for_i_in_range_x_ndim_.assert_eq_x_tuple_index3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 732, "end_line": 748, "span_ids": ["test_index_with_bool_dask_array_2"], "tokens": 141}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_with_bool_dask_array_2():\n x = np.random.random((10, 10, 10))\n ind = np.random.random(10) > 0.5\n\n d = da.from_array(x, chunks=(3, 4, 5))\n dind = da.from_array(ind, chunks=4)\n\n index = [slice(1, 9, 1), slice(None)]\n\n for i in range(x.ndim):\n index2 = index[:]\n index2.insert(i, dind)\n\n index3 = index[:]\n index3.insert(i, ind)\n\n assert_eq(x[tuple(index3)], d[tuple(index2)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cull_test_negative_list_slicing.assert_eq_dx_4_1_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_cull_test_negative_list_slicing.assert_eq_dx_4_1_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 742, "end_line": 824, "span_ids": ["impl:3", "test_slicing_with_Nones", "test_slicing_integer_no_warnings", "test_slicing_none_int_ellipes", "test_cull", "test_None_overlap_int", "test_negative_n_slicing", "test_negative_list_slicing"], "tokens": 720}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail\ndef test_cull():\n x = da.ones(1000, chunks=(10,))\n\n for slc in [1, slice(0, 30), slice(0, None, 100)]:\n y = x[slc]\n assert len(y.dask) < len(x.dask)\n\n\n@pytest.mark.parametrize(\"shape\", [(2,), (2, 3), (2, 3, 5)])\n@pytest.mark.parametrize(\n \"index\", [(Ellipsis,), (None, Ellipsis), (Ellipsis, None), (None, Ellipsis, None)]\n)\ndef test_slicing_with_Nones(shape, index):\n x = np.random.random(shape)\n d = da.from_array(x, chunks=shape)\n\n assert_eq(x[index], d[index])\n\n\nindexers = [Ellipsis, slice(2), 0, 1, -2, -1, slice(-2, None), None]\n\n\n\"\"\"\n# We comment this out because it is 4096 tests\n@pytest.mark.parametrize('a', indexers)\n@pytest.mark.parametrize('b', indexers)\n@pytest.mark.parametrize('c', indexers)\n@pytest.mark.parametrize('d', indexers)\ndef test_slicing_none_int_ellipses(a, b, c, d):\n if (a, b, c, d).count(Ellipsis) > 1:\n return\n shape = (2,3,5,7,11)\n x = np.arange(np.prod(shape)).reshape(shape)\n y = da.core.asarray(x)\n\n xx = x[a, b, c, d]\n yy = y[a, b, c, d]\n assert_eq(xx, yy)\n\"\"\"\n\n\ndef test_slicing_integer_no_warnings():\n # https://github.com/dask/dask/pull/2457/\n X = da.random.random((100, 2), (2, 2))\n idx = np.array([0, 0, 1, 1])\n with warnings.catch_warnings(record=True) as record:\n X[idx].compute()\n assert not record\n\n\n@pytest.mark.slow\ndef test_slicing_none_int_ellipes():\n shape = (2, 3, 5, 7, 11)\n x = np.arange(np.prod(shape)).reshape(shape)\n y = da.core.asarray(x)\n for ind in 
itertools.product(indexers, indexers, indexers, indexers):\n if ind.count(Ellipsis) > 1:\n continue\n\n assert_eq(x[ind], y[ind])\n\n\ndef test_None_overlap_int():\n a, b, c, d = (0, slice(None, 2, None), None, Ellipsis)\n shape = (2, 3, 5, 7, 11)\n x = np.arange(np.prod(shape)).reshape(shape)\n y = da.core.asarray(x)\n\n xx = x[a, b, c, d]\n yy = y[a, b, c, d]\n assert_eq(xx, yy)\n\n\ndef test_negative_n_slicing():\n assert_eq(da.ones(2, chunks=2)[-2], np.ones(2)[-2])\n\n\ndef test_negative_list_slicing():\n x = np.arange(5)\n dx = da.from_array(x, chunks=2)\n assert_eq(dx[[0, -5]], x[[0, -5]])\n assert_eq(dx[[4, -1]], x[[4, -1]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_permit_oob_slices_test_take_semi_sorted.assert_y_chunks_5_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_permit_oob_slices_test_take_semi_sorted.assert_y_chunks_5_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 836, "end_line": 860, "span_ids": ["test_normalize_index", "test_take_semi_sorted", "test_permit_oob_slices"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_permit_oob_slices():\n x = np.arange(5)\n dx = da.from_array(x, chunks=2)\n\n assert_eq(x[-102:], dx[-102:])\n assert_eq(x[102:], dx[102:])\n assert_eq(x[:102], dx[:102])\n assert_eq(x[:-102], dx[:-102])\n\n\ndef test_normalize_index():\n assert normalize_index((Ellipsis, None), (10,)) == (slice(None), None)\n assert normalize_index(5, (np.nan,)) == (5,)\n assert normalize_index(-5, (np.nan,)) == (-5,)\n (result,) = normalize_index([-5, -2, 1], (np.nan,))\n assert result.tolist() == [-5, -2, 1]\n assert normalize_index(slice(-5, -2), (np.nan,)) == (slice(-5, -2),)\n\n\ndef test_take_semi_sorted():\n x = da.ones(10, chunks=(5,))\n index = np.arange(15) % 10\n\n y = x[index]\n assert y.chunks == ((5, 5, 5),)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_plan_test_slicing_plan.for_i_x_j_y_in_zip.assert_x_y_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slicing_plan_test_slicing_plan.for_i_x_j_y_in_zip.assert_x_y_all_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 863, "end_line": 881, "span_ids": ["test_slicing_plan"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"chunks,index,expected\",\n [\n ((5, 5, 5), np.arange(5, 15) % 10, [(1, np.arange(5)), (0, np.arange(5))]),\n (\n (5, 5, 5, 5),\n np.arange(20) // 2,\n [(0, np.arange(10) // 2), (1, np.arange(10) // 2)],\n ),\n ((10, 10), [15, 2, 3, 15], [(1, [5]), (0, [2, 3]), (1, [5])]),\n ],\n)\ndef test_slicing_plan(chunks, index, expected):\n plan = slicing_plan(chunks, index=index)\n assert len(plan) == len(expected)\n for (i, x), (j, y) in zip(plan, expected):\n assert i == j\n assert len(x) == len(y)\n assert (x == y).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_pathological_unsorted_slicing_test_pathological_unsorted_slicing.assert_out_of_order_in_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_pathological_unsorted_slicing_test_pathological_unsorted_slicing.assert_out_of_order_in_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 954, "end_line": 964, "span_ids": ["test_pathological_unsorted_slicing"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pathological_unsorted_slicing():\n x = da.ones(100, chunks=10)\n\n # [0, 10, 20, ... 90, 1, 11, 21, ... 
91, ...]\n index = np.arange(100).reshape(10, 10).ravel(order=\"F\")\n\n with pytest.warns(da.PerformanceWarning) as info:\n x[index]\n\n assert \"10\" in str(info.list[0])\n assert \"out-of-order\" in str(info.list[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_setitem_with_different_chunks_preserves_shape_test_setitem_with_different_chunks_preserves_shape.assert_x_shape_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_setitem_with_different_chunks_preserves_shape_test_setitem_with_different_chunks_preserves_shape.assert_x_shape_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 990, "end_line": 1003, "span_ids": ["test_setitem_with_different_chunks_preserves_shape"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"params\", [(2, 2, 1), (5, 3, 2)])\ndef test_setitem_with_different_chunks_preserves_shape(params):\n \"\"\"Reproducer for https://github.com/dask/dask/issues/3730.\n\n Mutating based on an array with different chunks can cause new chunks to be\n used. 
We need to ensure those new chunk sizes are applied to the mutated\n array, otherwise the array won't generate the correct keys.\n \"\"\"\n array_size, chunk_size1, chunk_size2 = params\n x = da.zeros(array_size, chunks=chunk_size1)\n mask = da.zeros(array_size, chunks=chunk_size2)\n x[mask] = 1\n result = x.compute()\n assert x.shape == result.shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh3579_test_make_blockwise_sorted_slice.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh3579_test_make_blockwise_sorted_slice.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1006, "end_line": 1020, "span_ids": ["test_make_blockwise_sorted_slice", "test_gh3579"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh3579():\n assert_eq(np.arange(10)[0::-1], da.arange(10, chunks=3)[0::-1])\n assert_eq(np.arange(10)[::-1], da.arange(10, chunks=3)[::-1])\n\n\ndef test_make_blockwise_sorted_slice():\n x = da.arange(8, chunks=4)\n index = np.array([6, 0, 4, 2, 7, 1, 5, 3])\n\n a, b = make_block_sorted_slices(index, x.chunks)\n\n index2 = np.array([0, 2, 4, 6, 1, 3, 5, 7])\n index3 = np.array([3, 0, 2, 1, 7, 4, 6, 5])\n np.testing.assert_array_equal(a, index2)\n np.testing.assert_array_equal(b, index3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_shuffle_slice_test_shuffle_slice.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_shuffle_slice_test_shuffle_slice.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1036, "end_line": 1047, "span_ids": ["test_shuffle_slice"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:Slicing:dask.array.core.PerformanceWarning\")\n@pytest.mark.parametrize(\n \"size, chunks\", [((100, 2), (50, 2)), ((100, 2), (37, 1)), ((100,), (55,))]\n)\ndef test_shuffle_slice(size, chunks):\n x = da.random.randint(0, 1000, size=size, chunks=chunks)\n index = np.arange(len(x))\n np.random.shuffle(index)\n\n a = x[index]\n b = 
shuffle_slice(x, index)\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_basic_test_basic.if_yy_shape_.if_not_isinstance_zz_spa._mostly_dense": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_basic_test_basic.if_yy_shape_.if_not_isinstance_zz_spa._mostly_dense", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 90, "span_ids": ["test_basic"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\ndef test_basic(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.8] = 0\n\n y = x.map_blocks(sparse.COO.from_numpy)\n\n xx = func(x)\n yy = func(y)\n\n assert_eq(xx, yy)\n\n if yy.shape:\n zz = yy.compute()\n if not isinstance(zz, sparse.COO):\n assert (zz != 1).sum() > np.prod(zz.shape) / 2 # mostly dense", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_tensordot_test_tensordot.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_tensordot_test_tensordot.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 107, "end_line": 125, "span_ids": ["test_tensordot"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n SPARSE_VERSION < parse_version(\"0.7.0+10\"),\n reason=\"fixed in https://github.com/pydata/sparse/pull/256\",\n)\ndef test_tensordot():\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n x[x < 0.8] = 0\n y = da.random.random((4, 3, 2), chunks=(2, 2, 1))\n y[y < 0.8] = 0\n\n xx = x.map_blocks(sparse.COO.from_numpy)\n yy = y.map_blocks(sparse.COO.from_numpy)\n\n assert_eq(da.tensordot(x, y, axes=(2, 0)), da.tensordot(xx, yy, axes=(2, 0)))\n assert_eq(da.tensordot(x, y, axes=(1, 1)), da.tensordot(xx, yy, axes=(1, 1)))\n assert_eq(\n da.tensordot(x, y, axes=((1, 2), (1, 0))),\n da.tensordot(xx, yy, axes=((1, 2), (1, 0))),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_concatenate_test_mixed_concatenate.assert_eq_dd_ss_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 129, "span_ids": ["test_mixed_concatenate"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"upstream change\", strict=False)\n@pytest.mark.parametrize(\"func\", functions)\ndef test_mixed_concatenate(func):\n x = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n\n y = da.random.random((2, 3, 4), chunks=(1, 2, 2))\n y[y < 0.8] = 0\n yy = y.map_blocks(sparse.COO.from_numpy)\n\n d = da.concatenate([x, y], axis=0)\n s = da.concatenate([x, yy], axis=0)\n\n dd = func(d)\n ss = func(s)\n\n assert_eq(dd, ss)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_random_test_mixed_random.assert_eq_dd_ss_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 144, "span_ids": ["test_mixed_random"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"upstream change\", strict=False)\n@pytest.mark.parametrize(\"func\", functions)\ndef test_mixed_random(func):\n d = da.random.random((4, 3, 4), chunks=(1, 2, 2))\n d[d < 0.7] = 0\n\n fn = lambda x: sparse.COO.from_numpy(x) if random.random() < 0.5 else x\n s = d.map_blocks(fn)\n\n dd = func(d)\n ss = func(s)\n\n assert_eq(dd, ss)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_output_type_test_mixed_output_type.assert_zz_nnz_y_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_mixed_output_type_test_mixed_output_type.assert_zz_nnz_y_comput", "embedding": null, 
"metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 147, "end_line": 161, "span_ids": ["test_mixed_output_type"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"upstream change\", strict=False)\ndef test_mixed_output_type():\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.8] = 0\n y = y.map_blocks(sparse.COO.from_numpy)\n\n x = da.zeros((10, 1), chunks=(5, 1))\n\n z = da.concatenate([x, y], axis=1)\n\n assert z.shape == (10, 11)\n\n zz = z.compute()\n assert isinstance(zz, sparse.COO)\n assert zz.nnz == y.compute().nnz", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_html_repr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_html_repr_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 199, "end_line": 242, "span_ids": ["test_html_repr", "test_map_blocks", "test_meta_from_array", "test_from_delayed_meta", "test_from_array"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_html_repr():\n pytest.importorskip(\"jinja2\")\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.8] = 0\n y = y.map_blocks(sparse.COO.from_numpy)\n\n text = y._repr_html_()\n\n assert \"COO\" in text\n assert \"sparse\" in text\n assert \"Bytes\" not in text\n\n\ndef test_from_delayed_meta():\n def f():\n return sparse.COO.from_numpy(np.eye(3))\n\n d = dask.delayed(f)()\n x = da.from_delayed(d, shape=(3, 3), meta=sparse.COO.from_numpy(np.eye(1)))\n assert isinstance(x._meta, sparse.COO)\n assert_eq(x, x)\n\n\ndef test_from_array():\n x = sparse.COO.from_numpy(np.eye(10))\n d = da.from_array(x, chunks=(5, 5))\n\n assert isinstance(d._meta, sparse.COO)\n assert_eq(d, d)\n assert isinstance(d.compute(), sparse.COO)\n\n\ndef test_map_blocks():\n x = da.eye(10, chunks=5)\n y = x.map_blocks(sparse.COO.from_numpy, meta=sparse.COO.from_numpy(np.eye(1)))\n assert isinstance(y._meta, sparse.COO)\n assert_eq(y, y)\n\n\ndef test_meta_from_array():\n x = sparse.COO.from_numpy(np.eye(1))\n y = da.utils.meta_from_array(x, ndim=2)\n assert isinstance(y, sparse.COO)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_bias_raises_test_one.assert_allclose_result_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_bias_raises_test_one.assert_allclose_result_co", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 60, "span_ids": ["test_bias_raises", "test_one"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bias_raises():\n x = np.random.random(size=(30, 2))\n y = da.from_array(x, 3)\n\n with pytest.raises(NotImplementedError):\n dask.array.stats.skew(y, bias=False)\n\n with pytest.raises(NotImplementedError):\n dask.array.stats.kurtosis(y, bias=False)\n\n\n@pytest.mark.parametrize(\n \"kind\", [\"chisquare\", \"power_divergence\", \"normaltest\", \"skewtest\", \"kurtosistest\"]\n)\ndef test_one(kind):\n a = np.random.random(size=30)\n a_ = da.from_array(a, 3)\n\n dask_test = getattr(dask.array.stats, kind)\n scipy_test = getattr(scipy.stats, kind)\n\n result = dask_test(a_)\n expected = scipy_test(a)\n\n assert isinstance(result, Delayed)\n assert allclose(result.compute(), expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_two_test_two._assert_dask_compute_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_two_test_two._assert_dask_compute_re", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 98, "span_ids": ["test_two"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"kind, kwargs\",\n [\n (\"ttest_ind\", {}),\n (\"ttest_ind\", {\"equal_var\": False}),\n (\"ttest_1samp\", {}),\n (\"ttest_rel\", {}),\n (\"chisquare\", {}),\n (\"power_divergence\", {}),\n (\"power_divergence\", {\"lambda_\": 0}),\n (\"power_divergence\", {\"lambda_\": -1}),\n (\"power_divergence\", {\"lambda_\": \"neyman\"}),\n ],\n)\ndef test_two(kind, kwargs):\n # The sums of observed and expected frequencies must match\n a = np.random.random(size=30)\n b = a[::-1]\n\n a_ = da.from_array(a, 3)\n b_ = da.from_array(b, 3)\n\n dask_test = getattr(dask.array.stats, kind)\n scipy_test = getattr(scipy.stats, kind)\n\n with warnings.catch_warnings(): # maybe overflow warning (power_divergence)\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n result = 
dask_test(a_, b_, **kwargs)\n expected = scipy_test(a, b, **kwargs)\n\n assert isinstance(result, Delayed)\n assert allclose(result.compute(), expected)\n # fails occasionally. shouldn't this be exact?\n # assert dask.compute(*result) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_moments_test_anova.assert_allclose_result_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_moments_test_anova.assert_allclose_result_co", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 96, "end_line": 113, "span_ids": ["test_moments", "test_anova"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"k\", range(5))\ndef test_moments(k):\n x = np.random.random(size=(30, 2))\n y = da.from_array(x, 3)\n\n expected = scipy.stats.moment(x, k)\n result = dask.array.stats.moment(y, k)\n assert_eq(result, expected)\n\n\ndef test_anova():\n np_args = [i * np.random.random(size=(30,)) for i in range(4)]\n da_args = [da.from_array(x, chunks=10) for x in np_args]\n\n result = dask.array.stats.f_oneway(*da_args)\n expected = scipy.stats.f_oneway(*np_args)\n\n assert allclose(result.compute(), expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_test_errors.assert_unknown_chunk_siz": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_test_errors.assert_unknown_chunk_siz", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 24, "end_line": 55, "span_ids": ["test_errors", "test_repr_html"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repr_html():\n pytest.importorskip(\"jinja2\")\n assert da.ones([])._repr_html_()\n assert da.ones(10)[:0]._repr_html_()\n assert da.ones(10)._repr_html_()\n assert da.ones((10, 10))._repr_html_()\n assert da.ones((10, 10, 10))._repr_html_()\n assert da.ones((10, 10, 10, 10))._repr_html_()\n\n\ndef test_errors():\n # empty arrays\n with pytest.raises(NotImplementedError) as excpt:\n da.ones([]).to_svg()\n assert \"0 dimensions\" in str(excpt.value)\n\n # Scalars\n with 
pytest.raises(NotImplementedError) as excpt:\n da.asarray(1).to_svg()\n assert \"0 dimensions\" in str(excpt.value)\n\n # 0-length dims arrays\n with pytest.raises(NotImplementedError) as excpt:\n da.ones(10)[:0].to_svg()\n assert \"0-length dimensions\" in str(excpt.value)\n\n # unknown chunk sizes\n with pytest.raises(NotImplementedError) as excpt:\n x = da.ones(10)\n x = x[x > 5]\n x.to_svg()\n assert \"unknown chunk sizes\" in str(excpt.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_size_units_test_repr_html_size_units.parses_x__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_repr_html_size_units_test_repr_html_size_units.parses_x__repr_html__", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 58, "end_line": 71, "span_ids": ["test_repr_html_size_units"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repr_html_size_units():\n pytest.importorskip(\"jinja2\")\n x = da.ones((10000, 5000))\n x = da.ones((3000, 10000), chunks=(1000, 1000))\n text = x._repr_html_()\n\n assert \"MB\" in text or \"MiB\" in text\n assert str(x.shape) in text\n assert str(x.dtype) in text\n\n parses(text)\n\n x = da.ones((3000, 10000, 50), chunks=(1000, 1000, 10))\n parses(x._repr_html_())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_testing.py_sys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_testing.py_sys_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_testing.py", "file_name": "test_testing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports", "test_assert_eq_checks_scalars"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nimport numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq\n\n\n@pytest.mark.skipif(sys.flags.optimize, reason=\"Assertions disabled.\")\ndef test_assert_eq_checks_scalars():\n # https://github.com/dask/dask/issues/2680\n with pytest.raises(AssertionError):\n assert_eq(np.array(0), np.array(1))\n\n a = da.from_array(np.array([0]), 1)[0]\n b = np.array([1])[0]\n with pytest.raises(AssertionError):\n 
assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_pickle_unary_ufuncs._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_pickle_unary_ufuncs._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 131, "span_ids": ["impl:5", "imports", "test_ufunc", "test_ufunc_meta"], "tokens": 692}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pickle\nimport warnings\nfrom functools import partial\nfrom operator import add\n\nimport pytest\n\nnp = pytest.importorskip(\"numpy\")\n\nimport dask.array as da\nfrom dask.array.ufunc import da_frompyfunc\nfrom dask.array.utils import assert_eq\nfrom dask.base import tokenize\n\nDISCLAIMER = \"\"\"\nThis docstring was copied from numpy.{name}.\n\nSome inconsistencies with the Dask version may exist.\n\"\"\"\n\n\n@pytest.mark.parametrize(\"name\", [\"log\", \"modf\", \"frexp\"])\ndef test_ufunc_meta(name):\n disclaimer = DISCLAIMER.format(name=name)\n skip_test = \" # doctest: +SKIP\"\n ufunc = getattr(da, name)\n assert ufunc.__name__ == name\n assert disclaimer in ufunc.__doc__\n\n assert (\n ufunc.__doc__.replace(disclaimer, \"\").replace(skip_test, \"\")\n == getattr(np, name).__doc__\n )\n\n\ndef test_ufunc():\n for attr in [\"nin\", \"nargs\", \"nout\", \"ntypes\", \"identity\", \"signature\", \"types\"]:\n assert getattr(da.log, attr) == getattr(np.log, attr)\n\n with pytest.raises(AttributeError):\n da.log.not_an_attribute\n\n assert repr(da.log) == repr(np.log)\n assert \"nin\" in dir(da.log)\n assert \"outer\" in dir(da.log)\n\n\nbinary_ufuncs = [\n \"add\",\n \"arctan2\",\n \"copysign\",\n \"divide\",\n \"equal\",\n \"bitwise_and\",\n \"bitwise_or\",\n \"bitwise_xor\",\n \"floor_divide\",\n \"fmax\",\n \"fmin\",\n \"fmod\",\n \"greater\",\n \"greater_equal\",\n \"hypot\",\n \"ldexp\",\n \"less\",\n \"less_equal\",\n \"logaddexp\",\n \"logaddexp2\",\n \"logical_and\",\n \"logical_or\",\n \"logical_xor\",\n \"maximum\",\n \"minimum\",\n \"mod\",\n \"multiply\",\n \"nextafter\",\n \"not_equal\",\n \"power\",\n \"remainder\",\n \"subtract\",\n \"true_divide\",\n \"float_power\",\n]\n\nunary_ufuncs = [\n \"absolute\",\n \"arccos\",\n \"arccosh\",\n \"arcsin\",\n \"arcsinh\",\n \"arctan\",\n \"arctanh\",\n \"bitwise_not\",\n \"cbrt\",\n \"ceil\",\n \"conj\",\n \"cos\",\n \"cosh\",\n \"deg2rad\",\n \"degrees\",\n \"exp\",\n \"exp2\",\n \"expm1\",\n \"fabs\",\n \"fix\",\n \"floor\",\n \"invert\",\n \"isfinite\",\n \"isinf\",\n \"isnan\",\n \"log\",\n \"log10\",\n \"log1p\",\n \"log2\",\n \"logical_not\",\n \"negative\",\n \"rad2deg\",\n \"radians\",\n \"reciprocal\",\n \"rint\",\n \"sign\",\n \"signbit\",\n \"sin\",\n \"sinh\",\n \"spacing\",\n \"sqrt\",\n \"square\",\n \"tan\",\n \"tanh\",\n \"trunc\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_unary_ufunc_test_unary_ufunc.None_3.assert_eq_dafunc_arr_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_unary_ufunc_test_unary_ufunc.None_3.assert_eq_dafunc_arr_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 163, "span_ids": ["test_unary_ufunc"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", unary_ufuncs)\ndef test_unary_ufunc(ufunc):\n if ufunc == \"fix\":\n pytest.skip(\"fix calls floor in a way that we do not yet support\")\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n arr = np.random.randint(1, 100, size=(20, 20))\n darr = da.from_array(arr, 3)\n\n with warnings.catch_warnings(): # some invalid values (arccos, arcsin, etc.)\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(darr), da.Array)\n assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)\n\n with warnings.catch_warnings(): # some invalid values (arccos, arcsin, etc.)\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(darr), da.Array)\n else:\n assert isinstance(npfunc(darr), np.ndarray)\n assert_eq(npfunc(darr), npfunc(arr), equal_nan=True)\n\n with warnings.catch_warnings(): # some invalid values (arccos, arcsin, etc.)\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n # applying Dask ufunc to normal ndarray triggers computation\n assert isinstance(dafunc(arr), np.ndarray)\n assert_eq(dafunc(arr), npfunc(arr), equal_nan=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_binary_ufunc_test_binary_ufunc.None_1.assert_eq_dafunc_10_arr1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_binary_ufunc_test_binary_ufunc.None_1.assert_eq_dafunc_10_arr1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 166, "end_line": 204, "span_ids": ["test_binary_ufunc"], "tokens": 460}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"ufunc\", binary_ufuncs)\ndef test_binary_ufunc(ufunc):\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n arr1 = np.random.randint(1, 100, size=(20, 20))\n darr1 = da.from_array(arr1, 3)\n\n arr2 = np.random.randint(1, 100, size=(20, 20))\n darr2 = da.from_array(arr2, 3)\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(darr1, darr2), da.Array)\n assert_eq(dafunc(darr1, darr2), npfunc(arr1, arr2))\n\n # applying NumPy ufunc triggers computation or is lazy\n assert isinstance(npfunc(darr1, darr2), da.Array)\n assert_eq(npfunc(darr1, darr2), npfunc(arr1, arr2))\n\n # applying Dask ufunc to normal ndarray triggers computation\n assert isinstance(dafunc(arr1, arr2), np.ndarray)\n assert_eq(dafunc(arr1, arr2), npfunc(arr1, arr2))\n\n # with scalar\n assert isinstance(dafunc(darr1, 10), da.Array)\n assert_eq(dafunc(darr1, 10), npfunc(arr1, 10))\n\n with warnings.catch_warnings(): # overflow in ldexp\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n assert isinstance(dafunc(10, darr1), da.Array)\n assert_eq(dafunc(10, darr1), npfunc(10, arr1))\n\n assert isinstance(dafunc(arr1, 10), np.ndarray)\n assert_eq(dafunc(arr1, 10), npfunc(arr1, 10))\n\n with warnings.catch_warnings(): # overflow in ldexp\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n assert isinstance(dafunc(10, arr1), np.ndarray)\n assert_eq(dafunc(10, arr1), npfunc(10, arr1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_outer_test_ufunc_outer.None_2.da_sin_outer_darr1_darr2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_outer_test_ufunc_outer.None_2.da_sin_outer_darr1_darr2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 202, "end_line": 241, "span_ids": ["test_ufunc_outer"], "tokens": 474}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ufunc_outer():\n arr1 = np.random.randint(1, 100, size=20)\n darr1 = da.from_array(arr1, 3)\n\n arr2 = np.random.randint(1, 100, size=(10, 3))\n darr2 = da.from_array(arr2, 3)\n\n # Check output types\n assert isinstance(da.add.outer(darr1, darr2), da.Array)\n assert isinstance(da.add.outer(arr1, darr2), da.Array)\n assert isinstance(da.add.outer(darr1, arr2), da.Array)\n assert isinstance(da.add.outer(arr1, arr2), np.ndarray)\n\n # Check mix of dimensions, dtypes, and numpy/dask/object\n cases = [\n ((darr1, darr2), (arr1, arr2)),\n ((darr2, darr1), (arr2, arr1)),\n ((darr2, darr1.astype(\"f8\")), (arr2, arr1.astype(\"f8\"))),\n ((darr1, arr2), (arr1, arr2)),\n ((darr1, 1), (arr1, 1)),\n ((1, darr2), (1, arr2)),\n ((1.5, darr2), (1.5, arr2)),\n (([1, 2, 3], darr2), ([1, 2, 3], arr2)),\n ((darr1.sum(), darr2), (arr1.sum(), arr2)),\n ((np.array(1), darr2), (np.array(1), arr2)),\n ]\n\n for (dA, dB), (A, B) in 
cases:\n assert_eq(da.add.outer(dA, dB), np.add.outer(A, B))\n\n # Check dtype kwarg works\n assert_eq(\n da.add.outer(darr1, darr2, dtype=\"f8\"), np.add.outer(arr1, arr2, dtype=\"f8\")\n )\n\n with pytest.raises(ValueError):\n da.add.outer(darr1, darr2, out=arr1)\n\n with pytest.raises(ValueError):\n da.sin.outer(darr1, darr2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_complex_test_complex.for_darr_arr_in_dacomp.assert_eq_dafunc_arr_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_complex_test_complex.for_darr_arr_in_dacomp.assert_eq_dafunc_arr_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 244, "end_line": 271, "span_ids": ["test_complex"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", [\"isreal\", \"iscomplex\", \"real\", \"imag\"])\ndef test_complex(ufunc):\n\n dafunc = getattr(da, ufunc)\n # Note that these functions are not NumPy ufuncs\n npfunc = getattr(np, ufunc)\n\n real = np.random.randint(1, 100, size=(20, 20))\n imag = np.random.randint(1, 100, size=(20, 20)) * 1j\n comp = real + imag\n\n dareal = da.from_array(real, 3)\n daimag = da.from_array(imag, 3)\n dacomp = da.from_array(comp, 3)\n\n assert_eq(dacomp.real, comp.real)\n assert_eq(dacomp.imag, comp.imag)\n assert_eq(dacomp.conj(), comp.conj())\n\n for darr, arr in [(dacomp, comp), (dareal, real), (daimag, imag)]:\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(darr), da.Array)\n assert_eq(dafunc(darr), npfunc(arr))\n assert_eq(npfunc(darr), npfunc(arr))\n\n # applying Dask ufunc to normal ndarray triggers computation\n assert isinstance(dafunc(arr), np.ndarray)\n assert_eq(dafunc(arr), npfunc(arr))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_2results_test_ufunc_2results.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_2results_test_ufunc_2results.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 274, "end_line": 305, "span_ids": ["test_ufunc_2results"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", [\"frexp\", \"modf\"])\ndef test_ufunc_2results(ufunc):\n\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n arr = np.random.randint(1, 100, size=(20, 20))\n darr = da.from_array(arr, 3)\n\n # applying Dask ufunc doesn't trigger computation\n res1, res2 = dafunc(darr)\n assert isinstance(res1, da.Array)\n assert isinstance(res2, da.Array)\n exp1, exp2 = npfunc(arr)\n assert_eq(res1, exp1)\n assert_eq(res2, exp2)\n\n # applying NumPy ufunc is now lazy\n res1, res2 = npfunc(darr)\n assert isinstance(res1, da.Array)\n assert isinstance(res2, da.Array)\n exp1, exp2 = npfunc(arr)\n assert_eq(res1, exp1)\n assert_eq(res2, exp2)\n\n # applying Dask ufunc to normal ndarray triggers computation\n res1, res2 = dafunc(arr)\n assert isinstance(res1, da.Array)\n assert isinstance(res2, da.Array)\n exp1, exp2 = npfunc(arr)\n assert_eq(res1, exp1)\n assert_eq(res2, exp2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_clip_test_clip.assert_eq_x_clip_min_1_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_clip_test_clip.assert_eq_x_clip_min_1_m", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 317, "span_ids": ["test_clip"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_clip():\n x = np.random.normal(0, 10, size=(10, 10))\n d = da.from_array(x, chunks=(3, 4))\n\n assert_eq(x.clip(5), d.clip(5))\n assert_eq(x.clip(1, 5), d.clip(1, 5))\n assert_eq(x.clip(min=5), d.clip(min=5))\n assert_eq(x.clip(max=5), d.clip(max=5))\n assert_eq(x.clip(max=1, min=5), d.clip(max=1, min=5))\n assert_eq(x.clip(min=1, max=5), d.clip(min=1, max=5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_angle_test_angle.assert_eq_da_angle_comp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_angle_test_angle.assert_eq_da_angle_comp_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 329, "span_ids": ["test_angle"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_angle():\n real = np.random.randint(1, 100, size=(20, 20))\n imag = np.random.randint(1, 100, size=(20, 20)) * 1j\n comp = real + imag\n dacomp = da.from_array(comp, 3)\n\n assert_eq(da.angle(dacomp), np.angle(comp))\n assert_eq(da.angle(dacomp, deg=True), np.angle(comp, deg=True))\n assert isinstance(da.angle(comp), np.ndarray)\n assert_eq(da.angle(comp), np.angle(comp))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_issignedinf_test_non_ufunc_others.assert_eq_dafunc_darr_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_issignedinf_test_non_ufunc_others.assert_eq_dafunc_darr_n", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 332, "end_line": 349, "span_ids": ["test_non_ufunc_others", "test_issignedinf"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_issignedinf():\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n arr = np.random.randint(-1, 2, size=(20, 20)).astype(float) / 0\n darr = da.from_array(arr, 3)\n\n assert_eq(np.isneginf(arr), da.isneginf(darr))\n assert_eq(np.isposinf(arr), da.isposinf(darr))\n\n\n@pytest.mark.parametrize(\"func\", [\"i0\", \"sinc\", \"nan_to_num\"])\ndef test_non_ufunc_others(func):\n arr = np.random.randint(1, 100, size=(20, 20))\n darr = da.from_array(arr, 3)\n\n dafunc = getattr(da, func)\n npfunc = getattr(np, func)\n\n assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_test_frompyfunc.with_pytest_raises_NotImp.da_frompyfunc_lambda_x_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_test_frompyfunc.with_pytest_raises_NotImp.da_frompyfunc_lambda_x_y", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 352, "end_line": 365, "span_ids": ["test_frompyfunc"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frompyfunc():\n myadd = da.frompyfunc(add, 2, 1)\n np_myadd = np.frompyfunc(add, 2, 1)\n\n x = np.random.normal(0, 10, size=(10, 10))\n dx = da.from_array(x, 
chunks=(3, 4))\n y = np.random.normal(0, 10, size=10)\n dy = da.from_array(y, chunks=2)\n\n assert_eq(myadd(dx, dy), np_myadd(x, y))\n assert_eq(myadd.outer(dx, dy), np_myadd.outer(x, y))\n\n with pytest.raises(NotImplementedError):\n da.frompyfunc(lambda x, y: (x + y, x - y), 2, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_wrapper_test_frompyfunc_wrapper.assert_tokenize_da_frompy": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_frompyfunc_wrapper_test_frompyfunc_wrapper.assert_tokenize_da_frompy", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 398, "span_ids": ["test_frompyfunc_wrapper"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frompyfunc_wrapper():\n f = da_frompyfunc(add, 2, 1)\n np_f = np.frompyfunc(add, 2, 1)\n x = np.array([1, 2, 3])\n\n # Callable\n np.testing.assert_equal(f(x, 1), np_f(x, 1))\n\n # picklable\n f2 = pickle.loads(pickle.dumps(f))\n np.testing.assert_equal(f2(x, 1), np_f(x, 1))\n\n # Attributes\n assert f.ntypes == np_f.ntypes\n with pytest.raises(AttributeError):\n f.not_an_attribute\n\n # Tab completion\n assert \"ntypes\" in dir(f)\n\n # Methods\n np.testing.assert_equal(f.outer(x, x), np_f.outer(x, x))\n\n # funcname\n assert f.__name__ == \"frompyfunc-add\"\n\n # repr\n assert repr(f) == \"da.frompyfunc\"\n\n # tokenize\n assert tokenize(da_frompyfunc(add, 2, 1)) == tokenize(da_frompyfunc(add, 2, 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_array_ufunc_test_out_shape_mismatch.with_pytest_raises_ValueE.assert_np_log_x_out_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_array_ufunc_test_out_shape_mismatch.with_pytest_raises_ValueE.assert_np_log_x_out_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 401, "end_line": 449, "span_ids": ["test_array_ufunc_binop", "test_out_shape_mismatch", "test_out_numpy", "test_array_ufunc", "test_unsupported_ufunc_methods", "test_array_ufunc_out"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_array_ufunc():\n x = np.arange(24).reshape((4, 6))\n d = da.from_array(x, chunks=(2, 3))\n\n for func in [np.sin, np.sum, np.negative, partial(np.prod, axis=0)]:\n assert isinstance(func(d), da.Array)\n assert_eq(func(d), func(x))\n\n\ndef test_array_ufunc_binop():\n x = np.arange(25).reshape((5, 5))\n d = da.from_array(x, chunks=(2, 2))\n\n for func in [np.add, np.multiply]:\n assert isinstance(func(d, d), da.Array)\n assert_eq(func(d, d), func(x, x))\n\n assert isinstance(func.outer(d, d), da.Array)\n assert_eq(func.outer(d, d), func.outer(x, x))\n\n\ndef test_array_ufunc_out():\n x = da.arange(10, chunks=(5,))\n np.sin(x, out=x)\n np.add(x, 10, out=x)\n assert_eq(x, np.sin(np.arange(10)) + 10)\n\n\ndef test_unsupported_ufunc_methods():\n x = da.arange(10, chunks=(5,))\n with pytest.raises(TypeError):\n assert np.add.reduce(x)\n\n\ndef test_out_numpy():\n x = da.arange(10, chunks=(5,))\n empty = np.empty(10, dtype=x.dtype)\n with pytest.raises((TypeError, NotImplementedError)) as info:\n np.add(x, 1, out=empty)\n\n assert \"ndarray\" in str(info.value)\n assert \"Array\" in str(info.value)\n\n\ndef test_out_shape_mismatch():\n x = da.arange(10, chunks=(5,))\n y = da.arange(15, chunks=(5,))\n with pytest.raises(ValueError):\n assert np.log(x, out=y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_pytest_test_can_make_really_big_array_of_ones.ones_shape_1000000_1000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_pytest_test_can_make_really_big_array_of_ones.ones_shape_1000000_1000", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_wrap.py", "file_name": "test_wrap.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 70, "span_ids": ["test_can_make_really_big_array_of_ones", "test_size_as_list", "test_full_error_nonscalar_fill_value", "imports", "test_ones", "test_full_like_error_nonscalar_fill_value", "test_kwargs", "test_full_detects_da_dtype", "test_singleton_size", "test_full"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq\nfrom dask.array.wrap import ones\n\n\ndef test_ones():\n a = ones((10, 10), dtype=\"i4\", chunks=(4, 4))\n x = np.array(a)\n assert (x == np.ones((10, 10), \"i4\")).all()\n\n assert a.name.startswith(\"ones_like-\")\n\n\ndef test_size_as_list():\n a = ones([10, 10], dtype=\"i4\", chunks=(4, 4))\n x = np.array(a)\n assert (x == np.ones((10, 10), dtype=\"i4\")).all()\n\n\ndef test_singleton_size():\n a = ones(10, dtype=\"i4\", chunks=(4,))\n x = np.array(a)\n assert (x == np.ones(10, dtype=\"i4\")).all()\n\n\ndef test_kwargs():\n a = ones(10, dtype=\"i4\", chunks=(4,))\n x = np.array(a)\n assert (x == np.ones(10, dtype=\"i4\")).all()\n\n\ndef test_full():\n a = da.full((3, 3), 100, chunks=(2, 2), dtype=\"i8\")\n\n assert 
(a.compute() == 100).all()\n assert a.dtype == a.compute(scheduler=\"sync\").dtype == \"i8\"\n\n assert a.name.startswith(\"full_like-\")\n\n\ndef test_full_error_nonscalar_fill_value():\n with pytest.raises(ValueError, match=\"fill_value must be scalar\"):\n da.full((3, 3), [100, 100], chunks=(2, 2), dtype=\"i8\")\n\n\ndef test_full_detects_da_dtype():\n x = da.from_array(100)\n with pytest.warns(FutureWarning, match=\"not implemented by Dask array\") as record:\n # This shall not raise an NotImplementedError due to dtype detected as object.\n a = da.full(shape=(3, 3), fill_value=x)\n assert a.dtype == x.dtype\n assert_eq(a, np.full(shape=(3, 3), fill_value=100))\n assert len(record) == 1\n\n\ndef test_full_like_error_nonscalar_fill_value():\n x = np.full((3, 3), 1, dtype=\"i8\")\n with pytest.raises(ValueError, match=\"fill_value must be scalar\"):\n da.full_like(x, [100, 100], chunks=(2, 2), dtype=\"i8\")\n\n\ndef test_can_make_really_big_array_of_ones():\n ones((1000000, 1000000), chunks=(100000, 100000))\n ones(shape=(1000000, 1000000), chunks=(100000, 100000))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_test_wrap_consistent_names_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_wrap.py_test_wrap_consistent_names_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_wrap.py", "file_name": "test_wrap.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 74, "span_ids": ["test_wrap_consistent_names"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_wrap_consistent_names():\n assert sorted(ones(10, dtype=\"i4\", chunks=(4,)).dask) == sorted(\n ones(10, dtype=\"i4\", chunks=(4,)).dask\n )\n assert sorted(ones(10, dtype=\"i4\", chunks=(4,)).dask) != sorted(\n ones(10, chunks=(4,)).dask\n )\n assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype=\"f8\").dask) == sorted(\n da.full((3, 3), 100, chunks=(2, 2), dtype=\"f8\").dask\n )\n assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype=\"i2\").dask) != sorted(\n da.full((3, 3), 100, chunks=(2, 2)).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_core_from_tiledb.return.core_from_array_tdb_chun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_core_from_tiledb.return.core_from_array_tdb_chun", "embedding": null, "metadata": {"file_path": "dask/array/tiledb_io.py", "file_name": "tiledb_io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 69, "span_ids": ["imports", "from_tiledb", "_tiledb_to_chunks"], "tokens": 468}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from . import core\n\n\ndef _tiledb_to_chunks(tiledb_array):\n schema = tiledb_array.schema\n return list(schema.domain.dim(i).tile for i in range(schema.ndim))\n\n\ndef from_tiledb(uri, attribute=None, chunks=None, storage_options=None, **kwargs):\n \"\"\"Load array from the TileDB storage format\n\n See https://docs.tiledb.io for more information about TileDB.\n\n Parameters\n ----------\n uri: TileDB array or str\n Location to save the data\n attribute: str or None\n Attribute selection (single-attribute view on multi-attribute array)\n\n\n Returns\n -------\n\n A Dask Array\n\n Examples\n --------\n\n >>> import tempfile, tiledb\n >>> import dask.array as da, numpy as np\n >>> uri = tempfile.NamedTemporaryFile().name\n >>> _ = tiledb.from_numpy(uri, np.arange(0,9).reshape(3,3)) # create a tiledb array\n >>> tdb_ar = da.from_tiledb(uri) # read back the array\n >>> tdb_ar.shape\n (3, 3)\n >>> tdb_ar.mean().compute()\n 4.0\n \"\"\"\n import tiledb\n\n tiledb_config = storage_options or dict()\n key = tiledb_config.pop(\"key\", None)\n\n if isinstance(uri, tiledb.Array):\n tdb = uri\n else:\n tdb = tiledb.open(uri, attr=attribute, config=tiledb_config, key=key)\n\n if tdb.schema.sparse:\n raise ValueError(\"Sparse TileDB arrays are not supported\")\n\n if not attribute:\n if tdb.schema.nattr > 1:\n raise TypeError(\n \"keyword 'attribute' must be provided\"\n \"when loading a multi-attribute TileDB array\"\n )\n else:\n attribute = tdb.schema.attr(0).name\n\n if tdb.iswritable:\n raise ValueError(\"TileDB array must be open for reading\")\n\n chunks = chunks or _tiledb_to_chunks(tdb)\n\n assert len(chunks) == tdb.schema.ndim\n\n return core.from_array(tdb, chunks, name=\"tiledb-%s\" % uri)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_to_tiledb_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tiledb_io.py_to_tiledb_", "embedding": null, "metadata": {"file_path": "dask/array/tiledb_io.py", "file_name": "tiledb_io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 72, "end_line": 166, "span_ids": ["to_tiledb"], "tokens": 690}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_tiledb(\n darray,\n uri,\n compute=True,\n return_stored=False,\n storage_options=None,\n key=None,\n **kwargs,\n):\n \"\"\"Save array to the TileDB storage format\n\n Save 'array' using the TileDB storage manager, to any TileDB-supported URI,\n including local disk, S3, or HDFS.\n\n See https://docs.tiledb.io for more information about TileDB.\n\n Parameters\n ----------\n\n darray: dask.array\n A dask array to write.\n uri:\n Any supported TileDB storage location.\n storage_options: 
dict\n Dict containing any configuration options for the TileDB backend.\n see https://docs.tiledb.io/en/stable/tutorials/config.html\n compute, return_stored: see ``store()``\n key: str or None\n Encryption key\n\n Returns\n -------\n\n None\n Unless ``return_stored`` is set to ``True`` (``False`` by default)\n\n Notes\n -----\n\n TileDB only supports regularly-chunked arrays.\n TileDB `tile extents`_ correspond to form 2 of the dask\n `chunk specification`_, and the conversion is\n done automatically for supported arrays.\n\n Examples\n --------\n\n >>> import dask.array as da, tempfile\n >>> uri = tempfile.NamedTemporaryFile().name\n >>> data = da.random.random(5,5)\n >>> da.to_tiledb(data, uri)\n >>> import tiledb\n >>> tdb_ar = tiledb.open(uri)\n >>> all(tdb_ar == data)\n True\n\n .. _chunk specification: https://docs.tiledb.io/en/stable/tutorials/tiling-dense.html\n .. _tile extents: http://docs.dask.org/en/latest/array-chunks.html\n \"\"\"\n import tiledb\n\n tiledb_config = storage_options or dict()\n # encryption key, if any\n key = key or tiledb_config.pop(\"key\", None)\n\n if not core._check_regular_chunks(darray.chunks):\n raise ValueError(\n \"Attempt to save array to TileDB with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n if isinstance(uri, str):\n chunks = [c[0] for c in darray.chunks]\n # create a suitable, empty, writable TileDB array\n tdb = tiledb.empty_like(\n uri, darray, tile=chunks, config=tiledb_config, key=key, **kwargs\n )\n elif isinstance(uri, tiledb.Array):\n tdb = uri\n # sanity checks\n if not ((darray.dtype == tdb.dtype) and (darray.ndim == tdb.ndim)):\n raise ValueError(\n \"Target TileDB array layout is not compatible with source array\"\n )\n else:\n raise ValueError(\n \"'uri' must be string pointing to supported TileDB store location \"\n \"or an open, writable TileDB array.\"\n )\n\n if not (tdb.isopen and tdb.iswritable):\n raise ValueError(\"Target TileDB array is not open and writable.\")\n\n return darray.store(tdb, lock=False, compute=compute, return_stored=return_stored)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_wrap_elemwise_wrap_elemwise.return.derived_from_source_wrap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_wrap_elemwise_wrap_elemwise.return.derived_from_source_wrap", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 23, "end_line": 43, "span_ids": ["wrap_elemwise"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_elemwise(numpy_ufunc, array_wrap=False, source=np):\n \"\"\"Wrap up numpy function into dask.array\"\"\"\n\n def wrapped(*args, **kwargs):\n dsk = [arg for arg in args if hasattr(arg, \"_elemwise\")]\n if len(dsk) > 0:\n is_dataframe = (\n is_dataframe_like(dsk[0])\n or is_series_like(dsk[0])\n or is_index_like(dsk[0])\n )\n if array_wrap 
and is_dataframe:\n return dsk[0]._elemwise(__array_wrap__, numpy_ufunc, *args, **kwargs)\n else:\n return dsk[0]._elemwise(numpy_ufunc, *args, **kwargs)\n else:\n return numpy_ufunc(*args, **kwargs)\n\n # functools.wraps cannot wrap ufunc in Python 2.x\n wrapped.__name__ = numpy_ufunc.__name__\n return derived_from(source)(wrapped)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_da_frompyfunc_da_frompyfunc.__dir__.return.list_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_da_frompyfunc_da_frompyfunc.__dir__.return.list_o_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 46, "end_line": 78, "span_ids": ["da_frompyfunc.__init__", "da_frompyfunc.__reduce__", "da_frompyfunc.__repr__", "da_frompyfunc.__call__", "da_frompyfunc.__dir__", "da_frompyfunc.__getattr__", "da_frompyfunc.__dask_tokenize__", "da_frompyfunc"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class da_frompyfunc:\n \"\"\"A serializable `frompyfunc` object\"\"\"\n\n def __init__(self, func, nin, nout):\n self._ufunc = np.frompyfunc(func, nin, nout)\n self._func = func\n self.nin = nin\n self.nout = nout\n self._name = funcname(func)\n self.__name__ = \"frompyfunc-%s\" % self._name\n\n def __repr__(self):\n return \"da.frompyfunc<%s, %d, %d>\" % (self._name, self.nin, self.nout)\n\n def __dask_tokenize__(self):\n return (normalize_function(self._func), self.nin, self.nout)\n\n def __reduce__(self):\n return (da_frompyfunc, (self._func, self.nin, self.nout))\n\n def __call__(self, *args, **kwargs):\n return self._ufunc(*args, **kwargs)\n\n def __getattr__(self, a):\n if not a.startswith(\"_\"):\n return getattr(self._ufunc, a)\n raise AttributeError(f\"{type(self).__name__!r} object has no attribute {a!r}\")\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n o.update(dir(self._ufunc))\n return list(o)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frompyfunc_ufunc.__repr__.return.repr_self__ufunc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frompyfunc_ufunc.__repr__.return.repr_self__ufunc_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 81, "end_line": 119, "span_ids": ["ufunc.__getattr__", "ufunc", "ufunc.__init__", "frompyfunc", "ufunc.__dir__", "ufunc.__repr__"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef frompyfunc(func, nin, nout):\n if nout > 1:\n raise NotImplementedError(\"frompyfunc with more than one output\")\n return ufunc(da_frompyfunc(func, nin, nout))\n\n\nclass ufunc:\n _forward_attrs = {\n \"nin\",\n \"nargs\",\n \"nout\",\n \"ntypes\",\n \"identity\",\n \"signature\",\n \"types\",\n }\n\n def __init__(self, ufunc):\n if not isinstance(ufunc, (np.ufunc, da_frompyfunc)):\n raise TypeError(\n \"must be an instance of `ufunc` or \"\n \"`da_frompyfunc`, got `%s\" % type(ufunc).__name__\n )\n self._ufunc = ufunc\n self.__name__ = ufunc.__name__\n if isinstance(ufunc, np.ufunc):\n derived_from(np)(self)\n\n def __getattr__(self, key):\n if key in self._forward_attrs:\n return getattr(self._ufunc, key)\n raise AttributeError(f\"{type(self).__name__!r} object has no attribute {key!r}\")\n\n def __dir__(self):\n return list(self._forward_attrs.union(dir(type(self)), self.__dict__))\n\n def __repr__(self):\n return repr(self._ufunc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.__call___ufunc.__call__.if_len_dsks_0_.else_.return.self__ufunc_args_kwar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.__call___ufunc.__call__.if_len_dsks_0_.else_.return.self__ufunc_args_kwar", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 135, "span_ids": ["ufunc.__call__"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ufunc:\n\n def __call__(self, *args, **kwargs):\n dsks = [arg for arg in args if hasattr(arg, \"_elemwise\")]\n if len(dsks) > 0:\n for dsk in dsks:\n result = dsk._elemwise(self._ufunc, *args, **kwargs)\n if type(result) != type(NotImplemented):\n return result\n raise TypeError(\n \"Parameters of such types are not supported by \" + self.__name__\n )\n else:\n return self._ufunc(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.outer_ufunc.outer.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_ufunc.outer_ufunc.outer.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 134, "end_line": 182, "span_ids": ["ufunc.outer"], "tokens": 348}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ufunc:\n\n @derived_from(np.ufunc)\n def outer(self, A, B, **kwargs):\n if self.nin != 2:\n raise ValueError(\"outer product only supported for binary functions\")\n if \"out\" in kwargs:\n raise ValueError(\"`out` kwarg not supported\")\n\n A_is_dask = is_dask_collection(A)\n B_is_dask = is_dask_collection(B)\n if not A_is_dask and not B_is_dask:\n return self._ufunc.outer(A, B, **kwargs)\n elif (\n A_is_dask\n and not isinstance(A, Array)\n or B_is_dask\n and not isinstance(B, Array)\n ):\n raise NotImplementedError(\n \"Dask objects besides `dask.array.Array` \"\n \"are not supported at this time.\"\n )\n\n A = asarray(A)\n B = asarray(B)\n ndim = A.ndim + B.ndim\n out_inds = tuple(range(ndim))\n A_inds = out_inds[: A.ndim]\n B_inds = out_inds[A.ndim :]\n\n dtype = apply_infer_dtype(\n self._ufunc.outer, [A, B], kwargs, \"ufunc.outer\", suggest_dtype=False\n )\n\n if \"dtype\" in kwargs:\n func = partial(self._ufunc.outer, dtype=kwargs.pop(\"dtype\"))\n else:\n func = self._ufunc.outer\n\n return blockwise(\n func,\n out_inds,\n A,\n A_inds,\n B,\n B_inds,\n dtype=dtype,\n token=self.__name__ + \".outer\",\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py__ufuncs_copied_from_thi_degrees.ufunc_np_degrees_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py__ufuncs_copied_from_thi_degrees.ufunc_np_degrees_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 280, "span_ids": ["impl:106", "ufunc.outer", "impl"], "tokens": 779}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# ufuncs, copied from this page:\n# https://docs.scipy.org/doc/numpy/reference/ufuncs.html\n\n# math operations\nadd = ufunc(np.add)\nsubtract = ufunc(np.subtract)\nmultiply = ufunc(np.multiply)\ndivide = ufunc(np.divide)\nlogaddexp = ufunc(np.logaddexp)\nlogaddexp2 = ufunc(np.logaddexp2)\ntrue_divide = ufunc(np.true_divide)\nfloor_divide = ufunc(np.floor_divide)\nnegative = ufunc(np.negative)\npower = ufunc(np.power)\nfloat_power = ufunc(np.float_power)\nremainder = ufunc(np.remainder)\nmod = ufunc(np.mod)\n# fmod: see below\nconj = conjugate = ufunc(np.conjugate)\nexp = ufunc(np.exp)\nexp2 = ufunc(np.exp2)\nlog = ufunc(np.log)\nlog2 = ufunc(np.log2)\nlog10 = ufunc(np.log10)\nlog1p = ufunc(np.log1p)\nexpm1 = ufunc(np.expm1)\nsqrt = ufunc(np.sqrt)\nsquare = ufunc(np.square)\ncbrt = ufunc(np.cbrt)\nreciprocal = ufunc(np.reciprocal)\n\n# trigonometric functions\nsin = ufunc(np.sin)\ncos = ufunc(np.cos)\ntan = ufunc(np.tan)\narcsin = ufunc(np.arcsin)\narccos = 
ufunc(np.arccos)\narctan = ufunc(np.arctan)\narctan2 = ufunc(np.arctan2)\nhypot = ufunc(np.hypot)\nsinh = ufunc(np.sinh)\ncosh = ufunc(np.cosh)\ntanh = ufunc(np.tanh)\narcsinh = ufunc(np.arcsinh)\narccosh = ufunc(np.arccosh)\narctanh = ufunc(np.arctanh)\ndeg2rad = ufunc(np.deg2rad)\nrad2deg = ufunc(np.rad2deg)\n\n# comparison functions\ngreater = ufunc(np.greater)\ngreater_equal = ufunc(np.greater_equal)\nless = ufunc(np.less)\nless_equal = ufunc(np.less_equal)\nnot_equal = ufunc(np.not_equal)\nequal = ufunc(np.equal)\nisneginf = partial(equal, -np.inf)\nisposinf = partial(equal, np.inf)\nlogical_and = ufunc(np.logical_and)\nlogical_or = ufunc(np.logical_or)\nlogical_xor = ufunc(np.logical_xor)\nlogical_not = ufunc(np.logical_not)\nmaximum = ufunc(np.maximum)\nminimum = ufunc(np.minimum)\nfmax = ufunc(np.fmax)\nfmin = ufunc(np.fmin)\n\n# bitwise functions\nbitwise_and = ufunc(np.bitwise_and)\nbitwise_or = ufunc(np.bitwise_or)\nbitwise_xor = ufunc(np.bitwise_xor)\nbitwise_not = ufunc(np.bitwise_not)\ninvert = bitwise_not\n\n# floating functions\nisfinite = ufunc(np.isfinite)\nisinf = ufunc(np.isinf)\nisnan = ufunc(np.isnan)\nsignbit = ufunc(np.signbit)\ncopysign = ufunc(np.copysign)\nnextafter = ufunc(np.nextafter)\nspacing = ufunc(np.spacing)\n# modf: see below\nldexp = ufunc(np.ldexp)\n# frexp: see below\nfmod = ufunc(np.fmod)\nfloor = ufunc(np.floor)\nceil = ufunc(np.ceil)\ntrunc = ufunc(np.trunc)\n\n# more math routines, from this page:\n# https://docs.scipy.org/doc/numpy/reference/routines.math.html\ndegrees = ufunc(np.degrees)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_radians_angle.return.np_angle_x_deg_deg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_radians_angle.return.np_angle_x_deg_deg_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 281, "end_line": 304, "span_ids": ["impl:106", "angle"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "radians = ufunc(np.radians)\nrint = ufunc(np.rint)\nfabs = ufunc(np.fabs)\nsign = ufunc(np.sign)\nabsolute = ufunc(np.absolute)\n\n# non-ufunc elementwise functions\nclip = wrap_elemwise(np.clip)\nisreal = wrap_elemwise(np.isreal, array_wrap=True)\niscomplex = wrap_elemwise(np.iscomplex, array_wrap=True)\nreal = wrap_elemwise(np.real, array_wrap=True)\nimag = wrap_elemwise(np.imag, array_wrap=True)\nfix = wrap_elemwise(np.fix, array_wrap=True)\ni0 = wrap_elemwise(np.i0, array_wrap=True)\nsinc = wrap_elemwise(np.sinc, array_wrap=True)\nnan_to_num = wrap_elemwise(np.nan_to_num, array_wrap=True)\n\n\n@derived_from(np)\ndef angle(x, deg=0):\n deg = bool(deg)\n if hasattr(x, \"_elemwise\"):\n return x._elemwise(__array_wrap__, np.angle, x, deg)\n return np.angle(x, deg=deg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frexp_frexp.return.L_R": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_frexp_frexp.return.L_R", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 304, "end_line": 326, "span_ids": ["frexp"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef frexp(x):\n # Not actually object dtype, just need to specify something\n tmp = elemwise(np.frexp, x, dtype=object)\n left = \"mantissa-\" + tmp.name\n right = \"exponent-\" + tmp.name\n ldsk = {\n (left,) + key[1:]: (getitem, key, 0)\n for key in core.flatten(tmp.__dask_keys__())\n }\n rdsk = {\n (right,) + key[1:]: (getitem, key, 1)\n for key in core.flatten(tmp.__dask_keys__())\n }\n\n a = np.empty_like(getattr(x, \"_meta\", x), shape=(1,) * x.ndim, dtype=x.dtype)\n l, r = np.frexp(a)\n\n graph = HighLevelGraph.from_collections(left, ldsk, dependencies=[tmp])\n L = Array(graph, left, chunks=tmp.chunks, meta=l)\n graph = HighLevelGraph.from_collections(right, rdsk, dependencies=[tmp])\n R = Array(graph, right, chunks=tmp.chunks, meta=r)\n return L, R", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_modf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_modf_", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 329, "end_line": 359, "span_ids": ["divmod", "modf"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef modf(x):\n # Not actually object dtype, just need to specify something\n tmp = elemwise(np.modf, x, dtype=object)\n left = \"modf1-\" + tmp.name\n right = \"modf2-\" + tmp.name\n ldsk = {\n (left,) + key[1:]: (getitem, key, 0)\n for key in core.flatten(tmp.__dask_keys__())\n }\n rdsk = {\n (right,) + key[1:]: (getitem, key, 1)\n for key in core.flatten(tmp.__dask_keys__())\n }\n\n a = np.empty_like(getattr(x, \"_meta\", x), shape=(1,) * x.ndim, dtype=x.dtype)\n l, r = np.modf(a)\n\n graph = HighLevelGraph.from_collections(left, ldsk, dependencies=[tmp])\n L = Array(graph, left, chunks=tmp.chunks, meta=l)\n graph = HighLevelGraph.from_collections(right, rdsk, dependencies=[tmp])\n R = Array(graph, right, chunks=tmp.chunks, meta=r)\n return L, R\n\n\n@derived_from(np)\ndef divmod(x, y):\n res1 = x // y\n 
res2 = x % y\n return res1, res2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_compute_meta_compute_meta.with_np_errstate_all_ign.return.meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_compute_meta_compute_meta.with_np_errstate_all_ign.return.meta", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 177, "span_ids": ["compute_meta"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_meta(func, _dtype, *args, **kwargs):\n with np.errstate(all=\"ignore\"), warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\n args_meta = [meta_from_array(x) if is_arraylike(x) else x for x in args]\n kwargs_meta = {\n k: meta_from_array(v) if is_arraylike(v) else v for k, v in kwargs.items()\n }\n\n # todo: look for alternative to this, causes issues when using map_blocks()\n # with np.vectorize, such as dask.array.routines._isnonzero_vec().\n if isinstance(func, np.vectorize):\n meta = func(*args_meta)\n else:\n try:\n # some reduction functions need to know they are computing meta\n if has_keyword(func, \"computing_meta\"):\n kwargs_meta[\"computing_meta\"] = True\n meta = func(*args_meta, **kwargs_meta)\n except TypeError as e:\n if any(\n s in str(e)\n for s in [\n \"unexpected keyword argument\",\n \"is an invalid keyword for\",\n \"Did not understand the following kwargs\",\n ]\n ):\n raise\n else:\n return None\n except ValueError as e:\n # min/max functions have no identity, just use the same input type when there's only one\n if len(\n args_meta\n ) == 1 and \"zero-size array to reduction operation\" in str(e):\n meta = args_meta[0]\n else:\n return None\n except Exception:\n return None\n\n if _dtype and getattr(meta, \"dtype\", None) != _dtype:\n with contextlib.suppress(AttributeError):\n meta = meta.astype(_dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_allclose_allclose.return._a_b_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_allclose_allclose.return._a_b_all_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 150, "end_line": 159, "span_ids": ["allclose"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def allclose(a, b, equal_nan=False, **kwargs):\n a = normalize_to_array(a)\n b = normalize_to_array(b)\n if getattr(a, \"dtype\", None) != \"O\":\n return np.allclose(a, b, equal_nan=equal_nan, **kwargs)\n if equal_nan:\n return a.shape == b.shape and all(\n np.isnan(b) if np.isnan(a) else a == b for (a, b) in zip(a.flat, b.flat)\n )\n return (a == b).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_same_keys_assert_eq_shape.for_aa_bb_in_zip_a_b_.if_math_isnan_aa_or_math.else_.assert_aa_bb": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_same_keys_assert_eq_shape.for_aa_bb_in_zip_a_b_.if_math_isnan_aa_or_math.else_.assert_aa_bb", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 192, "end_line": 224, "span_ids": ["_not_empty", "_check_dsk", "assert_eq_shape", "same_keys"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def same_keys(a, b):\n def key(k):\n if isinstance(k, str):\n return (k, -1, -1, -1)\n else:\n return k\n\n return sorted(a.dask, key=key) == sorted(b.dask, key=key)\n\n\ndef _not_empty(x):\n return x.shape and 0 not in x.shape\n\n\ndef _check_dsk(dsk):\n \"\"\"Check that graph is well named and non-overlapping\"\"\"\n if not isinstance(dsk, HighLevelGraph):\n return\n\n dsk.validate()\n assert all(isinstance(k, (tuple, str)) for k in dsk.layers)\n freqs = frequencies(concat(dsk.layers.values()))\n non_one = {k: v for k, v in freqs.items() if v != 1}\n assert not non_one, non_one\n\n\ndef assert_eq_shape(a, b, check_nan=True):\n for aa, bb in zip(a, b):\n if math.isnan(aa) or math.isnan(bb):\n if check_nan:\n assert math.isnan(aa) == math.isnan(bb)\n else:\n assert aa == bb", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__get_dt_meta_computed__get_dt_meta_computed.return.x_adt_x_meta_x_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__get_dt_meta_computed__get_dt_meta_computed.return.x_adt_x_meta_x_compute", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 264, "span_ids": ["_get_dt_meta_computed"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_dt_meta_computed(\n x, check_shape=True, check_graph=True, check_chunks=True, scheduler=None\n):\n x_original = x\n x_meta = None\n x_computed = None\n\n if isinstance(x, Array):\n assert x.dtype is not None\n adt = x.dtype\n if check_graph:\n _check_dsk(x.dask)\n x_meta = getattr(x, \"_meta\", None)\n if check_chunks:\n # Replace x with persisted version to avoid computing it twice.\n x = _check_chunks(x, scheduler=scheduler)\n x = x.compute(scheduler=scheduler)\n x_computed = x\n if hasattr(x, \"todense\"):\n x = x.todense()\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n if _not_empty(x):\n assert x.dtype == x_original.dtype\n if check_shape:\n assert_eq_shape(x_original.shape, x.shape, check_nan=False)\n else:\n if not hasattr(x, \"dtype\"):\n x = np.array(x, dtype=\"O\")\n adt = getattr(x, \"dtype\", None)\n\n return x, adt, x_meta, x_computed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_validate_axis_validate_axis.return.axis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_validate_axis_validate_axis.return.axis", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 450, "end_line": 462, "span_ids": ["validate_axis"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def validate_axis(axis, ndim):\n \"\"\"Validate an input to axis= keywords\"\"\"\n if isinstance(axis, (tuple, list)):\n return tuple(validate_axis(ax, ndim) for ax in axis)\n if not isinstance(axis, numbers.Integral):\n raise TypeError(\"Axis value must be an integer, got %s\" % axis)\n if axis < -ndim or axis >= ndim:\n raise np.AxisError(\n \"Axis %d is out of bounds for array of dimension %d\" % (axis, ndim)\n )\n if axis < 0:\n axis += ndim\n return axis", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_from_functools_import_par__parse_wrap_args.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_from_functools_import_par__parse_wrap_args.return._", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["imports", "_parse_wrap_args"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import partial\nfrom itertools import product\n\nimport numpy as np\nfrom tlz import curry\n\nfrom ..base import tokenize\nfrom ..blockwise import blockwise as core_blockwise\nfrom ..layers import ArrayChunkShapeDep\nfrom ..utils import funcname\nfrom .core import Array, normalize_chunks\nfrom .utils import meta_from_array\n\n\ndef _parse_wrap_args(func, args, kwargs, shape):\n if isinstance(shape, np.ndarray):\n shape = shape.tolist()\n\n if not isinstance(shape, (tuple, list)):\n shape = (shape,)\n\n name = kwargs.pop(\"name\", None)\n chunks = kwargs.pop(\"chunks\", \"auto\")\n\n dtype = kwargs.pop(\"dtype\", None)\n if dtype is None:\n dtype = func(shape, *args, **kwargs).dtype\n dtype = np.dtype(dtype)\n\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n\n name = name or funcname(func) + \"-\" + tokenize(\n func, shape, chunks, dtype, args, kwargs\n )\n\n return {\n \"shape\": shape,\n \"dtype\": dtype,\n \"kwargs\": kwargs,\n \"chunks\": chunks,\n \"name\": name,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_wrap_func_like.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_like_wrap_func_like.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 100, "span_ids": ["wrap_func_like"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_func_like(func, *args, **kwargs):\n \"\"\"\n Transform np creation function into blocked version\n \"\"\"\n x = args[0]\n meta = meta_from_array(x)\n shape = kwargs.get(\"shape\", x.shape)\n\n parsed = _parse_wrap_args(func, args, kwargs, shape)\n shape = parsed[\"shape\"]\n dtype = parsed[\"dtype\"]\n chunks = parsed[\"chunks\"]\n name = parsed[\"name\"]\n kwargs = parsed[\"kwargs\"]\n\n keys = product([name], *[range(len(bd)) for bd in chunks])\n shapes = product(*chunks)\n shapes = list(shapes)\n kw = [kwargs for _ in shapes]\n for i, s in enumerate(list(shapes)):\n kw[i][\"shape\"] = s\n vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))\n\n dsk = dict(zip(keys, vals))\n\n return Array(dsk, name, chunks, meta=meta.astype(dtype))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/__init__.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/__init__.py_try__", "embedding": null, "metadata": 
{"file_path": "dask/bag/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 26, "span_ids": ["impl"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n from ..base import compute\n from .avro import read_avro\n from .core import Bag, Item\n from .core import bag_map as map\n from .core import bag_range as range\n from .core import bag_zip as zip\n from .core import (\n concat,\n from_delayed,\n from_sequence,\n from_url,\n map_partitions,\n to_textfiles,\n )\n from .text import read_text\n from .utils import assert_eq\nexcept ImportError as e:\n msg = (\n \"Dask bag requirements are not installed.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install dask # either conda install\\n\"\n ' python -m pip install \"dask[bag]\" --upgrade # or python -m pip install'\n )\n raise ImportError(str(e) + \"\\n\\n\" + msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_io_read_bytes.return.fo_read_size_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_io_read_bytes.return.fo_read_size_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["read_bytes", "imports", "read_long"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport uuid\n\nfrom fsspec.core import OpenFile, get_fs_token_paths, open_files\nfrom fsspec.utils import read_block\nfrom fsspec.utils import tokenize as fs_tokenize\n\nfrom ..highlevelgraph import HighLevelGraph\n\nMAGIC = b\"Obj\\x01\"\nSYNC_SIZE = 16\n\n\ndef read_long(fo):\n \"\"\"variable-length, zig-zag encoding.\"\"\"\n c = fo.read(1)\n b = ord(c)\n n = b & 0x7F\n shift = 7\n while (b & 0x80) != 0:\n b = ord(fo.read(1))\n n |= (b & 0x7F) << shift\n shift += 7\n return (n >> 1) ^ -(n & 1)\n\n\ndef read_bytes(fo):\n \"\"\"a long followed by that many bytes of data.\"\"\"\n size = read_long(fo)\n return fo.read(size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_header_open_head.return.head_size": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_header_open_head.return.head_size", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": 
"avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 68, "span_ids": ["open_head", "read_header"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_header(fo):\n \"\"\"Extract an avro file's header\n\n fo: file-like\n This should be in bytes mode, e.g., io.BytesIO\n\n Returns dict representing the header\n\n Parameters\n ----------\n fo: file-like\n \"\"\"\n assert fo.read(len(MAGIC)) == MAGIC, \"Magic avro bytes missing\"\n meta = {}\n out = {\"meta\": meta}\n while True:\n n_keys = read_long(fo)\n if n_keys == 0:\n break\n for i in range(n_keys):\n # ignore dtype mapping for bag version\n read_bytes(fo) # schema keys\n read_bytes(fo) # schema values\n out[\"sync\"] = fo.read(SYNC_SIZE)\n out[\"header_size\"] = fo.tell()\n fo.seek(0)\n out[\"head_bytes\"] = fo.read(out[\"header_size\"])\n return out\n\n\ndef open_head(fs, path, compression):\n \"\"\"Open a file just to read its head and size\"\"\"\n with OpenFile(fs, path, compression=compression) as f:\n head = read_header(f)\n size = fs.info(path)[\"size\"]\n return head, size", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_avro_read_avro.if_blocksize_is_not_None_.else_.return.from_delayed_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_avro_read_avro.if_blocksize_is_not_None_.else_.return.from_delayed_chunks_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 71, "end_line": 137, "span_ids": ["read_avro"], "tokens": 566}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_avro(urlpath, blocksize=100000000, storage_options=None, compression=None):\n \"\"\"Read set of avro files\n\n Use this with arbitrary nested avro schemas. Please refer to the\n fastavro documentation for its capabilities:\n https://github.com/fastavro/fastavro\n\n Parameters\n ----------\n urlpath: string or list\n Absolute or relative filepath, URL (may include protocols like\n ``s3://``), or globstring pointing to data.\n blocksize: int or None\n Size of chunks in bytes. If None, there will be no chunking and each\n file will become one partition.\n storage_options: dict or None\n passed to backend file-system\n compression: str or None\n Compression format of the targe(s), like 'gzip'. 
Should only be used\n with blocksize=None.\n \"\"\"\n from dask import compute, delayed\n from dask.bag import from_delayed\n from dask.utils import import_required\n\n import_required(\n \"fastavro\", \"fastavro is a required dependency for using bag.read_avro().\"\n )\n\n storage_options = storage_options or {}\n if blocksize is not None:\n fs, fs_token, paths = get_fs_token_paths(\n urlpath, mode=\"rb\", storage_options=storage_options\n )\n dhead = delayed(open_head)\n out = compute(*[dhead(fs, path, compression) for path in paths])\n heads, sizes = zip(*out)\n dread = delayed(read_chunk)\n\n offsets = []\n lengths = []\n for size in sizes:\n off = list(range(0, size, blocksize))\n length = [blocksize] * len(off)\n offsets.append(off)\n lengths.append(length)\n\n out = []\n for path, offset, length, head in zip(paths, offsets, lengths, heads):\n delimiter = head[\"sync\"]\n f = OpenFile(fs, path, compression=compression)\n token = fs_tokenize(\n fs_token, delimiter, path, fs.ukey(path), compression, offset\n )\n keys = [f\"read-avro-{o}-{token}\" for o in offset]\n values = [\n dread(f, o, l, head, dask_key_name=key)\n for o, key, l in zip(offset, keys, length)\n ]\n out.extend(values)\n\n return from_delayed(out)\n else:\n files = open_files(urlpath, compression=compression, **storage_options)\n dread = delayed(read_file)\n chunks = [dread(fo) for fo in files]\n return from_delayed(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py__verify_schema_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py__verify_schema_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 290, "span_ids": ["_write_avro_part", "_verify_schema"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _verify_schema(s):\n assert isinstance(s, dict), \"Schema must be dictionary\"\n for field in [\"name\", \"type\", \"fields\"]:\n assert field in s, \"Schema missing '%s' field\" % field\n assert s[\"type\"] == \"record\", \"Schema must be of type 'record'\"\n assert isinstance(s[\"fields\"], list), \"Fields entry must be a list\"\n for f in s[\"fields\"]:\n assert \"name\" in f and \"type\" in f, \"Field spec incomplete: %s\" % f\n\n\ndef _write_avro_part(part, f, schema, codec, sync_interval, metadata):\n \"\"\"Create single avro file from list of dictionaries\"\"\"\n import fastavro\n\n with f as f:\n fastavro.writer(f, schema, part, codec, sync_interval, metadata)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/chunk.py_barrier_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/chunk.py_barrier_", "embedding": null, "metadata": {"file_path": "dask/bag/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 34, "span_ids": ["foldby_combine2", "barrier", "groupby_tasks_group_hash", "getitem", "var_aggregate", "var_chunk"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def barrier(*args):\n return None\n\n\ndef getitem(x, key):\n \"\"\"Like :func:`operator.getitem`, but allows setting key using partial\n ``partial(chunk.getitem, key=key)\n \"\"\"\n return x[key]\n\n\ndef foldby_combine2(combine, acc, x):\n return combine(acc, x[1])\n\n\ndef groupby_tasks_group_hash(x, hash, grouper):\n return hash(grouper(x)), x\n\n\ndef var_chunk(seq):\n squares, total, n = 0.0, 0.0, 0\n for x in seq:\n squares += x**2\n total += x\n n += 1\n return squares, total, n\n\n\ndef var_aggregate(x, ddof):\n squares, totals, counts = list(zip(*x))\n x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)\n result = (x2 / n) - (x / n) ** 2\n return result * n / (n - ddof)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_io_no_result.type_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_io_no_result.type_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 67, "span_ids": ["imports"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport itertools\nimport math\nimport operator\nimport uuid\nimport warnings\nfrom collections import defaultdict\nfrom collections.abc import Iterable, Iterator\nfrom functools import partial, reduce, wraps\nfrom random import Random\nfrom urllib.request import urlopen\n\nimport tlz as toolz\nfrom fsspec.core import open_files\nfrom tlz import (\n accumulate,\n compose,\n count,\n curry,\n first,\n frequencies,\n groupby,\n join,\n merge,\n merge_with,\n partition_all,\n peek,\n pluck,\n reduceby,\n remove,\n second,\n take,\n topk,\n unique,\n valmap,\n)\n\nfrom .. 
import config\nfrom ..base import DaskMethodsMixin, dont_optimize, replace_name_in_key, tokenize\nfrom ..context import globalmethod\nfrom ..core import flatten, get_dependencies, istask, quote, reverse_dict\nfrom ..delayed import Delayed, unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..multiprocessing import get as mpget\nfrom ..optimization import cull, fuse, inline\nfrom ..sizeof import sizeof\nfrom ..utils import (\n apply,\n digit,\n ensure_bytes,\n ensure_dict,\n ensure_unicode,\n funcname,\n insert,\n iter_chunks,\n key_split,\n parse_bytes,\n system_encoding,\n takes_multiple_arguments,\n)\nfrom . import chunk\nfrom .avro import to_avro\n\nno_default = \"__no__default__\"\nno_result = type(\n \"no_result\", (object,), {\"__slots__\": (), \"__reduce__\": lambda self: \"no_result\"}\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_inline_singleton_lists.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_inline_singleton_lists.return.dsk", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 98, "end_line": 132, "span_ids": ["inline_singleton_lists", "lazify"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lazify(dsk):\n \"\"\"\n Remove unnecessary calls to ``list`` in tasks.\n\n See Also\n --------\n dask.bag.core.lazify_task\n \"\"\"\n return valmap(lazify_task, dsk)\n\n\ndef inline_singleton_lists(dsk, keys, dependencies=None):\n \"\"\"Inline lists that are only used once.\n\n >>> d = {'b': (list, 'a'),\n ... 
'c': (sum, 'b', 1)}\n >>> inline_singleton_lists(d, 'c')\n {'c': (<built-in function sum>, (<class 'list'>, 'a'), 1)}\n\n Pairs nicely with lazify afterwards.\n \"\"\"\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, task=v) for k, v in dsk.items()}\n dependents = reverse_dict(dependencies)\n\n inline_keys = {\n k\n for k, v in dsk.items()\n if istask(v) and v and v[0] is list and len(dependents[k]) == 1\n }\n inline_keys.difference_update(flatten(keys))\n dsk = inline(dsk, inline_keys, inline_constants=False)\n for k in inline_keys:\n del dsk[k]\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_optimize__to_textfiles_chunk.with_lazy_file_as_f_.if_last_endline_.f_write_endline_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_optimize__to_textfiles_chunk.with_lazy_file_as_f_.if_last_endline_.f_write_endline_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 133, "end_line": 162, "span_ids": ["_to_textfiles_chunk", "optimize"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize(dsk, keys, fuse_keys=None, rename_fused_keys=None, **kwargs):\n \"\"\"Optimize a dask from a dask Bag.\"\"\"\n dsk = ensure_dict(dsk)\n dsk2, dependencies = cull(dsk, keys)\n kwargs = {}\n if rename_fused_keys is not None:\n kwargs[\"rename_keys\"] = rename_fused_keys\n dsk3, dependencies = fuse(dsk2, keys + (fuse_keys or []), dependencies, **kwargs)\n dsk4 = inline_singleton_lists(dsk3, keys, dependencies)\n dsk5 = lazify(dsk4)\n return dsk5\n\n\ndef _to_textfiles_chunk(data, lazy_file, last_endline):\n with lazy_file as f:\n if isinstance(f, io.TextIOWrapper):\n endline = \"\\n\"\n ensure = ensure_unicode\n else:\n endline = b\"\\n\"\n ensure = ensure_bytes\n started = False\n for d in data:\n if started:\n f.write(endline)\n else:\n started = True\n f.write(ensure(d))\n if last_endline:\n f.write(endline)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_to_textfiles_to_textfiles.if_compute_.else_.return.out_to_delayed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_to_textfiles_to_textfiles.if_compute_.else_.return.out_to_delayed_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 167, "end_line": 257, "span_ids": ["to_textfiles"], "tokens": 726}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_textfiles(\n b,\n path,\n name_function=None,\n compression=\"infer\",\n encoding=system_encoding,\n compute=True,\n storage_options=None,\n last_endline=False,\n **kwargs,\n):\n \"\"\"Write dask Bag to disk, one filename per partition, one line per element.\n\n **Paths**: This will create one file for each partition in your bag. You\n can specify the filenames in a variety of ways.\n\n Use a globstring\n\n >>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP\n\n The * will be replaced by the increasing sequence 1, 2, ...\n\n ::\n\n /path/to/data/0.json.gz\n /path/to/data/1.json.gz\n\n Use a globstring and a ``name_function=`` keyword argument. The\n name_function function should expect an integer and produce a string.\n Strings produced by name_function must preserve the order of their\n respective partition indices.\n\n >>> from datetime import date, timedelta\n >>> def name(i):\n ... return str(date(2015, 1, 1) + i * timedelta(days=1))\n\n >>> name(0)\n '2015-01-01'\n >>> name(15)\n '2015-01-16'\n\n >>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP\n\n ::\n\n /path/to/data/2015-01-01.json.gz\n /path/to/data/2015-01-02.json.gz\n ...\n\n You can also provide an explicit list of paths.\n\n >>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP\n >>> b.to_textfiles(paths) # doctest: +SKIP\n\n **Compression**: Filenames with extensions corresponding to known\n compression algorithms (gz, bz2) will be compressed accordingly.\n\n **Bag Contents**: The bag calling ``to_textfiles`` must be a bag of\n text strings. For example, a bag of dictionaries could be written to\n JSON text files by mapping ``json.dumps`` on to the bag first, and\n then calling ``to_textfiles`` :\n\n >>> b_dict.map(json.dumps).to_textfiles(\"/path/to/data/*.json\") # doctest: +SKIP\n\n **Last endline**: By default the last line does not end with a newline\n character. 
Pass ``last_endline=True`` to invert the default.\n \"\"\"\n mode = \"wb\" if encoding is None else \"wt\"\n files = open_files(\n path,\n compression=compression,\n mode=mode,\n encoding=encoding,\n name_function=name_function,\n num=b.npartitions,\n **(storage_options or {}),\n )\n\n name = \"to-textfiles-\" + uuid.uuid4().hex\n dsk = {\n (name, i): (_to_textfiles_chunk, (b.name, i), f, last_endline)\n for i, f in enumerate(files)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])\n out = type(b)(graph, name, b.npartitions)\n\n if compute:\n out.compute(**kwargs)\n return [f.path for f in files]\n else:\n return out.to_delayed()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_finalize_StringAccessor.__getattr__.try_.except_AttributeError_.if_key_in_dir_str_.else_.raise": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_finalize_StringAccessor.__getattr__.try_.except_AttributeError_.if_key_in_dir_str_.else_.raise", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 266, "end_line": 317, "span_ids": ["StringAccessor.__dir__", "finalize_item", "StringAccessor._strmap", "StringAccessor.__init__", "StringAccessor", "finalize", "StringAccessor.__getattr__"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def finalize(results):\n if not results:\n return results\n if isinstance(results, Iterator):\n results = list(results)\n if isinstance(results[0], Iterable) and not isinstance(results[0], str):\n results = toolz.concat(results)\n if isinstance(results, Iterator):\n results = list(results)\n return results\n\n\ndef finalize_item(results):\n return results[0]\n\n\nclass StringAccessor:\n \"\"\"String processing functions\n\n Examples\n --------\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])\n >>> list(b.str.lower())\n ['alice smith', 'bob jones', 'charlie smith']\n\n >>> list(b.str.match('*Smith'))\n ['Alice Smith', 'Charlie Smith']\n\n >>> list(b.str.split(' '))\n [['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]\n \"\"\"\n\n def __init__(self, bag):\n self._bag = bag\n\n def __dir__(self):\n return sorted(set(dir(type(self)) + dir(str)))\n\n def _strmap(self, key, *args, **kwargs):\n return self._bag.map(operator.methodcaller(key, *args, **kwargs))\n\n def __getattr__(self, key):\n try:\n return object.__getattribute__(self, key)\n except AttributeError:\n if key in dir(str):\n func = getattr(str, key)\n return robust_wraps(func)(partial(self._strmap, key))\n else:\n raise", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_StringAccessor.match_robust_wraps.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_StringAccessor.match_robust_wraps.return._", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 311, "end_line": 338, "span_ids": ["StringAccessor.match", "robust_wraps"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor:\n\n def match(self, pattern):\n \"\"\"Filter strings by those that match a pattern.\n\n Examples\n --------\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])\n >>> list(b.str.match('*Smith'))\n ['Alice Smith', 'Charlie Smith']\n\n See Also\n --------\n fnmatch.fnmatch\n \"\"\"\n from fnmatch import fnmatch\n\n return self._bag.filter(partial(fnmatch, pat=pattern))\n\n\ndef robust_wraps(wrapper):\n \"\"\"A weak version of wraps that only copies doc.\"\"\"\n\n def _(wrapped):\n wrapped.__doc__ = wrapper.__doc__\n return wrapped\n\n return _", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item_Item.__int__.__float__.__complex__.__bool__.DaskMethodsMixin_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item_Item.__int__.__float__.__complex__.__bool__.DaskMethodsMixin_compute", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 343, "end_line": 412, "span_ids": ["Item.__init__", "Item.from_delayed", "Item.__getstate__", "Item.__dask_keys__", "Item.__dask_graph__", "Item.apply", "Item.__dask_postcompute__", "Item.__dask_postpersist__", "Item:6", "Item._rebuild", "Item._args", "Item", "Item.__dask_layers__", "Item.__dask_tokenize__", "Item:2", "Item.__setstate__"], "tokens": 567}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Item(DaskMethodsMixin):\n def __init__(self, dsk, key, layer=None):\n self.dask = dsk\n self.key = key\n self.name = key\n\n # NOTE: Layer only used by `Item.from_delayed`, to handle Delayed objects created by other collections.\n # e.g.: Item.from_delayed(da.ones(1).to_delayed()[0])\n # See Delayed.__init__\n self._layer = layer or key\n if isinstance(dsk, HighLevelGraph) and self._layer not in dsk.layers:\n raise ValueError(\n f\"Layer {self._layer} not in the HighLevelGraph's layers: {list(dsk.layers)}\"\n )\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return 
[self.key]\n\n def __dask_layers__(self):\n return (self._layer,)\n\n def __dask_tokenize__(self):\n return self.key\n\n __dask_optimize__ = globalmethod(optimize, key=\"bag_optimize\", falsey=dont_optimize)\n __dask_scheduler__ = staticmethod(mpget)\n\n def __dask_postcompute__(self):\n return finalize_item, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()\n\n def _rebuild(self, dsk, *, rename=None):\n key = replace_name_in_key(self.key, rename) if rename else self.key\n return Item(dsk, key)\n\n @staticmethod\n def from_delayed(value):\n \"\"\"Create bag item from a dask.delayed value.\n\n See ``dask.bag.from_delayed`` for details\n \"\"\"\n from dask.delayed import Delayed, delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n assert isinstance(value, Delayed)\n return Item(value.dask, value.key, layer=value.__dask_layers__()[0])\n\n @property\n def _args(self):\n return (self.dask, self.key)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self.key = state\n\n def apply(self, func):\n name = \"{}-{}\".format(funcname(func), tokenize(self, func, \"apply\"))\n dsk = {name: (func, self.key)}\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return Item(graph, name)\n\n __int__ = __float__ = __complex__ = __bool__ = DaskMethodsMixin.compute", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag_Bag.str.property_fget_StringAcces": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag_Bag.str.property_fget_StringAcces", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 431, "end_line": 500, "span_ids": ["Bag.__dask_tokenize__", "Bag.__dask_layers__", "Bag.__dask_graph__", "Bag.__str__", "Bag:7", "Bag", "Bag._rebuild", "Bag.__dask_keys__", "Bag.__dask_postcompute__", "Bag.__dask_postpersist__", "Bag:3", "Bag.__init__"], "tokens": 587}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n \"\"\"Parallel collection of Python objects\n\n Examples\n --------\n Create Bag from sequence\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10))\n [0, 20, 40]\n\n Create Bag from filename or globstring of filenames\n\n >>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP\n\n Create manually (expert use)\n\n >>> dsk = {('x', 0): (range, 5),\n ... ('x', 1): (range, 5),\n ... 
('x', 2): (range, 5)}\n >>> b = db.Bag(dsk, 'x', npartitions=3)\n\n >>> sorted(b.map(lambda x: x * 10))\n [0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]\n\n >>> int(b.fold(lambda x, y: x + y))\n 30\n \"\"\"\n\n def __init__(self, dsk, name, npartitions):\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self.name = name\n self.npartitions = npartitions\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [(self.name, i) for i in range(self.npartitions)]\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(optimize, key=\"bag_optimize\", falsey=dont_optimize)\n __dask_scheduler__ = staticmethod(mpget)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()\n\n def _rebuild(self, dsk, *, rename=None):\n name = self.name\n if rename:\n name = rename.get(name, name)\n return type(self)(dsk, name, self.npartitions)\n\n def __str__(self):\n return \"dask.bag<%s, npartitions=%d>\" % (key_split(self.name), self.npartitions)\n\n __repr__ = __str__\n\n str = property(fget=StringAccessor)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_Bag.map.return.bag_map_func_self_args": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_Bag.map.return.bag_map_func_self_args", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 480, "end_line": 539, "span_ids": ["Bag.map"], "tokens": 510}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def map(self, func, *args, **kwargs):\n \"\"\"Apply a function elementwise across one or more bags.\n\n Note that all ``Bag`` arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n *args, **kwargs : Bag, Item, or object\n Extra arguments and keyword arguments to pass to ``func`` *after*\n the calling bag instance. 
Non-Bag args/kwargs are broadcasted\n across all calls to ``func``.\n\n Notes\n -----\n For calls with multiple `Bag` arguments, corresponding partitions\n should have the same length; if they do not, the call will error at\n compute time.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5), npartitions=2)\n >>> b2 = db.from_sequence(range(5, 10), npartitions=2)\n\n Apply a function to all elements in a bag:\n\n >>> b.map(lambda x: x + 1).compute()\n [1, 2, 3, 4, 5]\n\n Apply a function with arguments from multiple bags:\n\n >>> from operator import add\n >>> b.map(add, b2).compute()\n [5, 7, 9, 11, 13]\n\n Non-bag arguments are broadcast across all calls to the mapped\n function:\n\n >>> b.map(add, 1).compute()\n [1, 2, 3, 4, 5]\n\n Keyword arguments are also supported, and have the same semantics as\n regular arguments:\n\n >>> def myadd(x, y=0):\n ... return x + y\n >>> b.map(myadd, y=b2).compute()\n [5, 7, 9, 11, 13]\n >>> b.map(myadd, y=1).compute()\n [1, 2, 3, 4, 5]\n\n Both arguments and keyword arguments can also be instances of\n ``dask.bag.Item``. Here we'll add the max value in the bag to each\n element:\n\n >>> b.map(myadd, b.max()).compute()\n [4, 5, 6, 7, 8]\n \"\"\"\n return bag_map(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.starmap_Bag.starmap.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.starmap_Bag.starmap.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 563, "end_line": 619, "span_ids": ["Bag.starmap"], "tokens": 540}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def starmap(self, func, **kwargs):\n \"\"\"Apply a function using argument tuples from the given bag.\n\n This is similar to ``itertools.starmap``, except it also accepts\n keyword arguments. In pseudocode, this is could be written as:\n\n >>> def starmap(func, bag, **kwargs):\n ... return (func(*args, **kwargs) for args in bag)\n\n Parameters\n ----------\n func : callable\n **kwargs : Item, Delayed, or object, optional\n Extra keyword arguments to pass to ``func``. These can either be\n normal objects, ``dask.bag.Item``, or ``dask.delayed.Delayed``.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]\n >>> b = db.from_sequence(data, npartitions=2)\n\n Apply a function to each argument tuple:\n\n >>> from operator import add\n >>> b.starmap(add).compute()\n [3, 7, 11, 15, 19]\n\n Apply a function to each argument tuple, with additional keyword\n arguments:\n\n >>> def myadd(x, y, z=0):\n ... 
return x + y + z\n >>> b.starmap(myadd, z=10).compute()\n [13, 17, 21, 25, 29]\n\n Keyword arguments can also be instances of ``dask.bag.Item`` or\n ``dask.delayed.Delayed``:\n\n >>> max_second = b.pluck(1).max()\n >>> max_second.compute()\n 10\n >>> b.starmap(myadd, z=max_second).compute()\n [13, 17, 21, 25, 29]\n \"\"\"\n name = \"{}-{}\".format(funcname(func), tokenize(self, func, \"starmap\", **kwargs))\n dependencies = [self]\n if kwargs:\n kwargs, collections = unpack_scalar_dask_kwargs(kwargs)\n dependencies.extend(collections)\n\n dsk = {\n (name, i): (reify, (starmap_chunk, func, (self.name, i), kwargs))\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag._args_Bag.filter.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag._args_Bag.filter.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 621, "end_line": 648, "span_ids": ["Bag.__setstate__", "Bag.filter", "Bag.__getstate__", "Bag._args"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n @property\n def _args(self):\n return (self.dask, self.name, self.npartitions)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self.name, self.npartitions = state\n\n def filter(self, predicate):\n \"\"\"Filter elements in collection by a predicate function.\n\n >>> def iseven(x):\n ... 
return x % 2 == 0\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.filter(iseven))\n [0, 2, 4]\n \"\"\"\n name = f\"filter-{funcname(predicate)}-{tokenize(self, predicate)}\"\n dsk = {\n (name, i): (reify, (filter, predicate, (self.name, i)))\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.random_sample_Bag.random_sample.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.random_sample_Bag.random_sample.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 630, "end_line": 663, "span_ids": ["Bag.random_sample"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def random_sample(self, prob, random_state=None):\n \"\"\"Return elements from bag with probability of ``prob``.\n\n Parameters\n ----------\n prob : float\n A float between 0 and 1, representing the probability that each\n element will be returned.\n random_state : int or random.Random, optional\n If an integer, will be used to seed a new ``random.Random`` object.\n If provided, results in deterministic sampling.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.random_sample(0.5, 43))\n [0, 3, 4]\n >>> list(b.random_sample(0.5, 43))\n [0, 3, 4]\n \"\"\"\n if not 0 <= prob <= 1:\n raise ValueError(\"prob must be a number in the interval [0, 1]\")\n if not isinstance(random_state, Random):\n random_state = Random(random_state)\n\n name = \"random-sample-%s\" % tokenize(self, prob, random_state.getstate())\n state_data = random_state_data_python(self.npartitions, random_state)\n dsk = {\n (name, i): (reify, (random_sample, (self.name, i), state, prob))\n for i, state in zip(range(self.npartitions), state_data)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.remove_Bag.remove.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.remove_Bag.remove.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 685, "end_line": 702, "span_ids": ["Bag.remove"], 
"tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def remove(self, predicate):\n \"\"\"Remove elements in collection that match predicate.\n\n >>> def iseven(x):\n ... return x % 2 == 0\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> list(b.remove(iseven))\n [1, 3]\n \"\"\"\n name = f\"remove-{funcname(predicate)}-{tokenize(self, predicate)}\"\n dsk = {\n (name, i): (reify, (remove, predicate, (self.name, i)))\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_partitions_Bag.map_partitions.return.map_partitions_func_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.map_partitions_Bag.map_partitions.return.map_partitions_func_self", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 684, "end_line": 725, "span_ids": ["Bag.map_partitions"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def map_partitions(self, func, *args, **kwargs):\n \"\"\"Apply a function to every partition across one or more bags.\n\n Note that all ``Bag`` arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n The function to be called on every partition.\n This function should expect an ``Iterator`` or ``Iterable`` for\n every partition and should return an ``Iterator`` or ``Iterable``\n in return.\n *args, **kwargs : Bag, Item, Delayed, or object\n Arguments and keyword arguments to pass to ``func``.\n Partitions from this bag will be the first argument, and these will\n be passed *after*.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(1, 101), npartitions=10)\n >>> def div(nums, den=1):\n ... 
return [num / den for num in nums]\n\n Using a python object:\n\n >>> hi = b.max().compute()\n >>> hi\n 100\n >>> b.map_partitions(div, den=hi).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Using an ``Item``:\n\n >>> b.map_partitions(div, den=b.max()).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Note that while both versions give the same output, the second forms a\n single graph, and then computes everything at once, and in some cases\n may be more efficient.\n \"\"\"\n return map_partitions(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.pluck_Bag.pluck.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.pluck_Bag.pluck.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 747, "end_line": 771, "span_ids": ["Bag.pluck"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def pluck(self, key, default=no_default):\n \"\"\"Select item from all tuples/dicts in collection.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},\n ... 
{'name': 'Bob', 'credits': [10, 20]}])\n >>> list(b.pluck('name'))\n ['Alice', 'Bob']\n >>> list(b.pluck('credits').pluck(0))\n [1, 10]\n \"\"\"\n name = \"pluck-\" + tokenize(self, key, default)\n key = quote(key)\n if default == no_default:\n dsk = {\n (name, i): (list, (pluck, key, (self.name, i)))\n for i in range(self.npartitions)\n }\n else:\n dsk = {\n (name, i): (list, (pluck, key, (self.name, i), default))\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.unzip_Bag.unzip.return.tuple_self_pluck_i_for_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.unzip_Bag.unzip.return.tuple_self_pluck_i_for_i", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 753, "end_line": 770, "span_ids": ["Bag.unzip"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def unzip(self, n):\n \"\"\"Transform a bag of tuples to ``n`` bags of their elements.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence([(i, i + 1, i + 2) for i in range(10)])\n >>> first, second, third = b.unzip(3)\n >>> isinstance(first, db.Bag)\n True\n >>> first.compute()\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n Note that this is equivalent to:\n\n >>> first, second, third = (b.pluck(i) for i in range(3))\n \"\"\"\n return tuple(self.pluck(i) for i in range(n))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_textfiles_Bag.to_avro.return.to_avro_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_textfiles_Bag.to_avro.return.to_avro_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 792, "end_line": 840, "span_ids": ["Bag.to_textfiles", "Bag.to_avro"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n @wraps(to_textfiles)\n def to_textfiles(\n self,\n path,\n name_function=None,\n compression=\"infer\",\n encoding=system_encoding,\n compute=True,\n storage_options=None,\n last_endline=False,\n **kwargs,\n ):\n 
return to_textfiles(\n self,\n path,\n name_function,\n compression,\n encoding,\n compute,\n storage_options=storage_options,\n last_endline=last_endline,\n **kwargs,\n )\n\n @wraps(to_avro)\n def to_avro(\n self,\n filename,\n schema,\n name_function=None,\n storage_options=None,\n codec=\"null\",\n sync_interval=16000,\n metadata=None,\n compute=True,\n **kwargs,\n ):\n return to_avro(\n self,\n filename,\n schema,\n name_function,\n storage_options,\n codec,\n sync_interval,\n metadata,\n compute,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.fold_Bag.fold.if_initial_is_not_no_defa.else_.return.self_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.fold_Bag.fold.if_initial_is_not_no_defa.else_.return.self_reduction_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 822, "end_line": 887, "span_ids": ["Bag.fold"], "tokens": 490}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def fold(\n self, binop, combine=None, initial=no_default, split_every=None, out_type=Item\n ):\n \"\"\"Parallelizable reduction\n\n Fold is like the builtin function ``reduce`` except that it works in\n parallel. Fold takes two binary operator functions, one to reduce each\n partition of our dataset and another to combine results between\n partitions\n\n 1. ``binop``: Binary operator to reduce within each partition\n 2. ``combine``: Binary operator to combine results from binop\n\n Sequentially this would look like the following:\n\n >>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP\n >>> final = reduce(combine, intermediates) # doctest: +SKIP\n\n If only one function is given then it is used for both functions\n ``binop`` and ``combine`` as in the following example to compute the\n sum:\n\n >>> def add(x, y):\n ... return x + y\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5))\n >>> b.fold(add).compute()\n 10\n\n In full form we provide both binary operators as well as their default\n arguments\n\n >>> b.fold(binop=add, combine=add, initial=0).compute()\n 10\n\n More complex binary operators are also doable\n\n >>> def add_to_set(acc, x):\n ... ''' Add new element x to set acc '''\n ... 
return acc | set([x])\n >>> b.fold(add_to_set, set.union, initial=set()).compute()\n {0, 1, 2, 3, 4}\n\n See Also\n --------\n\n Bag.foldby\n \"\"\"\n combine = combine or binop\n if initial is not no_default:\n return self.reduction(\n curry(_reduce, binop, initial=initial),\n curry(_reduce, combine),\n split_every=split_every,\n out_type=out_type,\n )\n else:\n from tlz.curried import reduce\n\n return self.reduction(\n reduce(binop),\n reduce(combine),\n split_every=split_every,\n out_type=out_type,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.frequencies_Bag.frequencies.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.frequencies_Bag.frequencies.return.result", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 889, "end_line": 906, "span_ids": ["Bag.frequencies"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def frequencies(self, split_every=None, sort=False):\n \"\"\"Count number of occurrences of each distinct element.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice', 'Bob', 'Alice'])\n >>> dict(b.frequencies()) # doctest: +SKIP\n {'Alice': 2, 'Bob': 1}\n \"\"\"\n result = self.reduction(\n frequencies,\n merge_frequencies,\n out_type=Bag,\n split_every=split_every,\n name=\"frequencies\",\n ).map_partitions(dictitems)\n if sort:\n result = result.map_partitions(sorted, key=second, reverse=True)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.topk_Bag.topk.return.self_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.topk_Bag.topk.return.self_reduction_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 908, "end_line": 933, "span_ids": ["Bag.topk"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def topk(self, k, key=None, split_every=None):\n \"\"\"K largest elements in collection\n\n Optionally ordered by some key function\n\n >>> import dask.bag as db\n >>> b = db.from_sequence([10, 3, 5, 7, 11, 4])\n >>> list(b.topk(2))\n [11, 10]\n\n >>> list(b.topk(2, lambda x: -x))\n [3, 
4]\n \"\"\"\n if key:\n if callable(key) and takes_multiple_arguments(key):\n key = partial(apply, key)\n func = partial(topk, k, key=key)\n else:\n func = partial(topk, k)\n return self.reduction(\n func,\n compose(func, toolz.concat),\n out_type=Bag,\n split_every=split_every,\n name=\"topk\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.distinct_Bag.distinct.return.self_reduction_func_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.distinct_Bag.distinct.return.self_reduction_func_agg_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 935, "end_line": 960, "span_ids": ["Bag.distinct"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def distinct(self, key=None):\n \"\"\"Distinct elements of collection\n\n Unordered without repeats.\n\n Parameters\n ----------\n key: {callable,str}\n Defines uniqueness of items in bag by calling ``key`` on each item.\n If a string is passed ``key`` is considered to be ``lambda x: x[key]``.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice', 'Bob', 'Alice'])\n >>> sorted(b.distinct())\n ['Alice', 'Bob']\n >>> b = db.from_sequence([{'name': 'Alice'}, {'name': 'Bob'}, {'name': 'Alice'}])\n >>> b.distinct(key=lambda x: x['name']).compute()\n [{'name': 'Alice'}, {'name': 'Bob'}]\n >>> b.distinct(key='name').compute()\n [{'name': 'Alice'}, {'name': 'Bob'}]\n \"\"\"\n func = chunk_distinct if key is None else partial(chunk_distinct, key=key)\n agg = merge_distinct if key is None else partial(merge_distinct, key=key)\n return self.reduction(func, agg, out_type=Bag, name=\"distinct\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.reduction_Bag.reduction.if_out_type_is_Item_.else_.return.Bag_graph_fmt_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.reduction_Bag.reduction.if_out_type_is_Item_.else_.return.Bag_graph_fmt_1_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 982, "end_line": 1050, "span_ids": ["Bag.reduction"], "tokens": 513}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
Bag(DaskMethodsMixin):\n\n def reduction(\n self, perpartition, aggregate, split_every=None, out_type=Item, name=None\n ):\n \"\"\"Reduce collection with reduction operators.\n\n Parameters\n ----------\n perpartition: function\n reduction to apply to each partition\n aggregate: function\n reduction to apply to the results of all partitions\n split_every: int (optional)\n Group partitions into groups of this size while performing reduction\n Defaults to 8\n out_type: {Bag, Item}\n The out type of the result, Item if a single element, Bag if a list\n of elements. Defaults to Item.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(10))\n >>> b.reduction(sum, sum).compute()\n 45\n \"\"\"\n if split_every is None:\n split_every = 8\n if split_every is False:\n split_every = self.npartitions\n\n token = tokenize(self, perpartition, aggregate, split_every)\n a = f\"{name or funcname(perpartition)}-part-{token}\"\n is_last = self.npartitions == 1\n dsk = {\n (a, i): (empty_safe_apply, perpartition, (self.name, i), is_last)\n for i in range(self.npartitions)\n }\n k = self.npartitions\n b = a\n fmt = f\"{name or funcname(aggregate)}-aggregate-{token}\"\n depth = 0\n\n while k > split_every:\n c = fmt + str(depth)\n for i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(c, i)] = (\n empty_safe_aggregate,\n aggregate,\n [(b, j) for j in inds],\n False,\n )\n\n k = i + 1\n b = c\n depth += 1\n\n dsk[(fmt, 0)] = (\n empty_safe_aggregate,\n aggregate,\n [(b, j) for j in range(k)],\n True,\n )\n\n graph = HighLevelGraph.from_collections(fmt, dsk, dependencies=[self])\n if out_type is Item:\n dsk[fmt] = dsk.pop((fmt, 0))\n return Item(graph, fmt)\n else:\n return Bag(graph, fmt, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.sum_Bag.std.return.self_var_ddof_ddof_apply": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.sum_Bag.std.return.self_var_ddof_ddof_apply", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1052, "end_line": 1124, "span_ids": ["Bag.max", "Bag.mean", "Bag.count", "Bag.min", "Bag.all", "Bag.std", "Bag.any", "Bag.var", "Bag.sum"], "tokens": 499}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def sum(self, split_every=None):\n \"\"\"Sum all elements\"\"\"\n return self.reduction(sum, sum, split_every=split_every)\n\n def max(self, split_every=None):\n \"\"\"Maximum element\"\"\"\n return self.reduction(max, max, split_every=split_every)\n\n def min(self, split_every=None):\n \"\"\"Minimum element\"\"\"\n return self.reduction(min, min, split_every=split_every)\n\n def any(self, split_every=None):\n \"\"\"Are any of the elements truthy?\n\n Examples\n --------\n >>> import dask.bag as db\n >>> bool_bag = db.from_sequence([True, True, False])\n >>> bool_bag.any().compute()\n True\n 
\"\"\"\n return self.reduction(any, any, split_every=split_every)\n\n def all(self, split_every=None):\n \"\"\"Are all elements truthy?\n\n Examples\n --------\n >>> import dask.bag as db\n >>> bool_bag = db.from_sequence([True, True, False])\n >>> bool_bag.all().compute()\n False\n \"\"\"\n return self.reduction(all, all, split_every=split_every)\n\n def count(self, split_every=None):\n \"\"\"Count the number of elements.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> numbers = db.from_sequence([1, 2, 3])\n >>> numbers.count().compute()\n 3\n \"\"\"\n return self.reduction(count, sum, split_every=split_every)\n\n def mean(self):\n \"\"\"Arithmetic mean\"\"\"\n\n def mean_chunk(seq):\n total, n = 0.0, 0\n for x in seq:\n total += x\n n += 1\n return total, n\n\n def mean_aggregate(x):\n totals, counts = list(zip(*x))\n return 1.0 * sum(totals) / sum(counts)\n\n return self.reduction(mean_chunk, mean_aggregate, split_every=False)\n\n def var(self, ddof=0):\n \"\"\"Variance\"\"\"\n return self.reduction(\n chunk.var_chunk, partial(chunk.var_aggregate, ddof=ddof), split_every=False\n )\n\n def std(self, ddof=0):\n \"\"\"Standard deviation\"\"\"\n return self.var(ddof=ddof).apply(math.sqrt)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.join_Bag.join.return.type_self_graph_name_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.join_Bag.join.return.type_self_graph_name_s", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1082, "end_line": 1149, "span_ids": ["Bag.join"], "tokens": 583}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def join(self, other, on_self, on_other=None):\n \"\"\"Joins collection with another collection.\n\n Other collection must be one of the following:\n\n 1. An iterable. We recommend tuples over lists for internal\n performance reasons.\n 2. A delayed object, pointing to a tuple. This is recommended if the\n other collection is sizable and you're using the distributed\n scheduler. Dask is able to pass around data wrapped in delayed\n objects with greater sophistication.\n 3. 
A Bag with a single partition\n\n You might also consider Dask Dataframe, whose join operations are much\n more heavily optimized.\n\n Parameters\n ----------\n other: Iterable, Delayed, Bag\n Other collection on which to join\n on_self: callable\n Function to call on elements in this collection to determine a\n match\n on_other: callable (defaults to on_self)\n Function to call on elements in the other collection to determine a\n match\n\n Examples\n --------\n >>> import dask.bag as db\n >>> people = db.from_sequence(['Alice', 'Bob', 'Charlie'])\n >>> fruit = ['Apple', 'Apricot', 'Banana']\n >>> list(people.join(fruit, lambda x: x[0]))\n [('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]\n \"\"\"\n name = \"join-\" + tokenize(self, other, on_self, on_other)\n dsk = {}\n if isinstance(other, Bag):\n if other.npartitions == 1:\n dsk.update(other.dask)\n other = other.__dask_keys__()[0]\n dsk[\"join-%s-other\" % name] = (list, other)\n else:\n msg = (\n \"Multi-bag joins are not implemented. \"\n \"We recommend Dask dataframe if appropriate\"\n )\n raise NotImplementedError(msg)\n elif isinstance(other, Delayed):\n dsk.update(other.dask)\n other = other._key\n elif isinstance(other, Iterable):\n other = other\n else:\n msg = (\n \"Joined argument must be single-partition Bag, \"\n \" delayed object, or Iterable, got %s\" % type(other).__name__\n )\n raise TypeError(msg)\n\n if on_other is None:\n on_other = on_self\n\n for i in range(self.npartitions):\n dsk[(name, i)] = (list, (join, on_other, other, on_self, (self.name, i)))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.product_Bag.product.return.type_self_graph_name_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.product_Bag.product.return.type_self_graph_name_n", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1195, "end_line": 1209, "span_ids": ["Bag.product"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def product(self, other):\n \"\"\"Cartesian product between two bags.\"\"\"\n assert isinstance(other, Bag)\n name = \"product-\" + tokenize(self, other)\n n, m = self.npartitions, other.npartitions\n dsk = {\n (name, i * m + j): (\n list,\n (itertools.product, (self.name, i), (other.name, j)),\n )\n for i in range(n)\n for j in range(m)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, other])\n return type(self)(graph, name, n * m)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
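A minimal sketch (plain Python, not the dask implementation) of the key-function join semantics documented above; the graph built by ``Bag.join`` instead calls ``join(on_other, other, on_self, partition)`` once per partition, but the matching rule is the same.

# people/fruit are taken from the docstring example above.
people = ['Alice', 'Bob', 'Charlie']
fruit = ['Apple', 'Apricot', 'Banana']
on_self = lambda x: x[0]  # match on the first letter

joined = [(f, p) for p in people for f in fruit if on_self(f) == on_self(p)]
assert joined == [('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]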
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby_Bag.foldby._Combined_reduction_and": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby_Bag.foldby._Combined_reduction_and", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1167, "end_line": 1282, "span_ids": ["Bag.foldby"], "tokens": 917}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def foldby(\n self,\n key,\n binop,\n initial=no_default,\n combine=None,\n combine_initial=no_default,\n split_every=None,\n ):\n \"\"\"Combined reduction and groupby.\n\n Foldby provides a combined groupby and reduce for efficient parallel\n split-apply-combine tasks.\n\n The computation\n\n >>> b.foldby(key, binop, init) # doctest: +SKIP\n\n is equivalent to the following:\n\n >>> def reduction(group): # doctest: +SKIP\n ... return reduce(binop, group, init) # doctest: +SKIP\n\n >>> b.groupby(key).map(lambda (k, v): (k, reduction(v)))# doctest: +SKIP\n\n But uses minimal communication and so is *much* faster.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(10))\n >>> iseven = lambda x: x % 2 == 0\n >>> add = lambda x, y: x + y\n >>> dict(b.foldby(iseven, add))\n {True: 20, False: 25}\n\n **Key Function**\n\n The key function determines how to group the elements in your bag.\n In the common case where your bag holds dictionaries then the key\n function often gets out one of those elements.\n\n >>> def key(x):\n ... return x['name']\n\n This case is so common that it is special cased, and if you provide a\n key that is not a callable function then dask.bag will turn it into one\n automatically. The following are equivalent:\n\n >>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP\n >>> b.foldby('name', ...) # doctest: +SKIP\n\n **Binops**\n\n It can be tricky to construct the right binary operators to perform\n analytic queries. The ``foldby`` method accepts two binary operators,\n ``binop`` and ``combine``. Binary operators two inputs and output must\n have the same type.\n\n Binop takes a running total and a new element and produces a new total:\n\n >>> def binop(total, x):\n ... return total + x['amount']\n\n Combine takes two totals and combines them:\n\n >>> def combine(total1, total2):\n ... return total1 + total2\n\n Each of these binary operators may have a default first value for\n total, before any other value is seen. For addition binary operators\n like above this is often ``0`` or the identity element for your\n operation.\n\n **split_every**\n\n Group partitions into groups of this size while performing reduction.\n Defaults to 8.\n\n >>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP\n\n Examples\n --------\n\n We can compute the maximum of some ``(key, value)`` pairs, grouped\n by the ``key``. 
(You might be better off converting the ``Bag`` to\n a ``dask.dataframe`` and using its groupby).\n\n >>> import random\n >>> import dask.bag as db\n\n >>> tokens = list('abcdefg')\n >>> values = range(10000)\n >>> a = [(random.choice(tokens), random.choice(values))\n ... for _ in range(100)]\n >>> a[:2] # doctest: +SKIP\n [('g', 676), ('a', 871)]\n\n >>> a = db.from_sequence(a)\n\n >>> def binop(t, x):\n ... return max((t, x), key=lambda x: x[1])\n\n >>> a.foldby(lambda x: x[0], binop).compute() # doctest: +SKIP\n [('g', ('g', 984)),\n ('a', ('a', 871)),\n ('b', ('b', 999)),\n ('c', ('c', 765)),\n ('f', ('f', 955)),\n ('e', ('e', 991)),\n ('d', ('d', 854))]\n\n See Also\n --------\n\n toolz.reduceby\n pyspark.combineByKey\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby.if_split_every_is_None__Bag.foldby.return.type_self_graph_e_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.foldby.if_split_every_is_None__Bag.foldby.return.type_self_graph_e_1_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1274, "end_line": 1340, "span_ids": ["Bag.foldby"], "tokens": 553}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def foldby(\n self,\n key,\n binop,\n initial=no_default,\n combine=None,\n combine_initial=no_default,\n split_every=None,\n ):\n if split_every is None:\n split_every = 8\n if split_every is False:\n split_every = self.npartitions\n\n token = tokenize(self, key, binop, initial, combine, combine_initial)\n a = \"foldby-a-\" + token\n if combine is None:\n combine = binop\n if initial is not no_default:\n dsk = {\n (a, i): (reduceby, key, binop, (self.name, i), initial)\n for i in range(self.npartitions)\n }\n else:\n dsk = {\n (a, i): (reduceby, key, binop, (self.name, i))\n for i in range(self.npartitions)\n }\n\n combine2 = partial(chunk.foldby_combine2, combine)\n depth = 0\n k = self.npartitions\n b = a\n while k > split_every:\n c = b + str(depth)\n if combine_initial is not no_default:\n for i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(c, i)] = (\n reduceby,\n 0,\n combine2,\n (toolz.concat, (map, dictitems, [(b, j) for j in inds])),\n combine_initial,\n )\n else:\n for i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(c, i)] = (\n merge_with,\n (partial, reduce, combine),\n [(b, j) for j in inds],\n )\n\n k = i + 1\n b = c\n depth += 1\n\n e = \"foldby-b-\" + token\n if combine_initial is not no_default:\n dsk[(e, 0)] = (\n dictitems,\n (\n reduceby,\n 0,\n combine2,\n (toolz.concat, (map, dictitems, [(b, j) for j in range(k)])),\n combine_initial,\n ),\n )\n else:\n dsk[(e, 0)] = (\n dictitems,\n (merge_with, (partial, reduce, combine), [(b, j) for j in range(k)]),\n )\n\n graph = HighLevelGraph.from_collections(e, dsk, dependencies=[self])\n 
return type(self)(graph, e, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.take_Bag.take.if_compute_.else_.return.b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.take_Bag.take.if_compute_.else_.return.b", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1351, "end_line": 1404, "span_ids": ["Bag.take"], "tokens": 465}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def take(self, k, npartitions=1, compute=True, warn=True):\n \"\"\"Take the first k elements.\n\n Parameters\n ----------\n k : int\n The number of elements to return\n npartitions : int, optional\n Elements are only taken from the first ``npartitions``, with a\n default of 1. If there are fewer than ``k`` rows in the first\n ``npartitions`` a warning will be raised and any found rows\n returned. Pass -1 to use all partitions.\n compute : bool, optional\n Whether to compute the result, default is True.\n warn : bool, optional\n Whether to warn if the number of elements returned is less than\n requested, default is True.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(1_000))\n >>> b.take(3)\n (0, 1, 2)\n \"\"\"\n\n if npartitions <= -1:\n npartitions = self.npartitions\n if npartitions > self.npartitions:\n raise ValueError(\n \"only {} partitions, take \"\n \"received {}\".format(self.npartitions, npartitions)\n )\n\n token = tokenize(self, k, npartitions)\n name = \"take-\" + token\n\n if npartitions > 1:\n name_p = \"take-partial-\" + token\n\n dsk = {}\n for i in range(npartitions):\n dsk[(name_p, i)] = (list, (take, k, (self.name, i)))\n\n concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)]))\n dsk[(name, 0)] = (safe_take, k, concat, warn)\n else:\n dsk = {(name, 0): (safe_take, k, (self.name, 0), warn)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n b = Bag(graph, name, 1)\n\n if compute:\n return tuple(b.compute())\n else:\n return b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.flatten_Bag.__iter__.return.iter_self_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.flatten_Bag.__iter__.return.iter_self_compute_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1450, "end_line": 1470, "span_ids": ["Bag.flatten", "Bag.__iter__"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def flatten(self):\n \"\"\"Concatenate nested lists into one long list.\n\n >>> import dask.bag as db\n >>> b = db.from_sequence([[1], [2, 3]])\n >>> list(b)\n [[1], [2, 3]]\n\n >>> list(b.flatten())\n [1, 2, 3]\n \"\"\"\n name = \"flatten-\" + tokenize(self)\n dsk = {\n (name, i): (list, (toolz.concat, (self.name, i)))\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return type(self)(graph, name, self.npartitions)\n\n def __iter__(self):\n return iter(self.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.groupby_Bag.groupby.if_shuffle_disk_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.groupby_Bag.groupby.if_shuffle_disk_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1472, "end_line": 1532, "span_ids": ["Bag.groupby"], "tokens": 505}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def groupby(\n self,\n grouper,\n method=None,\n npartitions=None,\n blocksize=2**20,\n max_branch=None,\n shuffle=None,\n ):\n \"\"\"Group collection by key function\n\n This requires a full dataset read, serialization and shuffle.\n This is expensive. If possible you should use ``foldby``.\n\n Parameters\n ----------\n grouper: function\n Function on which to group elements\n shuffle: str\n Either 'disk' for an on-disk shuffle or 'tasks' to use the task\n scheduling framework. Use 'disk' if you are on a single machine\n and 'tasks' if you are on a distributed cluster.\n npartitions: int\n If using the disk-based shuffle, the number of output partitions\n blocksize: int\n If using the disk-based shuffle, the size of shuffle blocks (bytes)\n max_branch: int\n If using the task-based shuffle, the amount of splitting each\n partition undergoes. 
Increase this for fewer copies but more\n scheduler overhead.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(10))\n >>> iseven = lambda x: x % 2 == 0\n >>> dict(b.groupby(iseven)) # doctest: +SKIP\n {True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}\n\n See Also\n --------\n Bag.foldby\n \"\"\"\n if method is not None:\n raise Exception(\"The method= keyword has been moved to shuffle=\")\n if shuffle is None:\n shuffle = config.get(\"shuffle\", None)\n if shuffle is None:\n if config.get(\"scheduler\", None) in (\"dask.distributed\", \"distributed\"):\n shuffle = \"tasks\"\n else:\n shuffle = \"disk\"\n if shuffle == \"disk\":\n return groupby_disk(\n self, grouper, npartitions=npartitions, blocksize=blocksize\n )\n elif shuffle == \"tasks\":\n return groupby_tasks(self, grouper, max_branch=max_branch)\n else:\n msg = \"Shuffle must be 'disk' or 'tasks'\"\n raise NotImplementedError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.repartition_Bag.repartition.if_npartitions_is_not_Non.elif_partition_size_is_no.return.repartition_size_self_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.repartition_Bag.repartition.if_npartitions_is_not_Non.elif_partition_size_is_no.return.repartition_size_self_pa", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1572, "end_line": 1604, "span_ids": ["Bag.repartition"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def repartition(self, npartitions=None, partition_size=None):\n \"\"\"Repartition Bag across new divisions.\n\n Parameters\n ----------\n npartitions : int, optional\n Number of partitions of output.\n partition_size : int or string, optional\n Max number of bytes of memory for each partition. Use numbers or\n strings like 5MB.\n\n .. 
warning::\n\n This keyword argument triggers computation to determine\n the memory size of each partition, which may be expensive.\n\n Notes\n -----\n Exactly one of ``npartitions`` or ``partition_size`` should be specified.\n A ``ValueError`` will be raised when that is not the case.\n\n Examples\n --------\n >>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP\n \"\"\"\n if sum([partition_size is not None, npartitions is not None]) != 1:\n raise ValueError(\n \"Please provide exactly one of the ``npartitions`` or ``partition_size`` keyword arguments\"\n )\n if npartitions is not None:\n return repartition_npartitions(self, npartitions)\n elif partition_size is not None:\n return repartition_size(self, partition_size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.accumulate_Bag.accumulate.return.Bag_graph_b_self_nparti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.accumulate_Bag.accumulate.return.Bag_graph_b_self_nparti", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1671, "end_line": 1706, "span_ids": ["Bag.accumulate"], "tokens": 420}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def accumulate(self, binop, initial=no_default):\n \"\"\"Repeatedly apply binary function to a sequence, accumulating results.\n\n This assumes that the bag is ordered. 
While this is typically the case\n not all Dask.bag functions preserve this property.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> from operator import add\n >>> b = db.from_sequence([1, 2, 3, 4, 5], npartitions=2)\n >>> b.accumulate(add).compute()\n [1, 3, 6, 10, 15]\n\n Accumulate also takes an optional argument that will be used as the\n first value.\n\n >>> b.accumulate(add, initial=-1).compute()\n [-1, 0, 2, 5, 9, 14]\n \"\"\"\n token = tokenize(self, binop, initial)\n binop_name = funcname(binop)\n a = f\"{binop_name}-part-{token}\"\n b = f\"{binop_name}-first-{token}\"\n c = f\"{binop_name}-second-{token}\"\n dsk = {\n (a, 0): (accumulate_part, binop, (self.name, 0), initial, True),\n (b, 0): (first, (a, 0)),\n (c, 0): (second, (a, 0)),\n }\n for i in range(1, self.npartitions):\n dsk[(a, i)] = (accumulate_part, binop, (self.name, i), (c, i - 1))\n dsk[(b, i)] = (first, (a, i))\n dsk[(c, i)] = (second, (a, i))\n graph = HighLevelGraph.from_collections(b, dsk, dependencies=[self])\n return Bag(graph, b, self.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_accumulate_part_collect.return.list_d_items_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_accumulate_part_collect.return.list_d_items_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1709, "end_line": 1733, "span_ids": ["collect", "accumulate_part", "partition"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def accumulate_part(binop, seq, initial, is_first=False):\n if initial == no_default:\n res = list(accumulate(binop, seq))\n else:\n res = list(accumulate(binop, seq, initial=initial))\n if is_first:\n return res, res[-1] if res else [], initial\n return res[1:], res[-1]\n\n\ndef partition(grouper, sequence, npartitions, p, nelements=2**20):\n \"\"\"Partition a bag along a grouper, store partitions on disk.\"\"\"\n for block in partition_all(nelements, sequence):\n d = groupby(grouper, block)\n d2 = defaultdict(list)\n for k, v in d.items():\n d2[abs(hash(k)) % npartitions].extend(v)\n p.append(d2, fsync=True)\n return p\n\n\ndef collect(grouper, group, p, barrier_token):\n \"\"\"Collect partitions from disk and yield k,v group pairs.\"\"\"\n d = groupby(grouper, p.get(group, lock=False))\n return list(d.items())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_sequence_from_sequence.return.Bag_d_name_len_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_sequence_from_sequence.return.Bag_d_name_len_d_", "embedding": null, 
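A minimal sketch (plain Python with ``toolz``, not the dask implementation) of the routing rule inside ``partition`` above: each block is grouped by the grouper, then every group is sent to the bucket ``abs(hash(key)) % npartitions``, so elements with equal keys always land in the same on-disk bucket.

from collections import defaultdict
from toolz import groupby

def route(grouper, block, npartitions):
    # Group the block, then hash each group key to a bucket index.
    buckets = defaultdict(list)
    for k, vals in groupby(grouper, block).items():
        buckets[abs(hash(k)) % npartitions].extend(vals)
    return buckets

buckets = route(lambda x: x % 3, range(12), npartitions=4)
# No element is lost, and every element with the same key shares a bucket.
assert sum(len(v) for v in buckets.values()) == 12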
"metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1736, "end_line": 1780, "span_ids": ["from_sequence"], "tokens": 360}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_sequence(seq, partition_size=None, npartitions=None):\n \"\"\"Create a dask Bag from Python sequence.\n\n This sequence should be relatively small in memory. Dask Bag works\n best when it handles loading your data itself. Commonly we load a\n sequence of filenames into a Bag and then use ``.map`` to open them.\n\n Parameters\n ----------\n seq: Iterable\n A sequence of elements to put into the dask\n partition_size: int (optional)\n The length of each partition\n npartitions: int (optional)\n The number of desired partitions\n\n It is best to provide either ``partition_size`` or ``npartitions``\n (though not both.)\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)\n\n See Also\n --------\n read_text: Create bag from text files\n \"\"\"\n seq = list(seq)\n if npartitions and not partition_size:\n partition_size = int(math.ceil(len(seq) / npartitions))\n if npartitions is None and partition_size is None:\n if len(seq) < 100:\n partition_size = 1\n else:\n partition_size = int(len(seq) / 100)\n\n parts = list(partition_all(partition_size, seq))\n name = \"from_sequence-\" + tokenize(seq, partition_size)\n if len(parts) > 0:\n d = {(name, i): list(part) for i, part in enumerate(parts)}\n else:\n d = {(name, 0): []}\n\n return Bag(d, name, len(d))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_url_from_url.return.Bag_dsk_name_len_urls_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_url_from_url.return.Bag_dsk_name_len_urls_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1783, "end_line": 1812, "span_ids": ["from_url"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_url(urls):\n \"\"\"Create a dask Bag from a url.\n\n Examples\n --------\n >>> a = from_url('http://raw.githubusercontent.com/dask/dask/main/README.rst')\n >>> a.npartitions\n 1\n\n >>> a.take(8) # doctest: +SKIP\n (b'Dask\\\\n',\n b'====\\\\n',\n b'\\\\n',\n b'|Build Status| |Coverage| |Doc Status| |Discourse| |Version Status| |NumFOCUS|\\\\n',\n b'\\\\n',\n b'Dask is a flexible parallel computing library for analytics. 
See\\\\n',\n b'documentation_ for more information.\\\\n',\n b'\\\\n')\n\n >>> b = from_url(['http://github.com', 'http://google.com'])\n >>> b.npartitions\n 2\n \"\"\"\n if isinstance(urls, str):\n urls = [urls]\n name = \"from_url-\" + uuid.uuid4().hex\n dsk = {}\n for i, u in enumerate(urls):\n dsk[(name, i)] = (list, (urlopen, u))\n return Bag(dsk, name, len(urls))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_dictitems_reify.return.seq": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_dictitems_reify.return.seq", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1748, "end_line": 1780, "span_ids": ["dictitems", "concat", "reify"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dictitems(d):\n \"\"\"A pickleable version of dict.items\n\n >>> dictitems({'x': 1})\n [('x', 1)]\n \"\"\"\n return list(d.items())\n\n\ndef concat(bags):\n \"\"\"Concatenate many bags together, unioning all elements.\n\n >>> import dask.bag as db\n >>> a = db.from_sequence([1, 2, 3])\n >>> b = db.from_sequence([4, 5, 6])\n >>> c = db.concat([a, b])\n\n >>> list(c)\n [1, 2, 3, 4, 5, 6]\n \"\"\"\n name = \"concat-\" + tokenize(*bags)\n counter = itertools.count(0)\n dsk = {(name, next(counter)): key for bag in bags for key in bag.__dask_keys__()}\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)\n return Bag(graph, name, len(dsk))\n\n\ndef reify(seq):\n if isinstance(seq, Iterator):\n seq = list(seq)\n if len(seq) and isinstance(seq[0], Iterator):\n seq = list(map(list, seq))\n return seq", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_delayed_from_delayed.return.Bag_graph_name_len_valu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_from_delayed_from_delayed.return.Bag_graph_name_len_valu", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1783, "end_line": 1823, "span_ids": ["from_delayed"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_delayed(values):\n \"\"\"Create bag from many dask Delayed objects.\n\n These objects will become the partitions of the resulting Bag. 
They should\n evaluate to a ``list`` or some other concrete sequence.\n\n Parameters\n ----------\n values: list of delayed values\n An iterable of dask Delayed objects. Each evaluating to a list.\n\n Returns\n -------\n Bag\n\n Examples\n --------\n >>> x, y, z = [delayed(load_sequence_from_file)(fn)\n ... for fn in filenames] # doctest: +SKIP\n >>> b = from_delayed([x, y, z]) # doctest: +SKIP\n\n See also\n --------\n dask.delayed\n \"\"\"\n from dask.delayed import Delayed, delayed\n\n if isinstance(values, Delayed):\n values = [values]\n values = [\n delayed(v) if not isinstance(v, Delayed) and hasattr(v, \"key\") else v\n for v in values\n ]\n\n name = \"bag-from-delayed-\" + tokenize(*values)\n names = [(name, i) for i in range(len(values))]\n values2 = [(reify, v.key) for v in values]\n dsk = dict(zip(names, values2))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=values)\n return Bag(graph, name, len(values))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_chunk_distinct_merge_frequencies.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_chunk_distinct_merge_frequencies.return.out", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1826, "end_line": 1849, "span_ids": ["chunk_distinct", "merge_frequencies", "merge_distinct"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def chunk_distinct(seq, key=None):\n if key is not None and not callable(key):\n key = partial(chunk.getitem, key=key)\n return list(unique(seq, key=key))\n\n\ndef merge_distinct(seqs, key=None):\n return chunk_distinct(toolz.concat(seqs), key=key)\n\n\ndef merge_frequencies(seqs):\n if isinstance(seqs, Iterable):\n seqs = list(seqs)\n if not seqs:\n return {}\n first, rest = seqs[0], seqs[1:]\n if not rest:\n return first\n out = defaultdict(int)\n out.update(first)\n for d in rest:\n for k, v in d.items():\n out[k] += v\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_range_bag_range.return.Bag_dsk_name_npartition": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_range_bag_range.return.Bag_dsk_name_npartition", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1919, "end_line": 1939, "span_ids": ["bag_range"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
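A minimal sketch (plain Python, not the dask implementation) of what ``merge_frequencies`` above computes once the trivial cases are handled: per-partition frequency dicts are summed key-wise into one dict.

from collections import defaultdict

seqs = [{'Alice': 2, 'Bob': 1}, {'Alice': 1}]  # one dict per partition
out = defaultdict(int)
for d in seqs:
    for k, v in d.items():
        out[k] += v
assert dict(out) == {'Alice': 3, 'Bob': 1}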
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_range(n, npartitions):\n \"\"\"Numbers from zero to n\n\n Examples\n --------\n\n >>> import dask.bag as db\n >>> b = db.range(5, npartitions=2)\n >>> list(b)\n [0, 1, 2, 3, 4]\n \"\"\"\n size = n // npartitions\n name = \"range-%d-npartitions-%d\" % (n, npartitions)\n ijs = list(enumerate(take(npartitions, range(0, n, size))))\n dsk = {(name, i): (reify, (range, j, min(j + size, n))) for i, j in ijs}\n\n if n % npartitions != 0:\n i, j = ijs[-1]\n dsk[(name, i)] = (reify, (range, j, n))\n\n return Bag(dsk, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_zip_bag_zip.return.Bag_graph_name_npartiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_zip_bag_zip.return.Bag_graph_name_npartiti", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1942, "end_line": 1990, "span_ids": ["bag_zip"], "tokens": 557}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_zip(*bags):\n \"\"\"Partition-wise bag zip\n\n All passed bags must have the same number of partitions.\n\n NOTE: corresponding partitions should have the same length; if they do not,\n the \"extra\" elements from the longer partition(s) will be dropped. 
If you\n have this case, chances are that what you really need is a data alignment\n mechanism like pandas's, and not a missing value filler like zip_longest.\n\n Examples\n --------\n\n Correct usage:\n\n >>> import dask.bag as db\n >>> evens = db.from_sequence(range(0, 10, 2), partition_size=4)\n >>> odds = db.from_sequence(range(1, 10, 2), partition_size=4)\n >>> pairs = db.zip(evens, odds)\n >>> list(pairs)\n [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]\n\n Incorrect usage:\n\n >>> numbers = db.range(31, npartitions=1)\n >>> fizz = numbers.filter(lambda n: n % 3 == 0)\n >>> buzz = numbers.filter(lambda n: n % 5 == 0)\n >>> fizzbuzz = db.zip(fizz, buzz)\n >>> list(fizzbuzz)\n [(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)]\n\n When what you really wanted was more along the lines of the following:\n\n >>> list(fizzbuzz) # doctest: +SKIP\n (0, 0), (3, None), (None, 5), (6, None), (9, None), (None, 10),\n (12, None), (15, 15), (18, None), (None, 20),\n (21, None), (24, None), (None, 25), (27, None), (30, 30)\n \"\"\"\n npartitions = bags[0].npartitions\n assert all(bag.npartitions == npartitions for bag in bags)\n # TODO: do more checks\n\n name = \"zip-\" + tokenize(*bags)\n dsk = {\n (name, i): (reify, (zip,) + tuple((bag.name, i) for bag in bags))\n for i in range(npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)\n return Bag(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_chunk_map_chunk.return._MapChunk_f_iters_kwarg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_chunk_map_chunk.return._MapChunk_f_iters_kwarg", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1925, "end_line": 1943, "span_ids": ["map_chunk"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_chunk(f, iters, iter_kwarg_keys=None, kwargs=None):\n \"\"\"Map ``f`` across one or more iterables, maybe with keyword arguments.\n\n Low-level function used in ``bag_map``, not user facing.\n\n Arguments\n ---------\n f : callable\n iters : List[Iterable]\n iter_kwarg_keys : List[str] or None\n Keyword names to pair with the tail end of ``iters``, allowing\n keyword arguments to be passed in from iterators.\n kwargs : dict or None\n Additional constant keyword arguments to use on every call to ``f``.\n \"\"\"\n if kwargs:\n f = partial(f, **kwargs)\n iters = [iter(a) for a in iters]\n return _MapChunk(f, iters, kwarg_keys=iter_kwarg_keys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__MapChunk__MapChunk.check_all_iterators_consumed.if_len_self_iters_1_.for_i_in_self_iters_.try_.else_.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__MapChunk__MapChunk.check_all_iterators_consumed.if_len_self_iters_1_.for_i_in_self_iters_.try_.else_.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1946, "end_line": 1981, "span_ids": ["_MapChunk.__next__", "_MapChunk.__init__", "_MapChunk.check_all_iterators_consumed", "_MapChunk"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _MapChunk(Iterator):\n def __init__(self, f, iters, kwarg_keys=None):\n self.f = f\n self.iters = iters\n self.kwarg_keys = kwarg_keys or ()\n self.nkws = len(self.kwarg_keys)\n\n def __next__(self):\n try:\n vals = [next(i) for i in self.iters]\n except StopIteration:\n self.check_all_iterators_consumed()\n raise\n\n if self.nkws:\n args = vals[: -self.nkws]\n kwargs = dict(zip(self.kwarg_keys, vals[-self.nkws :]))\n return self.f(*args, **kwargs)\n return self.f(*vals)\n\n def check_all_iterators_consumed(self):\n if len(self.iters) > 1:\n for i in self.iters:\n if isinstance(i, itertools.repeat):\n continue\n try:\n next(i)\n except StopIteration:\n pass\n else:\n msg = (\n \"map called with multiple bags that aren't identically \"\n \"partitioned. Please ensure that all bag arguments \"\n \"have the same partition lengths\"\n )\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_starmap_chunk_unpack_scalar_dask_kwargs.return.kwargs2_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_starmap_chunk_unpack_scalar_dask_kwargs.return.kwargs2_dependencies", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1984, "end_line": 2008, "span_ids": ["starmap_chunk", "unpack_scalar_dask_kwargs"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def starmap_chunk(f, x, kwargs):\n if kwargs:\n f = partial(f, **kwargs)\n return itertools.starmap(f, x)\n\n\ndef unpack_scalar_dask_kwargs(kwargs):\n \"\"\"Extracts dask values from kwargs.\n\n Currently only ``dask.bag.Item`` and ``dask.delayed.Delayed`` are\n supported. 
Returns a merged dask graph and a task resulting in a keyword\n dict.\n \"\"\"\n kwargs2 = {}\n dependencies = []\n for k, v in kwargs.items():\n vv, collections = unpack_collections(v)\n if not collections:\n kwargs2[k] = v\n else:\n kwargs2[k] = vv\n dependencies.extend(collections)\n if dependencies:\n kwargs2 = (dict, (zip, list(kwargs2), list(kwargs2.values())))\n return kwargs2, dependencies", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map.build_iters_bag_map.return.return_type_graph_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map.build_iters_bag_map.return.return_type_graph_name_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2103, "end_line": 2128, "span_ids": ["bag_map"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_map(func, *args, **kwargs):\n # ... other code\n\n def build_iters(n):\n args = [(a.name, n) if isinstance(a, Bag) else a for a in args2]\n if bag_kwargs:\n args.extend((b.name, n) for b in bag_kwargs.values())\n return args\n\n if bag_kwargs:\n iter_kwarg_keys = list(bag_kwargs)\n else:\n iter_kwarg_keys = None\n\n dsk = {\n (name, n): (\n reify,\n (map_chunk, func, build_iters(n), iter_kwarg_keys, other_kwargs),\n )\n for n in range(npartitions)\n }\n\n # If all bags are the same type, use that type, otherwise fallback to Bag\n return_type = set(map(type, bags))\n return_type = return_type.pop() if len(return_type) == 1 else Bag\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)\n\n return return_type(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_partitions_map_partitions.return.return_type_graph_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_map_partitions_map_partitions.return.return_type_graph_name_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2198, "end_line": 2300, "span_ids": ["map_partitions"], "tokens": 760}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_partitions(func, *args, **kwargs):\n \"\"\"Apply a function to every partition across one or more bags.\n\n Note that all ``Bag`` 
arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n *args, **kwargs : Bag, Item, Delayed, or object\n Arguments and keyword arguments to pass to ``func``.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(1, 101), npartitions=10)\n >>> def div(nums, den=1):\n ... return [num / den for num in nums]\n\n Using a python object:\n\n >>> hi = b.max().compute()\n >>> hi\n 100\n >>> b.map_partitions(div, den=hi).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Using an ``Item``:\n\n >>> b.map_partitions(div, den=b.max()).take(5)\n (0.01, 0.02, 0.03, 0.04, 0.05)\n\n Note that while both versions give the same output, the second forms a\n single graph, and then computes everything at once, and in some cases\n may be more efficient.\n \"\"\"\n name = \"{}-{}\".format(\n funcname(func), tokenize(func, \"map-partitions\", *args, **kwargs)\n )\n dependencies = []\n\n bags = []\n args2 = []\n for a in args:\n if isinstance(a, Bag):\n bags.append(a)\n args2.append(a)\n elif isinstance(a, (Item, Delayed)):\n args2.append(a.key)\n dependencies.append(a)\n else:\n args2.append(a)\n\n bag_kwargs = {}\n other_kwargs = {}\n for k, v in kwargs.items():\n if isinstance(v, Bag):\n bag_kwargs[k] = v\n bags.append(v)\n else:\n other_kwargs[k] = v\n\n other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)\n dependencies.extend(collections)\n\n if not bags:\n raise ValueError(\"At least one argument must be a Bag.\")\n\n npartitions = {b.npartitions for b in bags}\n if len(npartitions) > 1:\n raise ValueError(\"All bags must have the same number of partitions.\")\n npartitions = npartitions.pop()\n\n def build_args(n):\n return [(a.name, n) if isinstance(a, Bag) else a for a in args2]\n\n def build_bag_kwargs(n):\n if not bag_kwargs:\n return {}\n return (\n dict,\n (zip, list(bag_kwargs), [(b.name, n) for b in bag_kwargs.values()]),\n )\n\n if kwargs:\n dsk = {\n (name, n): (\n apply,\n func,\n build_args(n),\n (merge, build_bag_kwargs(n), other_kwargs),\n )\n for n in range(npartitions)\n }\n else:\n dsk = {(name, n): (func,) + tuple(build_args(n)) for n in range(npartitions)}\n\n # If all bags are the same type, use that type, otherwise fallback to Bag\n return_type = set(map(type, bags))\n return_type = return_type.pop() if len(return_type) == 1 else Bag\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)\n\n return return_type(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__reduce_groupby_tasks.return.type_b_graph_name_len_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__reduce_groupby_tasks.return.type_b_graph_name_len_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2303, "end_line": 2404, "span_ids": ["_reduce", "make_group", "groupby_tasks"], "tokens": 654}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _reduce(binop, sequence, initial=no_default):\n if initial is not no_default:\n return reduce(binop, sequence, initial)\n else:\n return reduce(binop, sequence)\n\n\ndef make_group(k, stage):\n def h(x):\n return x[0] // k**stage % k\n\n return h\n\n\ndef groupby_tasks(b, grouper, hash=hash, max_branch=32):\n max_branch = max_branch or 32\n n = b.npartitions\n\n stages = int(math.ceil(math.log(n) / math.log(max_branch))) or 1\n if stages > 1:\n k = int(math.ceil(n ** (1 / stages)))\n else:\n k = n\n\n groups = []\n splits = []\n joins = []\n\n inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k**stages)]\n\n b2 = b.map(partial(chunk.groupby_tasks_group_hash, hash=hash, grouper=grouper))\n\n token = tokenize(b, grouper, hash, max_branch)\n\n shuffle_join_name = \"shuffle-join-\" + token\n shuffle_group_name = \"shuffle-group-\" + token\n shuffle_split_name = \"shuffle-split-\" + token\n\n start = {}\n\n for idx, inp in enumerate(inputs):\n group = {}\n split = {}\n if idx < b.npartitions:\n start[(shuffle_join_name, 0, inp)] = (b2.name, idx)\n else:\n start[(shuffle_join_name, 0, inp)] = []\n\n for stage in range(1, stages + 1):\n _key_tuple = (shuffle_group_name, stage, inp)\n group[_key_tuple] = (\n groupby,\n (make_group, k, stage - 1),\n (shuffle_join_name, stage - 1, inp),\n )\n\n for i in range(k):\n split[(shuffle_split_name, stage, i, inp)] = (\n dict.get,\n _key_tuple,\n i,\n {},\n )\n\n groups.append(group)\n splits.append(split)\n\n for stage in range(1, stages + 1):\n join = {\n (shuffle_join_name, stage, inp): (\n list,\n (\n toolz.concat,\n [\n (\n shuffle_split_name,\n stage,\n inp[stage - 1],\n insert(inp, stage - 1, j),\n )\n for j in range(k)\n ],\n ),\n )\n for inp in inputs\n }\n\n joins.append(join)\n\n name = \"shuffle-\" + token\n\n end = {\n (name, i): (list, (dict.items, (groupby, grouper, (pluck, 1, j))))\n for i, j in enumerate(join)\n }\n\n groups.extend(splits)\n groups.extend(joins)\n\n dsk = merge(start, end, *(groups))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b2])\n return type(b)(graph, name, len(inputs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_groupby_disk_groupby_disk.return.type_b_graph_name_npar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_groupby_disk_groupby_disk.return.type_b_graph_name_npar", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2407, "end_line": 2445, "span_ids": ["groupby_disk"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def groupby_disk(b, grouper, npartitions=None, blocksize=2**20):\n if npartitions is None:\n npartitions = b.npartitions\n token = tokenize(b, grouper, npartitions, blocksize)\n\n import partd\n\n p = (\"partd-\" + token,)\n dirname = 
config.get(\"temporary_directory\", None)\n if dirname:\n file = (apply, partd.File, (), {\"dir\": dirname})\n else:\n file = (partd.File,)\n try:\n dsk1 = {p: (partd.Python, (partd.Snappy, file))}\n except AttributeError:\n dsk1 = {p: (partd.Python, file)}\n\n # Partition data on disk\n name = f\"groupby-part-{funcname(grouper)}-{token}\"\n dsk2 = {\n (name, i): (partition, grouper, (b.name, i), npartitions, p, blocksize)\n for i in range(b.npartitions)\n }\n\n # Barrier\n barrier_token = \"groupby-barrier-\" + token\n\n dsk3 = {barrier_token: (chunk.barrier,) + tuple(dsk2)}\n\n # Collect groups\n name = \"groupby-collect-\" + token\n dsk4 = {\n (name, i): (collect, grouper, i, p, barrier_token) for i in range(npartitions)\n }\n\n dsk = merge(dsk1, dsk2, dsk3, dsk4)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])\n return type(b)(graph, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_empty_safe_apply_safe_take.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_empty_safe_apply_safe_take.return.r", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2448, "end_line": 2474, "span_ids": ["safe_take", "empty_safe_aggregate", "empty_safe_apply"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def empty_safe_apply(func, part, is_last):\n if isinstance(part, Iterator):\n try:\n _, part = peek(part)\n except StopIteration:\n if not is_last:\n return no_result\n return func(part)\n elif not is_last and len(part) == 0:\n return no_result\n else:\n return func(part)\n\n\ndef empty_safe_aggregate(func, parts, is_last):\n parts2 = (p for p in parts if p is not no_result)\n return empty_safe_apply(func, parts2, is_last)\n\n\ndef safe_take(n, b, warn=True):\n r = list(take(n, b))\n if len(r) != n and warn:\n warnings.warn(\n f\"Insufficient elements for `take`. {n} elements requested, only {len(r)} \"\n \"elements available. 
Try passing larger `npartitions` to `take`.\"\n )\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_sample_random_sample.for_i_in_x_.if_random_state_random_.yield_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_sample_random_sample.for_i_in_x_.if_random_state_random_.yield_i", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2416, "end_line": 2432, "span_ids": ["random_sample"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def random_sample(x, state_data, prob):\n \"\"\"Filter elements of `x` by a probability `prob`.\n\n Parameters\n ----------\n x : iterable\n state_data : tuple\n A tuple that can be passed to ``random.Random.setstate``.\n prob : float\n A float between 0 and 1, representing the probability that each\n element will be yielded.\n \"\"\"\n random_state = Random()\n random_state.setstate(state_data)\n for i in x:\n if random_state.random() < prob:\n yield i", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_state_data_python_random_state_data_python.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_random_state_data_python_random_state_data_python.return._", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2435, "end_line": 2457, "span_ids": ["random_state_data_python"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def random_state_data_python(n, random_state=None):\n \"\"\"Return a list of tuples that can be passed to\n ``random.Random.setstate``.\n\n Parameters\n ----------\n n : int\n Number of tuples to return.\n random_state : int or ``random.Random``, optional\n If an int, is used to seed a new ``random.Random``.\n \"\"\"\n if not isinstance(random_state, Random):\n random_state = Random(random_state)\n\n maxuint32 = 1 << 32\n return [\n (\n 3,\n tuple(random_state.randint(0, maxuint32) for i in range(624)) + (624,),\n None,\n )\n for i in range(n)\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_split_to_dataframe.return.res_astype_dtypes_copy_F": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_split_to_dataframe.return.res_astype_dtypes_copy_F", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2460, "end_line": 2483, "span_ids": ["split", "to_dataframe"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def split(seq, n):\n \"\"\"Split apart a sequence into n equal pieces.\n\n >>> split(range(10), 3)\n [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]\n \"\"\"\n if not isinstance(seq, (list, tuple)):\n seq = list(seq)\n\n part = len(seq) / n\n L = [seq[int(part * i) : int(part * (i + 1))] for i in range(n - 1)]\n L.append(seq[int(part * (n - 1)) :])\n return L\n\n\ndef to_dataframe(seq, columns, dtypes):\n import pandas as pd\n\n seq = reify(seq)\n # pd.DataFrame expects lists, only copy if necessary\n if not isinstance(seq, list):\n seq = list(seq)\n res = pd.DataFrame(seq, columns=list(columns))\n return res.astype(dtypes, copy=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_npartitions_total_mem_usage.return.sizeof_partition_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_npartitions_total_mem_usage.return.sizeof_partition_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2486, "end_line": 2518, "span_ids": ["total_mem_usage", "repartition_npartitions"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_npartitions(bag, npartitions):\n \"\"\"Changes the number of partitions of the bag.\n\n This can be used to reduce or increase the number of partitions\n of the bag.\n \"\"\"\n if npartitions == bag.npartitions:\n return bag\n\n new_name = \"repartition-%d-%s\" % (npartitions, tokenize(bag, npartitions))\n if bag.npartitions > npartitions:\n ratio = bag.npartitions / npartitions\n new_partitions_boundaries = [\n int(old_partition_index * ratio)\n for old_partition_index in range(npartitions + 1)\n ]\n return _repartition_from_boundaries(bag, new_partitions_boundaries, new_name)\n else: # npartitions > bag.npartitions\n div, mod = divmod(npartitions, bag.npartitions)\n nsplits = [div] * bag.npartitions\n nsplits[-1] += mod\n return _split_partitions(bag, nsplits, new_name)\n\n\ndef total_mem_usage(partition):\n from copy import deepcopy\n from 
itertools import chain\n\n # if repartition is called multiple times prior to calling compute(), the partitions\n # will be itertools.chain objects. Copy the object to avoid consuming the iterable.\n if isinstance(partition, chain):\n partition = reify(deepcopy(partition))\n return sizeof(partition)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_size_repartition_size.return._repartition_from_boundar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_repartition_size_repartition_size.return._repartition_from_boundar", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2582, "end_line": 2607, "span_ids": ["repartition_size"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_size(bag, size):\n \"\"\"\n Repartition bag so that new partitions have approximately `size` memory usage each\n \"\"\"\n if isinstance(size, str):\n size = parse_bytes(size)\n size = int(size)\n mem_usages = bag.map_partitions(total_mem_usage).compute()\n\n # 1. split each partition that is larger than partition size\n nsplits = [1 + mem_usage // size for mem_usage in mem_usages]\n if any(nsplit > 1 for nsplit in nsplits):\n split_name = f\"repartition-split-{tokenize(bag, size)}\"\n bag = _split_partitions(bag, nsplits, split_name)\n # update mem_usages to account for the split partitions\n split_mem_usages = []\n for n, usage in zip(nsplits, mem_usages):\n split_mem_usages.extend([usage / n] * n)\n mem_usages = split_mem_usages\n\n # 2. 
now that all partitions are less than size, concat them up to size\n assert all(mem_usage <= size for mem_usage in mem_usages)\n new_npartitions = list(map(len, iter_chunks(mem_usages, size)))\n new_partitions_boundaries = accumulate(operator.add, new_npartitions)\n new_name = f\"repartition-{tokenize(bag, size)}\"\n return _repartition_from_boundaries(bag, new_partitions_boundaries, new_name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__split_partitions__split_partitions.return.Bag_graph_name_new_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__split_partitions__split_partitions.return.Bag_graph_name_new_name_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2610, "end_line": 2642, "span_ids": ["_split_partitions"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _split_partitions(bag, nsplits, new_name):\n \"\"\"Split a Dask bag into new partitions\n\n Parameters\n ----------\n bag: Dask bag\n nsplits: List[int]\n Number of target bags for each partition\n The length of nsplits should be the same as bag.npartitions\n new_name: str\n\n See Also\n --------\n repartition_npartitions\n repartition_size\n \"\"\"\n if len(nsplits) != bag.npartitions:\n raise ValueError(f\"nsplits should have len={bag.npartitions}\")\n dsk = {}\n split_name = f\"split-{tokenize(bag, nsplits)}\"\n j = 0\n for i, k in enumerate(nsplits):\n if k == 1:\n dsk[new_name, j] = (bag.name, i)\n j += 1\n else:\n dsk[split_name, i] = (split, (bag.name, i), k)\n for jj in range(k):\n dsk[new_name, j] = (operator.getitem, (split_name, i), jj)\n j += 1\n\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[bag])\n return Bag(graph, name=new_name, npartitions=sum(nsplits))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__repartition_from_boundaries_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py__repartition_from_boundaries_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2584, "end_line": 2610, "span_ids": ["_repartition_from_boundaries"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _repartition_from_boundaries(bag, 
new_partitions_boundaries, new_name):\n if not isinstance(new_partitions_boundaries, list):\n new_partitions_boundaries = list(new_partitions_boundaries)\n if new_partitions_boundaries[0] > 0:\n new_partitions_boundaries.insert(0, 0)\n if new_partitions_boundaries[-1] < bag.npartitions:\n new_partitions_boundaries.append(bag.npartitions)\n num_new_partitions = len(new_partitions_boundaries) - 1\n dsk = {}\n for new_partition_index in range(num_new_partitions):\n value = (\n list,\n (\n toolz.concat,\n [\n (bag.name, old_partition_index)\n for old_partition_index in range(\n new_partitions_boundaries[new_partition_index],\n new_partitions_boundaries[new_partition_index + 1],\n )\n ],\n ),\n )\n dsk[new_name, new_partition_index] = value\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[bag])\n return Bag(graph, name=new_name, npartitions=num_new_partitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_heapq_sample.return._sample_population_popula": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_heapq_sample.return._sample_population_popula", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["imports", "sample"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import heapq\nimport math\nimport random as rnd\nfrom functools import partial\n\nfrom .core import Bag\n\n\ndef sample(population, k):\n \"\"\"Chooses k unique random elements from a bag.\n\n Returns a new bag containing elements from the population while\n leaving the original population unchanged.\n\n Parameters\n ----------\n population: Bag\n Elements to sample.\n k: integer\n Number of elements to sample.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> from dask.bag import random\n >>> b = db.from_sequence(range(5), npartitions=2)\n >>> list(random.sample(b, 3).compute()) # doctest: +SKIP\n [1, 3, 4]\n \"\"\"\n return _sample(population=population, k=k, replace=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_choices__sample.return.population_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py_choices__sample.return.population_reduction_", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 60, "span_ids": ["_sample", "choices"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def choices(population, k=1):\n \"\"\"\n Return a k sized list of elements chosen with replacement.\n\n Parameters\n ----------\n population: Bag\n Elements to sample.\n k: integer, optional\n Number of elements to sample.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> from dask.bag import random\n >>> b = db.from_sequence(range(5), npartitions=2)\n >>> list(random.choices(b, 3).compute()) # doctest: +SKIP\n [1, 1, 5]\n \"\"\"\n return _sample(population=population, k=k, replace=True)\n\n\ndef _sample(population, k, replace=False):\n return population.reduction(\n partial(_sample_map_partitions, k=k, replace=replace),\n partial(_sample_reduce, k=k, replace=replace),\n out_type=Bag,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_map_partitions__sample_map_partitions.return.sampled_lx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_map_partitions__sample_map_partitions.return.sampled_lx", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 91, "span_ids": ["_sample_map_partitions"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sample_map_partitions(population, k, replace):\n \"\"\"\n Map function used on the sample and choices functions.\n Parameters\n ----------\n population : list\n List of elements to sample.\n k : int, optional\n Number of elements to sample. 
Default is 1.\n\n Returns\n -------\n sample: list\n List of sampled elements from the partition.\n lx: int\n Number of elements on the partition.\n k: int\n Number of elements to sample.\n \"\"\"\n population = list(population)\n lx = len(population)\n real_k = k if k <= lx else lx\n sample_func = rnd.choices if replace else rnd.sample\n # because otherwise it raises IndexError:\n sampled = [] if real_k == 0 else sample_func(population=population, k=real_k)\n return sampled, lx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_reduce_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/random.py__sample_reduce_", "embedding": null, "metadata": {"file_path": "dask/bag/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 90, "end_line": 133, "span_ids": ["_sample_reduce", "_weighted_sampling_without_replacement"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sample_reduce(reduce_iter, k, replace):\n \"\"\"\n Reduce function used on the sample and choice functions.\n\n Parameters\n ----------\n reduce_iter : iterable\n Each element is a tuple coming generated by the _sample_map_partitions function.\n\n Returns a sequence of uniformly distributed samples;\n \"\"\"\n ns_ks = []\n s = []\n n = 0\n # unfolding reduce outputs\n for i in reduce_iter:\n (s_i, n_i) = i\n s.extend(s_i)\n n += n_i\n k_i = len(s_i)\n ns_ks.append((n_i, k_i))\n\n if k < 0 or (k > n and not replace):\n raise ValueError(\"Sample larger than population or is negative\")\n\n # creating the probability array\n p = []\n for n_i, k_i in ns_ks:\n if k_i > 0:\n p_i = n_i / (k_i * n)\n p += [p_i] * k_i\n\n sample_func = rnd.choices if replace else _weighted_sampling_without_replacement\n return sample_func(population=s, weights=p, k=k)\n\n\ndef _weighted_sampling_without_replacement(population, weights, k):\n \"\"\"\n Source:\n Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. 
Spirakis\n \"\"\"\n elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))]\n return [population[x[1]] for x in heapq.nlargest(k, elt)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_os_test_onefile_oneblock.assert_b_compute_exp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_os_test_onefile_oneblock.assert_b_compute_exp", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 33, "span_ids": ["imports", "test_onefile_oneblock"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport random\n\nimport pytest\n\nimport dask.bag as db\n\nfastavro = pytest.importorskip(\"fastavro\")\n\nexpected = [\n {\n \"name\": random.choice([\"fred\", \"wilma\", \"barney\", \"betty\"]),\n \"number\": random.randint(0, 100),\n }\n for _ in range(1000)\n]\nschema = {\n \"doc\": \"Descr\",\n \"name\": \"Random\",\n \"namespace\": \"test\",\n \"type\": \"record\",\n \"fields\": [{\"name\": \"name\", \"type\": \"string\"}, {\"name\": \"number\", \"type\": \"int\"}],\n}\n\n\ndef test_onefile_oneblock(tmpdir):\n tmpdir = str(tmpdir)\n fn = os.path.join(tmpdir, \"one.avro\")\n with open(fn, \"wb\") as f:\n fastavro.writer(f, records=expected, schema=schema)\n b = db.read_avro(fn, blocksize=None)\n assert b.npartitions == 1\n assert b.compute() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_oneblock_test_twofile_oneblock.assert_b_compute_exp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_oneblock_test_twofile_oneblock.assert_b_compute_exp", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 34, "end_line": 44, "span_ids": ["test_twofile_oneblock"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_twofile_oneblock(tmpdir):\n tmpdir = str(tmpdir)\n fn1 = os.path.join(tmpdir, \"one.avro\")\n fn2 = os.path.join(tmpdir, \"two.avro\")\n with open(fn1, \"wb\") as f:\n fastavro.writer(f, records=expected[:500], schema=schema)\n with open(fn2, \"wb\") as f:\n fastavro.writer(f, records=expected[500:], schema=schema)\n b 
= db.read_avro(os.path.join(tmpdir, \"*.avro\"), blocksize=None)\n assert b.npartitions == 2\n assert b.compute() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_multiblock_test_twofile_multiblock.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_twofile_multiblock_test_twofile_multiblock.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 47, "end_line": 61, "span_ids": ["test_twofile_multiblock"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_twofile_multiblock(tmpdir):\n tmpdir = str(tmpdir)\n fn1 = os.path.join(tmpdir, \"one.avro\")\n fn2 = os.path.join(tmpdir, \"two.avro\")\n with open(fn1, \"wb\") as f:\n fastavro.writer(f, records=expected[:500], schema=schema, sync_interval=100)\n with open(fn2, \"wb\") as f:\n fastavro.writer(f, records=expected[500:], schema=schema, sync_interval=100)\n b = db.read_avro(os.path.join(tmpdir, \"*.avro\"), blocksize=None)\n assert b.npartitions == 2\n assert b.compute() == expected\n\n b = db.read_avro(os.path.join(tmpdir, \"*.avro\"), blocksize=1000)\n assert b.npartitions > 2\n assert b.compute() == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_simple_test_roundtrip_simple.assert_b_compute_b2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_simple_test_roundtrip_simple.assert_b_compute_b2_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 80, "span_ids": ["test_roundtrip_simple"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_roundtrip_simple(tmpdir):\n from dask.delayed import Delayed\n\n tmpdir = str(tmpdir)\n fn = os.path.join(tmpdir, \"out*.avro\")\n b = db.from_sequence([{\"a\": i} for i in [1, 2, 3, 4, 5]], npartitions=2)\n schema = {\n \"name\": \"Test\",\n \"type\": \"record\",\n \"fields\": [{\"name\": \"a\", \"type\": \"int\"}],\n }\n out = b.to_avro(fn, schema, compute=False)\n assert isinstance(out[0], Delayed)\n out = b.to_avro(fn, schema)\n assert len(out) == 2\n b2 = db.read_avro(fn)\n assert 
b.compute() == b2.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_test_roundtrip.assert_b_compute_b2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_roundtrip_test_roundtrip.assert_b_compute_b2_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 83, "end_line": 92, "span_ids": ["test_roundtrip"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"codec\", [\"null\", \"deflate\", \"snappy\"])\ndef test_roundtrip(tmpdir, codec):\n tmpdir = str(tmpdir)\n if codec == \"snappy\":\n pytest.importorskip(\"snappy\")\n fn = os.path.join(tmpdir, \"out*.avro\")\n b = db.from_sequence(expected, npartitions=3)\n b.to_avro(fn, schema=schema, codec=codec)\n b2 = db.read_avro(fn)\n assert b.compute() == b2.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_invalid_schema_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_avro.py_test_invalid_schema_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_avro.py", "file_name": "test_avro.py", "file_type": "text/x-python", "category": "test", "start_line": 95, "end_line": 117, "span_ids": ["test_invalid_schema"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_invalid_schema(tmpdir):\n tmpdir = str(tmpdir)\n b = db.from_sequence(expected, npartitions=3)\n fn = os.path.join(tmpdir, \"out*.avro\")\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema=[])\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"doc\": \"unknown\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\", \"type\": \"wrong\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\", \"type\": \"record\"})\n with pytest.raises(AssertionError):\n b.to_avro(fn, schema={\"name\": \"test\", \"type\": \"record\"})\n with pytest.raises(AssertionError):\n b.to_avro(\n fn, schema={\"name\": \"test\", \"type\": \"record\", \"fields\": [{\"name\": \"a\"}]}\n )", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_map_test_bag_map.None_3.db_map_myadd_b_b_unequa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_map_test_bag_map.None_3.db_map_myadd_b_b_unequa", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 113, "span_ids": ["test_bag_map"], "tokens": 618}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bag_map():\n b = db.from_sequence(range(100), npartitions=10)\n b2 = db.from_sequence(range(100, 200), npartitions=10)\n x = b.compute()\n x2 = b2.compute()\n\n def myadd(a=1, b=2, c=3):\n return a + b + c\n\n assert_eq(db.map(myadd, b), list(map(myadd, x)))\n assert_eq(db.map(myadd, a=b), list(map(myadd, x)))\n assert_eq(db.map(myadd, b, b2), list(map(myadd, x, x2)))\n assert_eq(db.map(myadd, b, 10), [myadd(i, 10) for i in x])\n assert_eq(db.map(myadd, 10, b=b), [myadd(10, b=i) for i in x])\n\n sol = [myadd(i, b=j, c=100) for (i, j) in zip(x, x2)]\n assert_eq(db.map(myadd, b, b=b2, c=100), sol)\n\n sol = [myadd(i, c=100) for (i, j) in zip(x, x2)]\n assert_eq(db.map(myadd, b, c=100), sol)\n\n x_sum = sum(x)\n sol = [myadd(x_sum, b=i, c=100) for i in x2]\n assert_eq(db.map(myadd, b.sum(), b=b2, c=100), sol)\n\n sol = [myadd(i, b=x_sum, c=100) for i in x2]\n assert_eq(db.map(myadd, b2, b.sum(), c=100), sol)\n\n sol = [myadd(a=100, b=x_sum, c=i) for i in x2]\n assert_eq(db.map(myadd, a=100, b=b.sum(), c=b2), sol)\n\n a = dask.delayed(10)\n assert_eq(db.map(myadd, b, a), [myadd(i, 10) for i in x])\n assert_eq(db.map(myadd, b, b=a), [myadd(i, b=10) for i in x])\n\n # Mispatched npartitions\n fewer_parts = db.from_sequence(range(100), npartitions=5)\n with pytest.raises(ValueError):\n db.map(myadd, b, fewer_parts)\n\n # No bags\n with pytest.raises(ValueError):\n db.map(myadd, b.sum(), 1, 2)\n\n # Unequal partitioning\n unequal = db.from_sequence(range(110), npartitions=10)\n with pytest.raises(ValueError):\n db.map(myadd, b, unequal, c=b2).compute()\n with pytest.raises(ValueError):\n db.map(myadd, b, b=unequal, c=b2).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_method_test_map_method.assert_b_map_myadd_b_sum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_method_test_map_method.assert_b_map_myadd_b_sum", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 133, "span_ids": ["test_map_method"], "tokens": 254}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_method():\n b = db.from_sequence(range(100), npartitions=10)\n b2 = db.from_sequence(range(100, 200), npartitions=10)\n x = b.compute()\n x2 = b2.compute()\n\n def myadd(a, b=2, c=3):\n return a + b + c\n\n assert b.map(myadd).compute() == list(map(myadd, x))\n assert b.map(myadd, b2).compute() == list(map(myadd, x, x2))\n assert b.map(myadd, 10).compute() == [myadd(i, 10) for i in x]\n assert b.map(myadd, b=10).compute() == [myadd(i, b=10) for i in x]\n assert b.map(myadd, b2, c=10).compute() == [\n myadd(i, j, 10) for (i, j) in zip(x, x2)\n ]\n x_sum = sum(x)\n assert b.map(myadd, b.sum(), c=10).compute() == [myadd(i, x_sum, 10) for i in x]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_starmap_test_starmap.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_starmap_test_starmap.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 136, "end_line": 150, "span_ids": ["test_starmap"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_starmap():\n data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]\n b = db.from_sequence(data, npartitions=2)\n\n def myadd(a, b, c=0):\n return a + b + c\n\n assert b.starmap(myadd).compute() == [myadd(*a) for a in data]\n assert b.starmap(myadd, c=10).compute() == [myadd(*a, c=10) for a in data]\n max_second = b.pluck(1).max()\n assert b.starmap(myadd, c=max_second).compute() == [\n myadd(*a, c=max_second.compute()) for a in data\n ]\n c = dask.delayed(10)\n assert b.starmap(myadd, c=c).compute() == [myadd(*a, c=10) for a in data]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_filter_test_repr.assert_from_sequence_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_filter_test_repr.assert_from_sequence_in", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 163, "end_line": 193, "span_ids": ["test_repr", "test_iter", "test_filter", "test_remove"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_filter():\n c = b.filter(iseven)\n expected = merge(\n dsk,\n {\n (c.name, i): (reify, (filter, iseven, (b.name, i)))\n for i in range(b.npartitions)\n },\n )\n assert c.dask == expected\n assert c.name == b.filter(iseven).name\n\n\ndef test_remove():\n f = lambda x: x % 2 == 0\n c = b.remove(f)\n assert list(c) == [1, 3] * 3\n assert c.name == b.remove(f).name\n\n\ndef test_iter():\n assert sorted(list(b)) == sorted(L)\n assert sorted(list(b.map(inc))) == sorted(list(range(1, 6)) * 3)\n\n\n@pytest.mark.parametrize(\"func\", [str, repr])\ndef test_repr(func):\n assert str(b.npartitions) in func(b)\n assert b.name[:5] in func(b)\n\n assert \"from_sequence\" in func(db.from_sequence(range(5)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_test_pluck.assert_b_pluck_1_0_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_test_pluck.assert_b_pluck_1_0_na", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 186, "end_line": 192, "span_ids": ["test_pluck"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pluck():\n d = {(\"x\", 0): [(1, 10), (2, 20)], (\"x\", 1): [(3, 30), (4, 40)]}\n b = Bag(d, \"x\", 2)\n assert set(b.pluck(0)) == {1, 2, 3, 4}\n assert set(b.pluck(1)) == {10, 20, 30, 40}\n assert set(b.pluck([1, 0])) == {(10, 1), (20, 2), (30, 3), (40, 4)}\n assert b.pluck([1, 0]).name == b.pluck([1, 0]).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_with_default_test_unzip.assert_one_name_two_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_pluck_with_default_test_unzip.assert_one_name_two_na", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 195, "end_line": 209, "span_ids": ["test_pluck_with_default", "test_unzip"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pluck_with_default():\n b = 
db.from_sequence([\"Hello\", \"\", \"World\"])\n pytest.raises(IndexError, lambda: list(b.pluck(0)))\n assert list(b.pluck(0, None)) == [\"H\", None, \"W\"]\n assert b.pluck(0, None).name == b.pluck(0, None).name\n assert b.pluck(0).name != b.pluck(0, None).name\n\n\ndef test_unzip():\n b = db.from_sequence(range(100)).map(lambda x: (x, x + 1, x + 2))\n one, two, three = b.unzip(3)\n assert list(one) == list(range(100))\n assert list(three) == [i + 2 for i in range(100)]\n assert one.name == b.unzip(3)[0].name\n assert one.name != two.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_test_fold.assert_set_e_fold_add_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_test_fold.assert_set_e_fold_add_in", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 212, "end_line": 239, "span_ids": ["test_fold"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fold():\n c = b.fold(add)\n assert c.compute() == sum(L)\n assert c.key == b.fold(add).key\n\n c2 = b.fold(add, initial=10)\n assert c2.key != c.key\n assert c2.compute() == sum(L) + 10 * b.npartitions\n assert c2.key == b.fold(add, initial=10).key\n\n c = db.from_sequence(range(5), npartitions=3)\n\n def binop(acc, x):\n acc = acc.copy()\n acc.add(x)\n return acc\n\n d = c.fold(binop, set.union, initial=set())\n assert d.compute() == set(c)\n assert d.key == c.fold(binop, set.union, initial=set()).key\n\n d = db.from_sequence(\"hello\")\n assert set(d.fold(lambda a, b: \"\".join([a, b]), initial=\"\").compute()) == set(\n \"hello\"\n )\n\n e = db.from_sequence([[1], [2], [3]], npartitions=2)\n assert set(e.fold(add, initial=[]).compute(scheduler=\"sync\")) == {1, 2, 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_bag_test_distinct.assert_bag_filter_None_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_fold_bag_test_distinct.assert_bag_filter_None_d", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 242, "end_line": 258, "span_ids": ["test_fold_bag", "test_distinct"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_fold_bag():\n def binop(tot, x):\n tot.add(x)\n return tot\n\n c = b.fold(binop, combine=set.union, initial=set(), out_type=Bag)\n assert isinstance(c, Bag)\n assert_eq(c, list(set(range(5))))\n\n\ndef test_distinct():\n assert sorted(b.distinct()) == [0, 1, 2, 3, 4]\n assert b.distinct().name == b.distinct().name\n assert \"distinct\" in b.distinct().name\n assert b.distinct().count().compute() == 5\n bag = db.from_sequence([0] * 50, npartitions=50)\n assert bag.filter(None).distinct().compute() == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_distinct_with_key_test_distinct_with_key.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_distinct_with_key_test_distinct_with_key.None_1", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 261, "end_line": 266, "span_ids": ["test_distinct_with_key"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_distinct_with_key():\n seq = [{\"a\": i} for i in [0, 1, 2, 1, 2, 3, 2, 3, 4, 5]]\n bag = db.from_sequence(seq, npartitions=3)\n expected = list(unique(seq, key=lambda x: x[\"a\"]))\n assert_eq(bag.distinct(key=\"a\"), expected)\n assert_eq(bag.distinct(key=lambda x: x[\"a\"]), expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_test_frequencies.assert_eq_bag2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_test_frequencies.assert_eq_bag2_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 284, "span_ids": ["test_frequencies"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frequencies():\n c = b.frequencies()\n assert dict(c) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}\n c2 = b.frequencies(split_every=2)\n assert dict(c2) == {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}\n assert c.name == b.frequencies().name\n assert c.name != c2.name\n assert c2.name == b.frequencies(split_every=2).name\n # test bag with empty partitions\n b2 = db.from_sequence(range(20), partition_size=2)\n b2 = b2.filter(lambda x: x < 10)\n d = b2.frequencies()\n 
assert dict(d) == dict(zip(range(10), [1] * 10))\n bag = db.from_sequence([0, 0, 0, 0], npartitions=4)\n bag2 = bag.filter(None).frequencies(split_every=2)\n assert_eq(bag2, [])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_sorted_test_topk.assert_b_topk_4_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_frequencies_sorted_test_topk.assert_b_topk_4_name_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 287, "end_line": 299, "span_ids": ["test_topk", "test_frequencies_sorted"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frequencies_sorted():\n b = db.from_sequence([\"a\", \"b\", \"b\", \"b\", \"c\", \"c\"])\n assert list(b.frequencies(sort=True).compute()) == [(\"b\", 3), (\"c\", 2), (\"a\", 1)]\n\n\ndef test_topk():\n assert list(b.topk(4)) == [4, 4, 4, 3]\n c = b.topk(4, key=lambda x: -x)\n assert list(c) == [0, 0, 0, 1]\n c2 = b.topk(4, key=lambda x: -x, split_every=2)\n assert list(c2) == [0, 0, 0, 1]\n assert c.name != c2.name\n assert b.topk(4).name == b.topk(4).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_non_callable_key_test_topk_with_non_callable_key.assert_b_topk_2_key_1_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_non_callable_key_test_topk_with_non_callable_key.assert_b_topk_2_key_1_n", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 302, "end_line": 307, "span_ids": ["test_topk_with_non_callable_key"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 2])\ndef test_topk_with_non_callable_key(npartitions):\n b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=npartitions)\n assert list(b.topk(2, key=1)) == [(1, 10), (2, 9)]\n assert list(b.topk(2, key=0)) == [(3, 8), (2, 9)]\n assert b.topk(2, key=1).name == b.topk(2, key=1).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_multiarg_lambda_test_reduction_names.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_topk_with_multiarg_lambda_test_reduction_names.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 310, "end_line": 336, "span_ids": ["test_topk_with_multiarg_lambda", "test_reductions", "test_reduction_names", "test_lambdas"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_topk_with_multiarg_lambda():\n b = db.from_sequence([(1, 10), (2, 9), (3, 8)], npartitions=2)\n assert list(b.topk(2, key=lambda a, b: b)) == [(1, 10), (2, 9)]\n\n\ndef test_lambdas():\n assert list(b.map(lambda x: x + 1)) == list(b.map(inc))\n\n\ndef test_reductions():\n assert int(b.count()) == 15\n assert int(b.sum()) == 30\n assert int(b.max()) == 4\n assert int(b.min()) == 0\n assert b.any().compute() is True\n assert b.all().compute() is False\n assert b.all().key == b.all().key\n assert b.all().key != b.any().key\n\n\ndef test_reduction_names():\n assert b.sum().name.startswith(\"sum\")\n assert b.reduction(sum, sum).name.startswith(\"sum\")\n assert any(\n isinstance(k, str) and k.startswith(\"max\") for k in b.reduction(sum, max).dask\n )\n assert b.reduction(sum, sum, name=\"foo\").name.startswith(\"foo\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_tree_reductions_test_tree_reductions.assert_c_key_b_sum_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_tree_reductions_test_tree_reductions.assert_c_key_b_sum_k", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 339, "end_line": 357, "span_ids": ["test_tree_reductions"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tree_reductions():\n b = db.from_sequence(range(12))\n c = b.reduction(sum, sum, split_every=2)\n d = b.reduction(sum, sum, split_every=6)\n e = b.reduction(sum, sum, split_every=5)\n\n assert c.compute() == d.compute() == e.compute()\n\n assert len(c.dask) > len(d.dask)\n\n c = b.sum(split_every=2)\n d = b.sum(split_every=5)\n\n assert c.compute() == d.compute()\n assert len(c.dask) > len(d.dask)\n\n assert c.key != d.key\n assert c.key == b.sum(split_every=2).key\n assert c.key != b.sum().key", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_aggregation_test_var.assert_float_b_var_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_aggregation_test_var.assert_float_b_var_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 360, "end_line": 386, "span_ids": ["test_non_splittable_reductions", "test_aggregation", "test_var", "test_std"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 3, 4])\ndef test_aggregation(npartitions):\n L = list(range(15))\n b = db.range(15, npartitions=npartitions)\n assert_eq(b.mean(), sum(L) / len(L))\n assert_eq(b.sum(), sum(L))\n assert_eq(b.count(), len(L))\n\n\n@pytest.mark.parametrize(\"npartitions\", [1, 10])\ndef test_non_splittable_reductions(npartitions):\n np = pytest.importorskip(\"numpy\")\n data = list(range(100))\n c = db.from_sequence(data, npartitions=npartitions)\n\n assert_eq(c.mean(), np.mean(data))\n assert_eq(c.std(), np.std(data))\n\n\ndef test_std():\n assert_eq(b.std(), math.sqrt(2.0))\n assert float(b.std()) == math.sqrt(2.0)\n\n\ndef test_var():\n assert_eq(b.var(), 2.0)\n assert float(b.var()) == 2.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_join_test_join.assert_c_name_b_join_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_join_test_join.assert_c_name_b_join_o", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 389, "end_line": 397, "span_ids": ["test_join"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"transform\", [identity, dask.delayed, lambda x: db.from_sequence(x, npartitions=1)]\n)\ndef test_join(transform):\n other = transform([1, 2, 3])\n c = b.join(other, on_self=isodd, on_other=iseven)\n assert_eq(c, list(join(iseven, [1, 2, 3], isodd, list(b))))\n assert_eq(b.join(other, isodd), list(join(isodd, [1, 2, 3], isodd, list(b))))\n assert c.name == b.join(other, on_self=isodd, on_other=iseven).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_test_foldby.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_test_foldby.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 400, "end_line": 407, "span_ids": ["test_foldby"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_foldby():\n c = b.foldby(iseven, add, 0, add, 0)\n assert (reduceby, iseven, add, (b.name, 0), 0) in list(c.dask.values())\n assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())\n assert c.name == b.foldby(iseven, add, 0, add, 0).name\n\n c = b.foldby(iseven, lambda acc, x: acc + x)\n assert set(c) == set(reduceby(iseven, lambda acc, x: acc + x, L, 0).items())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_tree_reduction_test_map_partitions.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_foldby_tree_reduction_test_map_partitions.None_2", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 410, "end_line": 430, "span_ids": ["test_foldby_tree_reduction", "test_map_partitions"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_foldby_tree_reduction():\n dsk = list()\n for n in [1, 7, 32]:\n b = db.from_sequence(range(100), npartitions=n)\n c = b.foldby(iseven, add)\n dsk.extend([c])\n for m in [False, None, 2, 3]:\n d = b.foldby(iseven, add, split_every=m)\n e = b.foldby(iseven, add, 0, split_every=m)\n f = b.foldby(iseven, add, 0, add, split_every=m)\n g = b.foldby(iseven, add, 0, add, 0, split_every=m)\n dsk.extend([d, e, f, g])\n results = dask.compute(dsk)\n first = results[0]\n assert all([r == first for r in results])\n\n\ndef test_map_partitions():\n assert list(b.map_partitions(len)) == [5, 5, 5]\n assert b.map_partitions(len).name == b.map_partitions(len).name\n assert b.map_partitions(lambda a: len(a) + 1).name != b.map_partitions(len).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_partitions_args_kwargs_test_map_partitions_args_kwargs.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_partitions_args_kwargs_test_map_partitions_args_kwargs.None_9", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 461, "span_ids": ["test_map_partitions_args_kwargs"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_args_kwargs():\n x = [random.randint(-100, 100) for i in range(100)]\n y = [random.randint(-100, 100) for i in range(100)]\n\n dx = db.from_sequence(x, npartitions=10)\n dy = db.from_sequence(y, npartitions=10)\n\n def maximum(x, y=0):\n y = repeat(y) if isinstance(y, int) else y\n return [max(a, b) for (a, b) in zip(x, y)]\n\n sol = maximum(x, y=10)\n assert_eq(db.map_partitions(maximum, dx, y=10), sol)\n assert_eq(dx.map_partitions(maximum, y=10), sol)\n assert_eq(dx.map_partitions(maximum, 10), sol)\n\n sol = maximum(x, y)\n assert_eq(db.map_partitions(maximum, dx, dy), sol)\n assert_eq(dx.map_partitions(maximum, y=dy), sol)\n assert_eq(dx.map_partitions(maximum, dy), sol)\n\n dy_mean = dy.mean().apply(int)\n sol = maximum(x, int(sum(y) / len(y)))\n assert_eq(dx.map_partitions(maximum, y=dy_mean), sol)\n assert_eq(dx.map_partitions(maximum, dy_mean), sol)\n\n dy_mean = dask.delayed(dy_mean)\n assert_eq(dx.map_partitions(maximum, y=dy_mean), sol)\n assert_eq(dx.map_partitions(maximum, dy_mean), sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_random_sample_size_test_random_sample_random_state.assert_list_b_list_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_random_sample_size_test_random_sample_random_state.assert_list_b_list_c_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 464, "end_line": 512, "span_ids": ["test_random_sample_different_definitions", "test_random_sample_random_state", "test_random_sample_repeated_computation", "test_random_sample_size", "test_random_sample_prob_range"], "tokens": 396}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_sample_size():\n \"\"\"\n Number of randomly sampled elements are in the expected range.\n \"\"\"\n a = db.from_sequence(range(1000), npartitions=5)\n # we expect a size of approx. 
100, but leave large margins to avoid\n # random failures\n assert 10 < len(list(a.random_sample(0.1, 42))) < 300\n\n\ndef test_random_sample_prob_range():\n \"\"\"\n Specifying probabilities outside the range [0, 1] raises ValueError.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n with pytest.raises(ValueError):\n a.random_sample(-1)\n with pytest.raises(ValueError):\n a.random_sample(1.1)\n\n\ndef test_random_sample_repeated_computation():\n \"\"\"\n Repeated computation of a defined random sampling operation\n generates identical results.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n b = a.random_sample(0.2)\n assert list(b) == list(b) # computation happens here\n\n\ndef test_random_sample_different_definitions():\n \"\"\"\n Repeatedly defining a random sampling operation yields different results\n upon computation if no random seed is specified.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n assert list(a.random_sample(0.5)) != list(a.random_sample(0.5))\n assert a.random_sample(0.5).name != a.random_sample(0.5).name\n\n\ndef test_random_sample_random_state():\n \"\"\"\n Sampling with fixed random seed generates identical results.\n \"\"\"\n a = db.from_sequence(range(50), npartitions=5)\n b = a.random_sample(0.5, 1234)\n c = a.random_sample(0.5, 1234)\n assert list(b) == list(c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_lazify_task_test_lazify_task.assert_lazify_task_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_lazify_task_test_lazify_task.assert_lazify_task_a_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 515, "end_line": 524, "span_ids": ["test_lazify_task"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_lazify_task():\n task = (sum, (reify, (map, inc, [1, 2, 3])))\n assert lazify_task(task) == (sum, (map, inc, [1, 2, 3]))\n\n task = (reify, (map, inc, [1, 2, 3]))\n assert lazify_task(task) == task\n\n a = (reify, (map, inc, (reify, (filter, iseven, \"y\"))))\n b = (reify, (map, inc, (filter, iseven, \"y\")))\n assert lazify_task(a) == b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_f_test_lazify.assert_lazify_a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_f_test_lazify.assert_lazify_a_b", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 537, "end_line": 547, "span_ids": 
["test_lazify", "impl:7"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "f = lambda x: x\n\n\ndef test_lazify():\n a = {\n \"x\": (reify, (map, inc, (reify, (filter, iseven, \"y\")))),\n \"a\": (f, \"x\"),\n \"b\": (f, \"x\"),\n }\n b = {\"x\": (reify, (map, inc, (filter, iseven, \"y\"))), \"a\": (f, \"x\"), \"b\": (f, \"x\")}\n assert lazify(a) == b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_inline_singleton_lists_test_inline_singleton_lists.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_inline_singleton_lists_test_inline_singleton_lists.None_5", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 540, "end_line": 557, "span_ids": ["test_inline_singleton_lists"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_singleton_lists():\n inp = {\"b\": (list, \"a\"), \"c\": (f, \"b\", 1)}\n out = {\"c\": (f, (list, \"a\"), 1)}\n assert inline_singleton_lists(inp, [\"c\"]) == out\n\n out = {\"c\": (f, \"a\", 1)}\n assert optimize(inp, [\"c\"], rename_fused_keys=False) == out\n\n # If list is an output key, don't fuse it\n assert inline_singleton_lists(inp, [\"b\", \"c\"]) == inp\n assert optimize(inp, [\"b\", \"c\"], rename_fused_keys=False) == inp\n\n inp = {\"b\": (list, \"a\"), \"c\": (f, \"b\", 1), \"d\": (f, \"b\", 2)}\n assert inline_singleton_lists(inp, [\"c\", \"d\"]) == inp\n\n # Doesn't inline constants\n inp = {\"b\": (4, 5), \"c\": (f, \"b\")}\n assert inline_singleton_lists(inp, [\"c\"]) == inp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_rename_fused_keys_bag_test_rename_fused_keys_bag.assert_optimize_inp_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_rename_fused_keys_bag_test_rename_fused_keys_bag.assert_optimize_inp_c_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 560, "end_line": 571, "span_ids": ["test_rename_fused_keys_bag"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename_fused_keys_bag():\n inp = {\"b\": (list, \"a\"), \"c\": (f, \"b\", 1)}\n\n outp = optimize(inp, [\"c\"], rename_fused_keys=False)\n assert outp.keys() == {\"c\"}\n assert outp[\"c\"][1:] == (\"a\", 1)\n\n with dask.config.set({\"optimization.fuse.rename-keys\": False}):\n assert optimize(inp, [\"c\"]) == outp\n\n # By default, fused keys are renamed\n assert optimize(inp, [\"c\"]) != outp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_test_take_npartitions.with_pytest_raises_ValueE.b_take_1_npartitions_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_test_take_npartitions.with_pytest_raises_ValueE.b_take_1_npartitions_5_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 585, "span_ids": ["test_take_npartitions", "test_take"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take():\n assert list(b.take(2)) == [0, 1]\n assert b.take(2) == (0, 1)\n assert isinstance(b.take(2, compute=False), Bag)\n\n\ndef test_take_npartitions():\n assert list(b.take(6, npartitions=2)) == [0, 1, 2, 3, 4, 0]\n assert b.take(6, npartitions=-1) == (0, 1, 2, 3, 4, 0)\n assert b.take(3, npartitions=-1) == (0, 1, 2)\n with pytest.raises(ValueError):\n b.take(1, npartitions=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_test_read_text_large.with_tmpfile_as_fn_.assert_list_b_list_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_test_read_text_large.with_tmpfile_as_fn_.assert_list_b_list_d_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 635, "end_line": 645, "span_ids": ["test_read_text_large"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_large():\n with tmpfile() as fn:\n with open(fn, \"wb\") as f:\n f.write((\"Hello, world!\" + os.linesep).encode() * 100)\n b = db.read_text(fn, blocksize=100)\n c = 
db.read_text(fn)\n assert len(b.dask) > 5\n assert list(map(str, b.str.strip())) == list(map(str, c.str.strip()))\n\n d = db.read_text([fn], blocksize=100)\n assert list(b) == list(d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_encoding_test_read_text_encoding.with_tmpfile_as_fn_.assert_list_b_list_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_encoding_test_read_text_encoding.with_tmpfile_as_fn_.assert_list_b_list_d_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 648, "end_line": 660, "span_ids": ["test_read_text_encoding"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_encoding():\n with tmpfile() as fn:\n with open(fn, \"wb\") as f:\n f.write((\"\u4f60\u597d\uff01\" + os.linesep).encode(\"gb18030\") * 100)\n b = db.read_text(fn, blocksize=100, encoding=\"gb18030\")\n c = db.read_text(fn, encoding=\"gb18030\")\n assert len(b.dask) > 5\n b_enc = b.str.strip().map(lambda x: x.encode(\"utf-8\"))\n c_enc = c.str.strip().map(lambda x: x.encode(\"utf-8\"))\n assert list(b_enc) == list(c_enc)\n\n d = db.read_text([fn], blocksize=100, encoding=\"gb18030\")\n assert list(b) == list(d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_gzip_test_read_text_large_gzip.with_tmpfile_gz_as_fn_.assert_join_c_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_read_text_large_gzip_test_read_text_large_gzip.with_tmpfile_gz_as_fn_.assert_join_c_compute_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 663, "end_line": 676, "span_ids": ["test_read_text_large_gzip"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_large_gzip():\n with tmpfile(\"gz\") as fn:\n data = b\"Hello, world!\\n\" * 100\n f = GzipFile(fn, \"wb\")\n f.write(data)\n f.close()\n\n with pytest.raises(ValueError):\n # not allowed blocks when compressed\n db.read_text(fn, blocksize=50, linedelimiter=\"\\n\")\n\n c = db.read_text(fn, blocksize=None)\n assert c.npartitions == 1\n assert \"\".join(c.compute()) == 
data.decode()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_s3_test_from_s3.assert_c_npartitions_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_s3_test_from_s3.assert_c_npartitions_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 693, "end_line": 718, "span_ids": ["test_from_s3"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/6914\")\n@pytest.mark.slow\n@pytest.mark.network\ndef test_from_s3():\n # note we don't test connection modes with aws_access_key and\n # aws_secret_key because these are not on travis-ci\n pytest.importorskip(\"s3fs\")\n\n five_tips = (\n \"total_bill,tip,sex,smoker,day,time,size\\n\",\n \"16.99,1.01,Female,No,Sun,Dinner,2\\n\",\n \"10.34,1.66,Male,No,Sun,Dinner,3\\n\",\n \"21.01,3.5,Male,No,Sun,Dinner,3\\n\",\n \"23.68,3.31,Male,No,Sun,Dinner,2\\n\",\n )\n\n # test compressed data\n e = db.read_text(\"s3://tip-data/t*.gz\", storage_options=dict(anon=True))\n assert e.take(5) == five_tips\n\n # test multiple keys in bucket\n c = db.read_text(\n [\"s3://tip-data/tips.gz\", \"s3://tip-data/tips.json\", \"s3://tip-data/tips.csv\"],\n storage_options=dict(anon=True),\n )\n assert c.npartitions == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_sequence_test_from_empty_sequence.assert_df_empty_DataFra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_sequence_test_from_empty_sequence.assert_df_empty_DataFra", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 721, "end_line": 738, "span_ids": ["test_from_sequence", "test_from_long_sequence", "test_from_empty_sequence"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_sequence():\n b = db.from_sequence([1, 2, 3, 4, 5], npartitions=3)\n assert len(b.dask) == 3\n assert set(b) == {1, 2, 3, 4, 5}\n\n\ndef test_from_long_sequence():\n L = list(range(1001))\n b = db.from_sequence(L)\n assert set(b) == set(L)\n\n\ndef test_from_empty_sequence():\n pytest.importorskip(\"dask.dataframe\")\n b = 
db.from_sequence([])\n assert b.npartitions == 1\n df = b.to_dataframe(meta={\"a\": \"int\"}).compute()\n assert df.empty, \"DataFrame is not empty\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_product_test_product.assert_z_name_x_produc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_product_test_product.assert_z_name_x_produc", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 737, "end_line": 748, "span_ids": ["test_product"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_product():\n b2 = b.product(b)\n assert b2.npartitions == b.npartitions**2\n assert set(b2) == {(i, j) for i in L for j in L}\n\n x = db.from_sequence([1, 2, 3, 4])\n y = db.from_sequence([10, 20, 30])\n z = x.product(y)\n assert set(z) == {(i, j) for i in [1, 2, 3, 4] for j in [10, 20, 30]}\n\n assert z.name != b2.name\n assert z.name == x.product(y).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_partition_collect_test_groupby.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_partition_collect_test_groupby.None_3", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 739, "end_line": 761, "span_ids": ["test_groupby", "test_partition_collect"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_collect():\n with partd.Pickle() as p:\n partition(identity, range(6), 3, p)\n assert set(p.get(0)) == {0, 3}\n assert set(p.get(1)) == {1, 4}\n assert set(p.get(2)) == {2, 5}\n\n assert sorted(collect(identity, 0, p, \"\")) == [(0, [0]), (3, [3])]\n\n\ndef test_groupby():\n c = b.groupby(identity)\n result = dict(c)\n assert result == {\n 0: [0, 0, 0],\n 1: [1, 1, 1],\n 2: [2, 2, 2],\n 3: [3, 3, 3],\n 4: [4, 4, 4],\n }\n assert c.npartitions == b.npartitions\n assert c.name == b.groupby(identity).name\n assert c.name != b.groupby(lambda x: x + 1).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_indexer_test_groupby_with_npartitions_changed.assert_result_npartitions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_indexer_test_groupby_with_npartitions_changed.assert_result_npartitions", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 764, "end_line": 781, "span_ids": ["test_groupby_with_indexer", "test_groupby_with_npartitions_changed"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_with_indexer():\n b = db.from_sequence([[1, 2, 3], [1, 4, 9], [2, 3, 4]])\n result = dict(b.groupby(0))\n assert valmap(sorted, result) == {1: [[1, 2, 3], [1, 4, 9]], 2: [[2, 3, 4]]}\n\n\ndef test_groupby_with_npartitions_changed():\n result = b.groupby(lambda x: x, npartitions=1)\n result2 = dict(result)\n assert result2 == {\n 0: [0, 0, 0],\n 1: [1, 1, 1],\n 2: [2, 2, 2],\n 3: [3, 3, 3],\n 4: [4, 4, 4],\n }\n\n assert result.npartitions == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_test_to_dataframe.for_f_in_iter_tuple_.check_parts_df_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_test_to_dataframe.for_f_in_iter_tuple_.check_parts_df_sol_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 813, "end_line": 871, "span_ids": ["test_to_dataframe"], "tokens": 575}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_dataframe():\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n def check_parts(df, sol):\n assert all(\n (p.dtypes == sol.dtypes).all() for p in dask.compute(*df.to_delayed())\n )\n\n dsk = {(\"test\", 0): [(1, 2)], (\"test\", 1): [], (\"test\", 2): [(10, 20), (100, 200)]}\n b = Bag(dsk, \"test\", 3)\n sol = pd.DataFrame(b.compute(), columns=[\"a\", \"b\"])\n\n # Elements are tuples\n df = b.to_dataframe()\n dd.utils.assert_eq(df, sol.rename(columns={\"a\": 0, \"b\": 1}), check_index=False)\n df = b.to_dataframe(columns=[\"a\", \"b\"])\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n df = b.to_dataframe(meta=[(\"a\", \"i8\"), (\"b\", \"i8\")])\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n\n # Elements are dictionaries\n b = b.map(lambda x: dict(zip([\"a\", 
\"b\"], x)))\n df = b.to_dataframe()\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n assert df._name == b.to_dataframe()._name\n\n # With metadata specified\n for meta in [sol, [(\"a\", \"i8\"), (\"b\", \"i8\")]]:\n df = b.to_dataframe(meta=meta)\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n\n # Error to specify both columns and meta\n with pytest.raises(ValueError):\n b.to_dataframe(columns=[\"a\", \"b\"], meta=sol)\n\n # Inference fails if empty first partition\n b2 = b.filter(lambda x: x[\"a\"] > 200)\n with pytest.raises(ValueError):\n b2.to_dataframe()\n\n # Single column\n b = b.pluck(\"a\")\n sol = sol[[\"a\"]]\n df = b.to_dataframe(meta=sol)\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)\n\n # Works with iterators and tuples\n sol = pd.DataFrame({\"a\": range(100)})\n b = db.from_sequence(range(100), npartitions=5)\n for f in [iter, tuple]:\n df = b.map_partitions(f).to_dataframe(meta=sol)\n dd.utils.assert_eq(df, sol, check_index=False)\n check_parts(df, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_ext_open_test_to_textfiles.with_tmpdir_as_dir_.f_close_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_ext_open_test_to_textfiles.with_tmpdir_as_dir_.f_close_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 886, "end_line": 902, "span_ids": ["impl:9", "test_to_textfiles"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "ext_open = [(\"gz\", GzipFile), (\"bz2\", BZ2File), (\"\", open)]\n\n\n@pytest.mark.parametrize(\"ext,myopen\", ext_open)\ndef test_to_textfiles(ext, myopen):\n b = db.from_sequence([\"abc\", \"123\", \"xyz\"], npartitions=2)\n with tmpdir() as dir:\n c = b.to_textfiles(os.path.join(dir, \"*.\" + ext), compute=False)\n dask.compute(*c, scheduler=\"sync\")\n assert os.path.exists(os.path.join(dir, \"1.\" + ext))\n\n f = myopen(os.path.join(dir, \"1.\" + ext), \"rb\")\n text = f.read()\n if hasattr(text, \"decode\"):\n text = text.decode()\n assert \"xyz\" in text\n f.close()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_preserves_order_test_to_textfiles_name_function_preserves_order.with_tmpdir_as_dn_.assert_seq_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_preserves_order_test_to_textfiles_name_function_preserves_order.with_tmpdir_as_dn_.assert_seq_out", "embedding": null, "metadata": {"file_path": 
"dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 893, "end_line": 922, "span_ids": ["test_to_textfiles_name_function_preserves_order"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_name_function_preserves_order():\n seq = [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ]\n b = db.from_sequence(seq, npartitions=16)\n with tmpdir() as dn:\n b.to_textfiles(dn)\n\n out = (\n db.read_text(os.path.join(dn, \"*\"), encoding=\"ascii\")\n .map(str)\n .map(str.strip)\n .compute()\n )\n assert seq == out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_encoding_test_to_textfiles_encoding.for_ext_myopen_in_ext_op.with_tmpdir_as_dir_.f_close_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_encoding_test_to_textfiles_encoding.for_ext_myopen_in_ext_op.with_tmpdir_as_dir_.f_close_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 950, "end_line": 965, "span_ids": ["test_to_textfiles_encoding"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_encoding():\n b = db.from_sequence([\"\u6c7d\u8f66\", \"\u82f9\u679c\", \"\u5929\u6c14\"], npartitions=2)\n for ext, myopen in ext_open:\n with tmpdir() as dir:\n c = b.to_textfiles(\n os.path.join(dir, \"*.\" + ext), encoding=\"gb18030\", compute=False\n )\n dask.compute(*c)\n assert os.path.exists(os.path.join(dir, \"1.\" + ext))\n\n f = myopen(os.path.join(dir, \"1.\" + ext), \"rb\")\n text = f.read()\n if hasattr(text, \"decode\"):\n text = text.decode(\"gb18030\")\n assert \"\u5929\u6c14\" in text\n f.close()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_inputs_test_to_textfiles_endlines.with_tmpfile_as_fn_.for_last_endline_in_False.assert_result_a_n_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_inputs_test_to_textfiles_endlines.with_tmpfile_as_fn_.for_last_endline_in_False.assert_result_a_n_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", 
"file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 987, "end_line": 1011, "span_ids": ["test_to_textfiles_endlines", "test_to_textfiles_inputs"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_inputs():\n B = db.from_sequence([\"abc\", \"123\", \"xyz\"], npartitions=2)\n with tmpfile() as a:\n with tmpfile() as b:\n B.to_textfiles([a, b])\n assert os.path.exists(a)\n assert os.path.exists(b)\n\n with tmpdir() as dirname:\n B.to_textfiles(dirname)\n assert os.path.exists(dirname)\n assert os.path.exists(os.path.join(dirname, \"0.part\"))\n\n with pytest.raises(TypeError):\n B.to_textfiles(5)\n\n\ndef test_to_textfiles_endlines():\n b = db.from_sequence([\"a\", \"b\", \"c\"], npartitions=1)\n with tmpfile() as fn:\n for last_endline in False, True:\n b.to_textfiles([fn], last_endline=last_endline)\n with open(fn) as f:\n result = f.readlines()\n assert result == [\"a\\n\", \"b\\n\", \"c\\n\" if last_endline else \"c\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_test_string_namespace.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_test_string_namespace.None_6", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 995, "end_line": 1011, "span_ids": ["test_string_namespace"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_string_namespace():\n b = db.from_sequence([\"Alice Smith\", \"Bob Jones\", \"Charlie Smith\"], npartitions=2)\n\n assert \"split\" in dir(b.str)\n assert \"match\" in dir(b.str)\n\n assert list(b.str.lower()) == [\"alice smith\", \"bob jones\", \"charlie smith\"]\n assert list(b.str.split(\" \")) == [\n [\"Alice\", \"Smith\"],\n [\"Bob\", \"Jones\"],\n [\"Charlie\", \"Smith\"],\n ]\n assert list(b.str.match(\"*Smith\")) == [\"Alice Smith\", \"Charlie Smith\"]\n\n pytest.raises(AttributeError, lambda: b.str.sfohsofhf)\n assert b.str.match(\"*Smith\").name == b.str.match(\"*Smith\").name\n assert b.str.match(\"*Smith\").name != b.str.match(\"*John\").name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_with_unicode_BagOfDicts.set.return.self_map_setter_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_string_namespace_with_unicode_BagOfDicts.set.return.self_map_setter_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1014, "end_line": 1055, "span_ids": ["test_string_namespace_with_unicode", "test_ensure_compute_output_is_concrete", "test_str_empty_split", "BagOfDicts.get", "test_map_with_iterator_function", "BagOfDicts", "BagOfDicts.set"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_string_namespace_with_unicode():\n b = db.from_sequence([\"Alice Smith\", \"Bob Jones\", \"Charlie Smith\"], npartitions=2)\n assert list(b.str.lower()) == [\"alice smith\", \"bob jones\", \"charlie smith\"]\n\n\ndef test_str_empty_split():\n b = db.from_sequence([\"Alice Smith\", \"Bob Jones\", \"Charlie Smith\"], npartitions=2)\n assert list(b.str.split()) == [\n [\"Alice\", \"Smith\"],\n [\"Bob\", \"Jones\"],\n [\"Charlie\", \"Smith\"],\n ]\n\n\ndef test_map_with_iterator_function():\n b = db.from_sequence([[1, 2, 3], [4, 5, 6]], npartitions=2)\n\n def f(L):\n for x in L:\n yield x + 1\n\n c = b.map(f)\n\n assert list(c) == [[2, 3, 4], [5, 6, 7]]\n\n\ndef test_ensure_compute_output_is_concrete():\n b = db.from_sequence([1, 2, 3])\n result = b.map(lambda x: x + 1).compute()\n assert not isinstance(result, Iterator)\n\n\nclass BagOfDicts(db.Bag):\n def get(self, key, default=None):\n return self.map(lambda d: d.get(key, default))\n\n def set(self, key, value):\n def setter(d):\n d[key] = value\n return d\n\n return self.map(setter)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_test_to_delayed.assert_t_compute_21": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_test_to_delayed.assert_t_compute_21", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1082, "end_line": 1091, "span_ids": ["test_to_delayed"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed():\n b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)\n a, b, c = b.map(inc).to_delayed()\n assert all(isinstance(x, Delayed) for x in [a, b, c])\n assert b.compute() == [4, 5]\n\n b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=3)\n t = b.sum().to_delayed()\n assert isinstance(t, Delayed)\n assert t.compute() == 21", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_test_from_delayed.assert_asum_value_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_test_from_delayed.assert_asum_value_compute", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1118, "end_line": 1130, "span_ids": ["test_from_delayed"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed():\n from dask.delayed import delayed\n\n a, b, c = delayed([1, 2, 3]), delayed([4, 5, 6]), delayed([7, 8, 9])\n bb = from_delayed([a, b, c])\n assert bb.name == from_delayed([a, b, c]).name\n\n assert isinstance(bb, Bag)\n assert list(bb) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n asum_value = delayed(sum)(a)\n asum_item = db.Item.from_delayed(asum_value)\n assert asum_value.compute() == asum_item.compute() == 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_iterator_test_range.for_npartitions_in_1_7_.assert_list_b_list_ra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_from_delayed_iterator_test_range.for_npartitions_in_1_7_.assert_list_b_list_ra", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1156, "end_line": 1177, "span_ids": ["test_from_delayed_iterator", "test_range"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed_iterator():\n from dask.delayed import delayed\n\n def lazy_records(n):\n return ({\"operations\": [1, 2]} for _ in range(n))\n\n delayed_records = delayed(lazy_records, pure=False)\n bag = db.from_delayed([delayed_records(5) for _ in range(5)])\n assert db.compute(\n bag.count(),\n bag.pluck(\"operations\").count(),\n bag.pluck(\"operations\").flatten().count(),\n scheduler=\"sync\",\n ) == (25, 25, 50)\n\n\ndef test_range():\n for npartitions in [1, 7, 10, 28]:\n b = db.range(100, npartitions=npartitions)\n assert len(b.dask) == npartitions\n assert b.npartitions == npartitions\n assert list(b) == list(range(100))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_zip_test_zip.assert_list_pairs_lis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_zip_test_zip.assert_list_pairs_lis", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1160, "end_line": 1166, "span_ids": ["test_zip"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 7, 10, 28])\ndef test_zip(npartitions, hi=1000):\n evens = db.from_sequence(range(0, hi, 2), npartitions=npartitions)\n odds = db.from_sequence(range(1, hi, 2), npartitions=npartitions)\n pairs = db.zip(evens, odds)\n assert pairs.npartitions == npartitions\n assert list(pairs) == list(zip(range(0, hi, 2), range(1, hi, 2)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_npartitions_test_repartition_npartitions.assert_all_results_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1169, "end_line": 1177, "span_ids": ["test_repartition_npartitions"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"nin\", [1, 2, 7, 11, 23])\n@pytest.mark.parametrize(\"nout\", [1, 2, 5, 12, 23])\ndef test_repartition_npartitions(nin, nout):\n b = db.from_sequence(range(100), npartitions=nin)\n c = b.repartition(npartitions=nout)\n assert c.npartitions == nout\n assert_eq(b, c)\n results = dask.get(c.dask, c.__dask_keys__())\n assert all(results)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_partition_size_test_repartition_partition_size.assert_eq_b_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_repartition_partition_size_test_repartition_partition_size.assert_eq_b_c_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", 
"category": "test", "start_line": 1180, "end_line": 1199, "span_ids": ["test_repartition_partition_size"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"nin, nout\",\n [\n (1, 1),\n (2, 1),\n (5, 1),\n (1, 2),\n (2, 2),\n (5, 2),\n (1, 5),\n (2, 5),\n (5, 5),\n ],\n)\ndef test_repartition_partition_size(nin, nout):\n b = db.from_sequence(range(1, 100), npartitions=nin)\n total_mem = sum(b.map_partitions(total_mem_usage).compute())\n c = b.repartition(partition_size=(total_mem // nout))\n assert c.npartitions >= nout\n assert_eq(b, c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_multiple_repartition_partition_size_test_repartition_input_errors.with_pytest_raises_ValueE.bag_repartition_npartitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_multiple_repartition_partition_size_test_repartition_input_errors.with_pytest_raises_ValueE.bag_repartition_npartitio", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1202, "end_line": 1243, "span_ids": ["test_multiple_repartition_partition_size", "test_repartition_names", "test_repartition_input_errors", "test_repartition_partition_size_complex_dtypes"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multiple_repartition_partition_size():\n b = db.from_sequence(range(1, 100), npartitions=1)\n total_mem = sum(b.map_partitions(total_mem_usage).compute())\n\n c = b.repartition(partition_size=(total_mem // 2))\n assert c.npartitions >= 2\n assert_eq(b, c)\n\n d = c.repartition(partition_size=(total_mem // 5))\n assert d.npartitions >= 5\n assert_eq(c, d)\n\n\ndef test_repartition_partition_size_complex_dtypes():\n np = pytest.importorskip(\"numpy\")\n\n b = db.from_sequence([np.array(range(100)) for _ in range(4)], npartitions=1)\n total_mem = sum(b.map_partitions(total_mem_usage).compute())\n\n new_partition_size = total_mem // 4\n c = b.repartition(partition_size=new_partition_size)\n assert c.npartitions >= 4\n assert_eq(b, c)\n\n\ndef test_repartition_names():\n b = db.from_sequence(range(100), npartitions=5)\n c = b.repartition(2)\n assert b.name != c.name\n\n d = b.repartition(20)\n assert b.name != c.name\n assert c.name != d.name\n\n c = b.repartition(5)\n assert b is c\n\n\ndef test_repartition_input_errors():\n with pytest.raises(ValueError):\n bag = db.from_sequence(range(10))\n bag.repartition(npartitions=5, partition_size=\"5MiB\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_accumulate_test_accumulate.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_accumulate_test_accumulate.None_5", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1269, "end_line": 1281, "span_ids": ["test_accumulate"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_accumulate():\n parts = [[1, 2, 3], [4, 5], [], [6, 7]]\n dsk = {(\"test\", i): p for (i, p) in enumerate(parts)}\n b = db.Bag(dsk, \"test\", len(parts))\n r = b.accumulate(add)\n assert r.name == b.accumulate(add).name\n assert r.name != b.accumulate(add, -1).name\n assert r.compute() == [1, 3, 6, 10, 15, 21, 28]\n assert b.accumulate(add, -1).compute() == [-1, 0, 2, 5, 9, 14, 20, 27]\n assert b.accumulate(add).map(inc).compute() == [2, 4, 7, 11, 16, 22, 29]\n\n b = db.from_sequence([1, 2, 3], npartitions=1)\n assert b.accumulate(add).compute() == [1, 3, 6]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_test_groupby_tasks.None_2.for_b_in_partitions_.if_a_is_not_b_.assert_not_set_pluck_0_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_test_groupby_tasks.None_2.for_b_in_partitions_.if_a_is_not_b_.assert_not_set_pluck_0_a", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1281, "end_line": 1308, "span_ids": ["test_groupby_tasks"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_tasks():\n b = db.from_sequence(range(160), npartitions=4)\n out = b.groupby(lambda x: x % 10, max_branch=4, shuffle=\"tasks\")\n partitions = dask.get(out.dask, out.__dask_keys__())\n\n for a in partitions:\n for b in partitions:\n if a is not b:\n assert not set(pluck(0, a)) & set(pluck(0, b))\n\n b = db.from_sequence(range(1000), npartitions=100)\n out = b.groupby(lambda x: x % 123, shuffle=\"tasks\")\n assert len(out.dask) < 100**2\n partitions = dask.get(out.dask, out.__dask_keys__())\n\n for a in partitions:\n for b in partitions:\n if a is not b:\n assert not set(pluck(0, a)) & set(pluck(0, b))\n\n b = db.from_sequence(range(10000), npartitions=345)\n out = b.groupby(lambda x: x % 2834, max_branch=24, 
shuffle=\"tasks\")\n partitions = dask.get(out.dask, out.__dask_keys__())\n\n for a in partitions:\n for b in partitions:\n if a is not b:\n assert not set(pluck(0, a)) & set(pluck(0, b))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_names_test_groupby_tasks_names.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_names_test_groupby_tasks_names.None_2", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1291, "end_line": 1303, "span_ids": ["test_groupby_tasks_names"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_tasks_names():\n b = db.from_sequence(range(160), npartitions=4)\n func = lambda x: x % 10\n func2 = lambda x: x % 20\n assert set(b.groupby(func, max_branch=4, shuffle=\"tasks\").dask) == set(\n b.groupby(func, max_branch=4, shuffle=\"tasks\").dask\n )\n assert set(b.groupby(func, max_branch=4, shuffle=\"tasks\").dask) != set(\n b.groupby(func, max_branch=2, shuffle=\"tasks\").dask\n )\n assert set(b.groupby(func, max_branch=4, shuffle=\"tasks\").dask) != set(\n b.groupby(func2, max_branch=4, shuffle=\"tasks\").dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_2_test_groupby_tasks_2.assert_dict_result_gr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_2_test_groupby_tasks_2.assert_dict_result_gr", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1306, "end_line": 1313, "span_ids": ["test_groupby_tasks_2"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"size,npartitions,groups\", [(1000, 20, 100), (12345, 234, 1042), (100, 1, 50)]\n)\ndef test_groupby_tasks_2(size, npartitions, groups):\n func = lambda x: x % groups\n b = db.range(size, npartitions=npartitions).groupby(func, shuffle=\"tasks\")\n result = b.compute(scheduler=\"sync\")\n assert dict(result) == groupby(func, range(size))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_3_test_reduction_empty.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_tasks_3_test_reduction_empty.None_1", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1316, "end_line": 1334, "span_ids": ["test_groupby_tasks_3", "test_to_textfiles_empty_partitions", "test_reduction_empty"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_tasks_3():\n func = lambda x: x % 10\n b = db.range(20, npartitions=5).groupby(func, shuffle=\"tasks\", max_branch=2)\n result = b.compute(scheduler=\"sync\")\n assert dict(result) == groupby(func, range(20))\n # assert b.npartitions == 5\n\n\ndef test_to_textfiles_empty_partitions():\n with tmpdir() as d:\n b = db.range(5, npartitions=5).filter(lambda x: x == 1).map(str)\n b.to_textfiles(os.path.join(d, \"*.txt\"))\n assert len(os.listdir(d)) == 5\n\n\ndef test_reduction_empty():\n b = db.from_sequence(range(10), npartitions=100)\n assert_eq(b.filter(lambda x: x % 2 == 0).max(), 8)\n assert_eq(b.filter(lambda x: x % 2 == 0).min(), 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reduction_empty_aggregate_test_reduction_empty_aggregate.with_pytest_raises_ValueE.b_filter_None_min_split_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reduction_empty_aggregate_test_reduction_empty_aggregate.with_pytest_raises_ValueE.b_filter_None_min_split_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1337, "end_line": 1345, "span_ids": ["test_reduction_empty_aggregate"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 2, 4])\ndef test_reduction_empty_aggregate(npartitions):\n b = db.from_sequence([0, 0, 0, 1], npartitions=npartitions).filter(None)\n assert_eq(b.min(split_every=2), 1)\n vals = db.compute(b.min(split_every=2), b.max(split_every=2), scheduler=\"sync\")\n assert vals == (1, 1)\n with pytest.raises(ValueError):\n b = db.from_sequence([0, 0, 0, 0], npartitions=npartitions)\n b.filter(None).min(split_every=2).compute(scheduler=\"sync\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_StrictReal_test_bag_with_single_callable.assert_eq_b_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_StrictReal_test_bag_with_single_callable.assert_eq_b_f_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1360, "end_line": 1410, "span_ids": ["test_empty", "test_msgpack_unicode", "test_reduction_with_sparse_matrices", "test_reduction_with_non_comparable_objects", "test_bag_picklable", "test_bag_with_single_callable", "StrictReal.__ne__", "StrictReal.__eq__", "StrictReal"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StrictReal(int):\n def __eq__(self, other):\n assert isinstance(other, StrictReal)\n return self.real == other.real\n\n def __ne__(self, other):\n assert isinstance(other, StrictReal)\n return self.real != other.real\n\n\ndef test_reduction_with_non_comparable_objects():\n b = db.from_sequence([StrictReal(x) for x in range(10)], partition_size=2)\n assert_eq(b.fold(max, max), StrictReal(9))\n\n\ndef test_reduction_with_sparse_matrices():\n sp = pytest.importorskip(\"scipy.sparse\")\n b = db.from_sequence([sp.csr_matrix([0]) for x in range(4)], partition_size=2)\n\n def sp_reduce(a, b):\n return sp.vstack([a, b])\n\n assert b.fold(sp_reduce, sp_reduce).compute(scheduler=\"sync\").shape == (4, 1)\n\n\ndef test_empty():\n list(db.from_sequence([])) == []\n\n\ndef test_bag_picklable():\n from pickle import dumps, loads\n\n b = db.from_sequence(range(100))\n b2 = loads(dumps(b))\n assert b.compute() == b2.compute()\n\n s = b.sum()\n s2 = loads(dumps(s))\n assert s.compute() == s2.compute()\n\n\ndef test_msgpack_unicode():\n b = db.from_sequence([{\"a\": 1}]).groupby(\"a\")\n result = b.compute(scheduler=\"sync\")\n assert dict(result) == {1: [{\"a\": 1}]}\n\n\ndef test_bag_with_single_callable():\n f = lambda: None\n b = db.from_sequence([f])\n assert_eq(b, [f])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_optimize_fuse_keys_test_optimize_fuse_keys.assert_all_k_in_dsk_for_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_optimize_fuse_keys_test_optimize_fuse_keys.assert_all_k_in_dsk_for_k", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1401, "end_line": 1410, "span_ids": ["test_optimize_fuse_keys"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_fuse_keys():\n x = db.range(10, npartitions=2)\n y = x.map(inc)\n z = y.map(inc)\n\n dsk = z.__dask_optimize__(z.dask, z.__dask_keys__())\n assert not y.dask.keys() & dsk.keys()\n\n dsk = z.__dask_optimize__(z.dask, z.__dask_keys__(), fuse_keys=y.__dask_keys__())\n assert all(k in dsk for k in y.__dask_keys__())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reductions_are_lazy_test_repeated_groupby.assert_valmap_len_dict_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_reductions_are_lazy_test_repeated_groupby.assert_valmap_len_dict_c", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1413, "end_line": 1435, "span_ids": ["test_reductions_are_lazy", "test_repeated_groupby"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_are_lazy():\n current = [None]\n\n def part():\n for i in range(10):\n current[0] = i\n yield i\n\n def func(part):\n assert current[0] == 0\n return sum(part)\n\n b = Bag({(\"foo\", 0): part()}, \"foo\", 1)\n\n res = b.reduction(func, sum)\n\n assert_eq(res, sum(range(10)))\n\n\ndef test_repeated_groupby():\n b = db.range(10, npartitions=4)\n c = b.groupby(lambda x: x % 3)\n assert valmap(len, dict(c)) == valmap(len, dict(c))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_empty_bag_test_map_keynames.assert_set_b_map_inc___d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_empty_bag_test_map_keynames.assert_set_b_map_inc___d", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1452, "end_line": 1489, "span_ids": ["test_map_partitions_arg", "test_bag_paths", "test_empty_bag", "test_map_keynames"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_bag():\n b = db.from_sequence([])\n assert_eq(b.map(inc).all(), True)\n assert_eq(b.map(inc).any(), False)\n assert_eq(b.map(inc).sum(), False)\n assert_eq(b.map(inc).count(), False)\n\n\ndef 
test_bag_paths():\n b = db.from_sequence([\"abc\", \"123\", \"xyz\"], npartitions=2)\n paths = b.to_textfiles(\"foo*\")\n assert paths[0].endswith(\"foo0\")\n assert paths[1].endswith(\"foo1\")\n\n os.remove(\"foo0\")\n os.remove(\"foo1\")\n\n\ndef test_map_partitions_arg():\n def append_str(partition, s):\n return [x + s for x in partition]\n\n mybag = db.from_sequence([\"a\", \"b\", \"c\"])\n\n assert_eq(mybag.map_partitions(append_str, \"foo\"), [\"afoo\", \"bfoo\", \"cfoo\"])\n assert_eq(\n mybag.map_partitions(append_str, dask.delayed(\"foo\")), [\"afoo\", \"bfoo\", \"cfoo\"]\n )\n\n\ndef test_map_keynames():\n b = db.from_sequence([1, 2, 3])\n d = dict(b.map(inc).__dask_graph__())\n assert \"inc\" in map(dask.utils.key_split, d)\n\n assert set(b.map(inc).__dask_graph__()) != set(\n b.map_partitions(inc).__dask_graph__()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_pytest_test_sample_size_k_bigger_than_smallest_partition_size.assert_len_set_li_le": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_pytest_test_sample_size_k_bigger_than_smallest_partition_size.assert_len_set_li_le", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 89, "span_ids": ["test_choices_k_bigger_than_bag_size", "test_sample_size_k_bigger_than_smallest_partition_size", "test_sample_size_exactly_k", "imports", "test_sample_k_bigger_than_bag_size", "test_choices_empty_partition", "test_choices_k_equal_bag_size_with_unbalanced_partitions", "test_sample_empty_partition", "test_choices_size_exactly_k", "test_choices_k_bigger_than_smallest_partition_size"], "tokens": 766}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask.bag as db\nfrom dask.bag import random\n\n\ndef test_choices_size_exactly_k():\n seq = range(20)\n sut = db.from_sequence(seq, npartitions=3)\n li = list(random.choices(sut, k=2).compute())\n assert len(li) == 2\n assert all(i in seq for i in li)\n\n\ndef test_choices_k_bigger_than_bag_size():\n seq = range(3)\n sut = db.from_sequence(seq, npartitions=3)\n li = list(random.choices(sut, k=4).compute())\n assert len(li) == 4\n assert all(i in seq for i in li)\n\n\ndef test_choices_empty_partition():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n sut = sut.repartition(3)\n li = list(random.choices(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (9, 0, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n\n\ndef test_choices_k_bigger_than_smallest_partition_size():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.choices(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n\n\ndef test_choices_k_equal_bag_size_with_unbalanced_partitions():\n seq = 
range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.choices(sut, k=10).compute())\n assert sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 10\n assert all(i in seq for i in li)\n\n\ndef test_sample_size_exactly_k():\n seq = range(20)\n sut = db.from_sequence(seq, npartitions=3)\n li = list(random.sample(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (7, 7, 6)\n assert len(li) == 2\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)\n\n\ndef test_sample_k_bigger_than_bag_size():\n seq = range(3)\n sut = db.from_sequence(seq, npartitions=3)\n # should raise: Sample larger than population or is negative\n with pytest.raises(\n ValueError, match=\"Sample larger than population or is negative\"\n ):\n random.sample(sut, k=4).compute()\n\n\ndef test_sample_empty_partition():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n sut = sut.repartition(3)\n li = list(random.sample(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (9, 0, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)\n\n\ndef test_sample_size_k_bigger_than_smallest_partition_size():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.sample(sut, k=2).compute())\n assert sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 2\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_test_read_text.with_filetexts_files2_mo.assert_join_line_for_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_test_read_text.with_filetexts_files2_mo.assert_join_line_for_b", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_text.py", "file_name": "test_text.py", "file_type": "text/x-python", "category": "test", "start_line": 45, "end_line": 85, "span_ids": ["test_read_text"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"fmt,bs,encoding,include_path\", fmt_bs_enc_path)\ndef test_read_text(fmt, bs, encoding, include_path):\n if fmt not in utils.compress:\n pytest.skip(\"compress function not provided for %s\" % fmt)\n compress = utils.compress[fmt]\n files2 = {k: compress(v.encode(encoding)) for k, v in files.items()}\n with filetexts(files2, mode=\"b\"):\n b = read_text(\n \".test.accounts.*.json\", compression=fmt, blocksize=bs, encoding=encoding\n )\n (L,) = compute(b)\n assert \"\".join(L) == expected\n\n o = read_text(\n sorted(files),\n compression=fmt,\n blocksize=bs,\n encoding=encoding,\n include_path=include_path,\n )\n b = o.pluck(0) if include_path else o\n (L,) = compute(b)\n assert \"\".join(L) == expected\n if include_path:\n (paths,) = compute(o.pluck(1))\n expected_paths = list(\n concat([[k] * v.count(\"\\n\") for k, v in files.items()])\n )\n assert len(paths) == 
len(expected_paths)\n for path, expected_path in zip(paths, expected_paths):\n assert path.endswith(expected_path)\n\n blocks = read_text(\n \".test.accounts.*.json\",\n compression=fmt,\n blocksize=bs,\n encoding=encoding,\n collection=False,\n )\n L = compute(*blocks)\n assert \"\".join(line for block in L for line in block) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_io_read_text.if_isinstance_blocksize_.blocksize.parse_bytes_blocksize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_io_read_text.if_isinstance_blocksize_.blocksize.parse_bytes_blocksize_", "embedding": null, "metadata": {"file_path": "dask/bag/text.py", "file_name": "text.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 90, "span_ids": ["imports", "read_text"], "tokens": 798}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nfrom functools import partial\n\nfrom fsspec.core import open_files\nfrom tlz import concat\n\nfrom ..bytes import read_bytes\nfrom ..delayed import delayed\nfrom ..utils import parse_bytes, system_encoding\nfrom .core import from_delayed\n\ndelayed = delayed(pure=True)\n\n\ndef read_text(\n urlpath,\n blocksize=None,\n compression=\"infer\",\n encoding=system_encoding,\n errors=\"strict\",\n linedelimiter=None,\n collection=True,\n storage_options=None,\n files_per_partition=None,\n include_path=False,\n):\n \"\"\"Read lines from text files\n\n Parameters\n ----------\n urlpath : string or list\n Absolute or relative filepath(s). Prefix with a protocol like ``s3://``\n to read from alternative filesystems. To read from multiple files you\n can pass a globstring or a list of paths, with the caveat that they\n must all have the same protocol.\n blocksize: None, int, or str\n Size (in bytes) to cut up larger files. Streams by default.\n Can be ``None`` for streaming, an integer number of bytes, or a string\n like \"128MiB\"\n compression: string\n Compression format like 'gzip' or 'xz'. Defaults to 'infer'\n encoding: string\n errors: string\n linedelimiter: string or None\n collection: bool, optional\n Return dask.bag if True, or list of delayed values if false\n storage_options: dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n files_per_partition: None or int\n If set, group input files into partitions of the requested size,\n instead of one partition per file. 
Mutually exclusive with blocksize.\n include_path: bool\n Whether or not to include the path in the bag.\n If true, elements are tuples of (line, path).\n Default is False.\n\n Examples\n --------\n >>> b = read_text('myfiles.1.txt') # doctest: +SKIP\n >>> b = read_text('myfiles.*.txt') # doctest: +SKIP\n >>> b = read_text('myfiles.*.txt.gz') # doctest: +SKIP\n >>> b = read_text('s3://bucket/myfiles.*.txt') # doctest: +SKIP\n >>> b = read_text('s3://key:secret@bucket/myfiles.*.txt') # doctest: +SKIP\n >>> b = read_text('hdfs://namenode.example.com/myfiles.*.txt') # doctest: +SKIP\n\n Parallelize a large file by providing the number of uncompressed bytes to\n load into each partition.\n\n >>> b = read_text('largefile.txt', blocksize='10MB') # doctest: +SKIP\n\n Get file paths of the bag by setting include_path=True\n\n >>> b = read_text('myfiles.*.txt', include_path=True) # doctest: +SKIP\n >>> b.take(1) # doctest: +SKIP\n (('first line of the first file', '/home/dask/myfiles.0.txt'),)\n\n Returns\n -------\n dask.bag.Bag or list\n dask.bag.Bag if collection is True or list of Delayed lists otherwise.\n\n See Also\n --------\n from_sequence: Build bag from Python sequence\n \"\"\"\n if blocksize is not None and files_per_partition is not None:\n raise ValueError(\"Only one of blocksize or files_per_partition can be set\")\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/utils.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/utils.py__", "embedding": null, "metadata": {"file_path": "dask/bag/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 8, "span_ids": ["assert_eq"], "tokens": 52}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_eq(a, b, scheduler=\"sync\"):\n if hasattr(a, \"compute\"):\n a = a.compute(scheduler=scheduler)\n if hasattr(b, \"compute\"):\n b = b.compute(scheduler=scheduler)\n\n assert a == b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.persist_DaskMethodsMixin.persist.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.persist_DaskMethodsMixin.persist.return.result", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 217, "end_line": 257, "span_ids": ["DaskMethodsMixin.persist"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DaskMethodsMixin:\n\n def persist(self, **kwargs):\n \"\"\"Persist this dask collection into memory\n\n This turns a lazy Dask collection into a Dask collection with the same\n metadata, but now with the results fully computed or actively computing\n in the background.\n\n The action of function differs significantly depending on the active\n task scheduler. If the task scheduler supports asynchronous computing,\n such as is the case of the dask.distributed scheduler, then persist\n will return *immediately* and the return value's task graph will\n contain Dask Future objects. However if the task scheduler only\n supports blocking computation then the call to persist will *block*\n and the return value's task graph will contain concrete Python results.\n\n This function is particularly useful when using distributed systems,\n because the results will be kept in distributed memory, rather than\n returned to the local process as with compute.\n\n Parameters\n ----------\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n optimize_graph : bool, optional\n If True [default], the graph is optimized before computation.\n Otherwise the graph is run as is. This can be useful for debugging.\n **kwargs\n Extra keywords to forward to the scheduler function.\n\n Returns\n -------\n New dask collections backed by in-memory data\n\n See Also\n --------\n dask.base.persist\n \"\"\"\n (result,) = persist(self, traverse=False, **kwargs)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.compute_DaskMethodsMixin.__await__.return.f___await___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_DaskMethodsMixin.compute_DaskMethodsMixin.__await__.return.f___await___", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 303, "span_ids": ["DaskMethodsMixin.__await__", "DaskMethodsMixin.compute"], "tokens": 304}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DaskMethodsMixin:\n\n def compute(self, **kwargs):\n \"\"\"Compute this dask collection\n\n This turns a lazy Dask collection into its in-memory equivalent.\n For example a Dask array turns into a NumPy array and a Dask dataframe\n turns into a Pandas dataframe. 
The entire dataset must fit into memory\n before calling this operation.\n\n Parameters\n ----------\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n optimize_graph : bool, optional\n If True [default], the graph is optimized before computation.\n Otherwise the graph is run as is. This can be useful for debugging.\n kwargs\n Extra keywords to forward to the scheduler function.\n\n See Also\n --------\n dask.base.compute\n \"\"\"\n (result,) = compute(self, traverse=False, **kwargs)\n return result\n\n def __await__(self):\n try:\n from distributed import futures_of, wait\n except ImportError as e:\n raise ImportError(\n \"Using async/await with dask requires the `distributed` package\"\n ) from e\n from tornado import gen\n\n @gen.coroutine\n def f():\n if futures_of(self):\n yield wait(self)\n raise gen.Return(self)\n\n return f().__await__()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_as_if_collection_optimization_function.return.getattr_x___dask_optimi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_as_if_collection_optimization_function.return.getattr_x___dask_optimi", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 309, "end_line": 323, "span_ids": ["compute_as_if_collection", "optimization_function", "dont_optimize"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_as_if_collection(cls, dsk, keys, scheduler=None, get=None, **kwargs):\n \"\"\"Compute a graph as if it were of type cls.\n\n Allows for applying the same optimizations and default scheduler.\"\"\"\n schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)\n dsk2 = optimization_function(cls)(dsk, keys, **kwargs)\n return schedule(dsk2, keys, **kwargs)\n\n\ndef dont_optimize(dsk, keys, **kwargs):\n return dsk\n\n\ndef optimization_function(x):\n return getattr(x, \"__dask_optimize__\", dont_optimize)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_collections_to_dsk_collections_to_dsk.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_collections_to_dsk_collections_to_dsk.return.dsk", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 324, "end_line": 353, "span_ids": ["collections_to_dsk"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collections_to_dsk(collections, optimize_graph=True, optimizations=(), **kwargs):\n \"\"\"\n Convert many collections into a single dask graph, after optimization\n \"\"\"\n from .highlevelgraph import HighLevelGraph\n\n optimizations = tuple(optimizations) + tuple(config.get(\"optimizations\", ()))\n\n if optimize_graph:\n groups = groupby(optimization_function, collections)\n\n graphs = []\n for opt, val in groups.items():\n dsk, keys = _extract_graph_and_keys(val)\n dsk = opt(dsk, keys, **kwargs)\n\n for opt_inner in optimizations:\n dsk = opt_inner(dsk, keys, **kwargs)\n\n graphs.append(dsk)\n\n # Merge all graphs\n if any(isinstance(graph, HighLevelGraph) for graph in graphs):\n dsk = HighLevelGraph.merge(*graphs)\n else:\n dsk = merge(*map(ensure_dict, graphs))\n else:\n dsk, _ = _extract_graph_and_keys(collections)\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__extract_graph_and_keys__extract_graph_and_keys.return.graph_keys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__extract_graph_and_keys__extract_graph_and_keys.return.graph_keys", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 242, "end_line": 257, "span_ids": ["_extract_graph_and_keys"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _extract_graph_and_keys(vals):\n \"\"\"Given a list of dask vals, return a single graph and a list of keys such\n that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``.\"\"\"\n from .highlevelgraph import HighLevelGraph\n\n graphs, keys = [], []\n for v in vals:\n graphs.append(v.__dask_graph__())\n keys.append(v.__dask_keys__())\n\n if any(isinstance(graph, HighLevelGraph) for graph in graphs):\n graph = HighLevelGraph.merge(*graphs)\n else:\n graph = merge(*map(ensure_dict, graphs))\n\n return graph, keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections_unpack_collections.collections_token.uuid_uuid4_hex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections_unpack_collections.collections_token.uuid_uuid4_hex", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 376, "end_line": 406, "span_ids": 
["unpack_collections"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_collections(*args, traverse=True):\n \"\"\"Extract collections in preparation for compute/persist/etc...\n\n Intended use is to find all collections in a set of (possibly nested)\n python objects, do something to them (compute, etc...), then repackage them\n in equivalent python objects.\n\n Parameters\n ----------\n *args\n Any number of objects. If it is a dask collection, it's extracted and\n added to the list of collections returned. By default, python builtin\n collections are also traversed to look for dask collections (for more\n information see the ``traverse`` keyword).\n traverse : bool, optional\n If True (default), builtin python collections are traversed looking for\n any dask collections they might contain.\n\n Returns\n -------\n collections : list\n A list of all dask collections contained in ``args``\n repack : callable\n A function to call on the transformed collections to repackage them as\n they were in the original ``args``.\n \"\"\"\n\n collections = []\n repack_dsk = {}\n\n collections_token = uuid.uuid4().hex\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections._unpack_unpack_collections.return.collections_repack": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_unpack_collections._unpack_unpack_collections.return.collections_repack", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 412, "end_line": 457, "span_ids": ["unpack_collections"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_collections(*args, traverse=True):\n # ... 
other code\n\n def _unpack(expr):\n if is_dask_collection(expr):\n tok = tokenize(expr)\n if tok not in repack_dsk:\n repack_dsk[tok] = (getitem, collections_token, len(collections))\n collections.append(expr)\n return tok\n\n tok = uuid.uuid4().hex\n if not traverse:\n tsk = quote(expr)\n else:\n # Treat iterators like lists\n typ = list if isinstance(expr, Iterator) else type(expr)\n if typ in (list, tuple, set):\n tsk = (typ, [_unpack(i) for i in expr])\n elif typ in (dict, OrderedDict):\n tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])\n elif dataclasses.is_dataclass(expr) and not isinstance(expr, type):\n tsk = (\n apply,\n typ,\n (),\n (\n dict,\n [\n [f.name, _unpack(getattr(expr, f.name))]\n for f in dataclasses.fields(expr)\n ],\n ),\n )\n else:\n return expr\n\n repack_dsk[tok] = tsk\n return tok\n\n out = uuid.uuid4().hex\n repack_dsk[out] = (tuple, [_unpack(i) for i in args])\n\n def repack(results):\n dsk = repack_dsk.copy()\n dsk[collections_token] = quote(results)\n return simple_get(dsk, out)\n\n return collections, repack", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_optimize_optimize.return.repack_postpersists_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_optimize_optimize.return.repack_postpersists_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 456, "end_line": 507, "span_ids": ["optimize"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize(*args, traverse=True, **kwargs):\n \"\"\"Optimize several dask collections at once.\n\n Returns equivalent dask collections that all share the same merged and\n optimized underlying graph. This can be useful if converting multiple\n collections to delayed objects, or to manually apply the optimizations at\n strategic points.\n\n Note that in most cases you shouldn't need to call this method directly.\n\n Parameters\n ----------\n *args : objects\n Any number of objects. If a dask object, its graph is optimized and\n merged with all those of all other dask objects before returning an\n equivalent dask collection. Non-dask arguments are passed through\n unchanged.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``optimize``. For large collections this can be\n expensive. 
If none of the arguments contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n optimizations : list of callables, optional\n Additional optimization passes to perform.\n **kwargs\n Extra keyword arguments to forward to the optimization passes.\n\n Examples\n --------\n >>> import dask as d\n >>> import dask.array as da\n >>> a = da.arange(10, chunks=2).sum()\n >>> b = da.arange(10, chunks=2).mean()\n >>> a2, b2 = d.optimize(a, b)\n\n >>> a2.compute() == a.compute()\n True\n >>> b2.compute() == b.compute()\n True\n \"\"\"\n collections, repack = unpack_collections(*args, traverse=traverse)\n if not collections:\n return args\n\n dsk = collections_to_dsk(collections, **kwargs)\n\n postpersists = []\n for a in collections:\n r, s = a.__dask_postpersist__()\n postpersists.append(r(dsk, *s))\n\n return repack(postpersists)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_compute.return.repack_f_r_a_for_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_compute_compute.return.repack_f_r_a_for_r_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 510, "end_line": 572, "span_ids": ["compute"], "tokens": 573}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute(\n *args, traverse=True, optimize_graph=True, scheduler=None, get=None, **kwargs\n):\n \"\"\"Compute several dask collections at once.\n\n Parameters\n ----------\n args : object\n Any number of objects. If it is a dask object, it's computed and the\n result is returned. By default, python builtin collections are also\n traversed to look for dask objects (for more information see the\n ``traverse`` keyword). Non-dask arguments are passed through unchanged.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``compute``. For large collections this can be\n expensive. If none of the arguments contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n optimize_graph : bool, optional\n If True [default], the optimizations for each collection are applied\n before computation. Otherwise the graph is run as is. 
This can be\n useful for debugging.\n get : ``None``\n Should be left to ``None``. The ``get=`` keyword has been removed.\n kwargs\n Extra keywords to forward to the scheduler function.\n\n Examples\n --------\n >>> import dask as d\n >>> import dask.array as da\n >>> a = da.arange(10, chunks=2).sum()\n >>> b = da.arange(10, chunks=2).mean()\n >>> d.compute(a, b)\n (45, 4.5)\n\n By default, dask objects inside python collections will also be computed:\n\n >>> d.compute({'a': a, 'b': b, 'c': 1})\n ({'a': 45, 'b': 4.5, 'c': 1},)\n \"\"\"\n\n collections, repack = unpack_collections(*args, traverse=traverse)\n if not collections:\n return args\n\n schedule = get_scheduler(\n scheduler=scheduler,\n collections=collections,\n get=get,\n )\n\n dsk = collections_to_dsk(collections, optimize_graph, **kwargs)\n keys, postcomputes = [], []\n for x in collections:\n keys.append(x.__dask_keys__())\n postcomputes.append(x.__dask_postcompute__())\n\n results = schedule(dsk, keys, **kwargs)\n return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_numpy_register_numpy.normalize_ufunc.try_.except_AttributeError_.return.normalize_function_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_numpy_register_numpy.normalize_ufunc.try_.except_AttributeError_.return.normalize_function_x_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1067, "end_line": 1141, "span_ids": ["register_numpy"], "tokens": 582}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register_lazy(\"numpy\")\ndef register_numpy():\n import numpy as np\n\n @normalize_token.register(np.ndarray)\n def normalize_array(x):\n if not x.shape:\n return (x.item(), x.dtype)\n if hasattr(x, \"mode\") and getattr(x, \"filename\", None):\n if hasattr(x.base, \"ctypes\"):\n offset = (\n x.ctypes._as_parameter_.value - x.base.ctypes._as_parameter_.value\n )\n else:\n offset = 0 # root memmaps have an mmap object as base\n if hasattr(\n x, \"offset\"\n ): # offset numpy used while opening, not the offset to the beginning of the file\n offset += getattr(x, \"offset\")\n return (\n x.filename,\n os.path.getmtime(x.filename),\n x.dtype,\n x.shape,\n x.strides,\n offset,\n )\n if x.dtype.hasobject:\n try:\n try:\n # string fast-path\n data = hash_buffer_hex(\n \"-\".join(x.flat).encode(\n encoding=\"utf-8\", errors=\"surrogatepass\"\n )\n )\n except UnicodeDecodeError:\n # bytes fast-path\n data = hash_buffer_hex(b\"-\".join(x.flat))\n except (TypeError, UnicodeDecodeError):\n try:\n data = hash_buffer_hex(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))\n except Exception:\n # pickling not supported, use UUID4-based fallback\n if not config.get(\"tokenize.ensure-deterministic\"):\n data = uuid.uuid4().hex\n else:\n raise RuntimeError(\n f\"``np.ndarray`` with object 
``dtype`` {str(x)} cannot \"\n \"be deterministically hashed. Please see \"\n \"https://docs.dask.org/en/latest/custom-collections.html#implementing-deterministic-hashing \" # noqa: E501\n \"for more information\"\n )\n else:\n try:\n data = hash_buffer_hex(x.ravel(order=\"K\").view(\"i1\"))\n except (BufferError, AttributeError, ValueError):\n data = hash_buffer_hex(x.copy().ravel(order=\"K\").view(\"i1\"))\n return (data, x.dtype, x.shape, x.strides)\n\n @normalize_token.register(np.matrix)\n def normalize_matrix(x):\n return type(x).__name__, normalize_array(x.view(type=np.ndarray))\n\n normalize_token.register(np.dtype, repr)\n normalize_token.register(np.generic, repr)\n\n @normalize_token.register(np.ufunc)\n def normalize_ufunc(x):\n try:\n name = x.__name__\n if getattr(np, name) is x:\n return \"np.\" + name\n else:\n # not a top-level numpy ufunc; hash the function itself\n return normalize_function(x)\n except AttributeError:\n return normalize_function(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_scipy_register_scipy.normalize_dok_matrix.return.type_x___name___normali": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_scipy_register_scipy.normalize_dok_matrix.return.type_x___name___normali", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1144, "end_line": 1166, "span_ids": ["register_scipy"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register_lazy(\"scipy\")\ndef register_scipy():\n import scipy.sparse as sp\n\n def normalize_sparse_matrix(x, attrs):\n return (\n type(x).__name__,\n normalize_seq(normalize_token(getattr(x, key)) for key in attrs),\n )\n\n for cls, attrs in [\n (sp.dia_matrix, (\"data\", \"offsets\", \"shape\")),\n (sp.bsr_matrix, (\"data\", \"indices\", \"indptr\", \"blocksize\", \"shape\")),\n (sp.coo_matrix, (\"data\", \"row\", \"col\", \"shape\")),\n (sp.csr_matrix, (\"data\", \"indices\", \"indptr\", \"shape\")),\n (sp.csc_matrix, (\"data\", \"indices\", \"indptr\", \"shape\")),\n (sp.lil_matrix, (\"data\", \"rows\", \"shape\")),\n ]:\n normalize_token.register(cls, partial(normalize_sparse_matrix, attrs=attrs))\n\n @normalize_token.register(sp.dok_matrix)\n def normalize_dok_matrix(x):\n return type(x).__name__, normalize_token(sorted(x.items()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__colorize__colorize.return._h": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__colorize__colorize.return._h", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 952, 
"end_line": 968, "span_ids": ["_colorize"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _colorize(t):\n \"\"\"Convert (r, g, b) triple to \"#RRGGBB\" string\n\n For use with ``visualize(color=...)``\n\n Examples\n --------\n >>> _colorize((255, 255, 255))\n '#FFFFFF'\n >>> _colorize((0, 32, 128))\n '#002080'\n \"\"\"\n t = t[:3]\n i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t))\n h = hex(int(i))[2:].upper()\n h = \"0\" * (6 - len(h)) + h\n return \"#\" + h", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_named_schedulers_get_err_msg._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_named_schedulers_get_err_msg._", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1216, "end_line": 1254, "span_ids": ["impl:15"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "named_schedulers = {\n \"sync\": local.get_sync,\n \"synchronous\": local.get_sync,\n \"single-threaded\": local.get_sync,\n \"threads\": threaded.get,\n \"threading\": threaded.get,\n}\n\ntry:\n from dask import multiprocessing as dask_multiprocessing\nexcept ImportError:\n pass\nelse:\n named_schedulers.update(\n {\n \"processes\": dask_multiprocessing.get,\n \"multiprocessing\": dask_multiprocessing.get,\n }\n )\n\n\nget_err_msg = \"\"\"\nThe get= keyword has been removed.\n\nPlease use the scheduler= keyword instead with the name of\nthe desired scheduler like 'threads' or 'processes'\n\n x.compute(scheduler='single-threaded')\n x.compute(scheduler='threads')\n x.compute(scheduler='processes')\n\nor with a function that takes the graph and keys\n\n x.compute(scheduler=my_scheduler_function)\n\nor with a Dask client\n\n x.compute(scheduler=client)\n\"\"\".strip()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_blockwise_blockwise.return.subgraph": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_blockwise_blockwise.return.subgraph", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 240, "end_line": 330, "span_ids": ["blockwise"], "tokens": 689}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def blockwise(\n func,\n output,\n output_indices,\n *arrind_pairs,\n numblocks=None,\n concatenate=None,\n new_axes=None,\n dependencies=(),\n **kwargs,\n):\n \"\"\"Create a Blockwise symbolic mutable mapping\n\n This is like the ``make_blockwise_graph`` function, but rather than construct a\n dict, it returns a symbolic Blockwise object.\n\n ``*arrind_pairs`` is similar to those in `make_blockwise_graph`, but in addition to\n allowing for collections it can accept BlockwiseDep instances, which allows for lazy\n evaluation of arguments to ``func`` which might be different for different\n chunks/paritions.\n\n See Also\n --------\n make_blockwise_graph\n Blockwise\n \"\"\"\n new_axes = new_axes or {}\n\n arrind_pairs = list(arrind_pairs)\n\n # Transform indices to canonical elements\n # We use terms like _0, and _1 rather than provided index elements\n unique_indices = {\n i for ii in arrind_pairs[1::2] if ii is not None for i in ii\n } | set(output_indices)\n sub = {k: blockwise_token(i, \".\") for i, k in enumerate(sorted(unique_indices))}\n output_indices = index_subs(tuple(output_indices), sub)\n a_pairs_list = []\n for a in arrind_pairs[1::2]:\n if a is not None:\n val = tuple(a)\n else:\n val = a\n a_pairs_list.append(index_subs(val, sub))\n\n arrind_pairs[1::2] = a_pairs_list\n new_axes = {index_subs((k,), sub)[0]: v for k, v in new_axes.items()}\n\n # Unpack dask values in non-array arguments\n inputs = []\n inputs_indices = []\n for name, index in toolz.partition(2, arrind_pairs):\n inputs.append(name)\n inputs_indices.append(index)\n\n # Unpack delayed objects in kwargs\n new_keys = {n for c in dependencies for n in c.__dask_layers__()}\n if kwargs:\n # replace keys in kwargs with _0 tokens\n new_tokens = tuple(\n blockwise_token(i) for i in range(len(inputs), len(inputs) + len(new_keys))\n )\n sub = dict(zip(new_keys, new_tokens))\n inputs.extend(new_keys)\n inputs_indices.extend((None,) * len(new_keys))\n kwargs = subs(kwargs, sub)\n\n indices = [(k, v) for k, v in zip(inputs, inputs_indices)]\n keys = map(blockwise_token, range(len(inputs)))\n\n # Construct local graph\n if not kwargs:\n subgraph = {output: (func,) + tuple(keys)}\n else:\n _keys = list(keys)\n if new_keys:\n _keys = _keys[: -len(new_keys)]\n kwargs2 = (dict, list(map(list, kwargs.items())))\n subgraph = {output: (apply, func, _keys, kwargs2)}\n\n # Construct final output\n subgraph = Blockwise(\n output,\n output_indices,\n subgraph,\n indices,\n numblocks=numblocks,\n concatenate=concatenate,\n new_axes=new_axes,\n )\n return subgraph", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph._Tensor_operation_make_blockwise_graph._Tensor_operation": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph._Tensor_operation_make_blockwise_graph._Tensor_operation", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 880, "end_line": 
982, "span_ids": ["make_blockwise_graph"], "tokens": 1758}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_blockwise_graph(\n func,\n output,\n out_indices,\n *arrind_pairs,\n numblocks=None,\n concatenate=None,\n new_axes=None,\n output_blocks=None,\n dims=None,\n deserializing=False,\n func_future_args=None,\n return_key_deps=False,\n io_deps=None,\n **kwargs,\n):\n \"\"\"Tensor operation\n\n Applies a function, ``func``, across blocks from many different input\n collections. We arrange the pattern with which those blocks interact with\n sets of matching indices. E.g.::\n\n make_blockwise_graph(func, 'z', 'i', 'x', 'i', 'y', 'i')\n\n yield an embarrassingly parallel communication pattern and is read as\n\n $$ z_i = func(x_i, y_i) $$\n\n More complex patterns may emerge, including multiple indices::\n\n make_blockwise_graph(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')\n\n $$ z_{ij} = func(x_{ij}, y_{ji}) $$\n\n Indices missing in the output but present in the inputs results in many\n inputs being sent to one function (see examples).\n\n Examples\n --------\n Simple embarrassing map operation\n\n >>> inc = lambda x: x + 1\n >>> make_blockwise_graph(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (inc, ('x', 0, 0)),\n ('z', 0, 1): (inc, ('x', 0, 1)),\n ('z', 1, 0): (inc, ('x', 1, 0)),\n ('z', 1, 1): (inc, ('x', 1, 1))}\n\n Simple operation on two datasets\n\n >>> add = lambda x, y: x + y\n >>> make_blockwise_graph(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),\n ... 'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),\n ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),\n ('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),\n ('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}\n\n Operation that flips one of the datasets\n\n >>> addT = lambda x, y: x + y.T # Transpose each chunk\n >>> # z_ij ~ x_ij y_ji\n >>> # .. .. .. notice swap\n >>> make_blockwise_graph(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),\n ... 'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),\n ('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),\n ('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),\n ('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}\n\n Dot product with contraction over ``j`` index. Yields list arguments\n\n >>> make_blockwise_graph(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),\n ... 'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],\n [('y', 0, 0), ('y', 1, 0)]),\n ('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],\n [('y', 0, 1), ('y', 1, 1)]),\n ('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],\n [('y', 0, 0), ('y', 1, 0)]),\n ('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],\n [('y', 0, 1), ('y', 1, 1)])}\n\n Pass ``concatenate=True`` to concatenate arrays ahead of time\n\n >>> make_blockwise_graph(f, 'z', 'i', 'x', 'ij', 'y', 'ij', concatenate=True,\n ... 
numblocks={'x': (2, 2), 'y': (2, 2,)}) # doctest: +SKIP\n {('z', 0): (f, (concatenate_axes, [('x', 0, 0), ('x', 0, 1)], (1,)),\n (concatenate_axes, [('y', 0, 0), ('y', 0, 1)], (1,)))\n ('z', 1): (f, (concatenate_axes, [('x', 1, 0), ('x', 1, 1)], (1,)),\n (concatenate_axes, [('y', 1, 0), ('y', 1, 1)], (1,)))}\n\n Supports Broadcasting rules\n\n >>> make_blockwise_graph(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),\n ... 'y': (2, 2)}) # doctest: +SKIP\n {('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),\n ('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),\n ('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),\n ('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}\n\n Support keyword arguments with apply\n\n >>> def f(a, b=0): return a + b\n >>> make_blockwise_graph(f, 'z', 'i', 'x', 'i', numblocks={'x': (2,)}, b=10) # doctest: +SKIP\n {('z', 0): (apply, f, [('x', 0)], {'b': 10}),\n ('z', 1): (apply, f, [('x', 1)], {'b': 10})}\n\n Include literals by indexing with ``None``\n\n >>> make_blockwise_graph(add, 'z', 'i', 'x', 'i', 100, None, numblocks={'x': (2,)}) # doctest: +SKIP\n {('z', 0): (add, ('x', 0), 100),\n ('z', 1): (add, ('x', 1), 100)}\n\n See Also\n --------\n dask.array.blockwise\n dask.blockwise.blockwise\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_product_lol_product.if_not_values_.else_.return.lol_product_head_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_product_lol_product.if_not_values_.else_.return.lol_product_head_value", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1104, "end_line": 1128, "span_ids": ["lol_product"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lol_product(head, values):\n \"\"\"List of list of tuple keys, similar to `itertools.product`.\n\n Parameters\n ----------\n head : tuple\n Prefix prepended to all results.\n values : sequence\n Mix of singletons and lists. 
Each list is substituted with every\n possible value and introduces another level of list in the output.\n\n Examples\n --------\n >>> lol_product(('x',), (1, 2, 3))\n ('x', 1, 2, 3)\n >>> lol_product(('x',), (1, [2, 3], 4, [5, 6])) # doctest: +NORMALIZE_WHITESPACE\n [[('x', 1, 2, 4, 5), ('x', 1, 2, 4, 6)],\n [('x', 1, 3, 4, 5), ('x', 1, 3, 4, 6)]]\n \"\"\"\n if not values:\n return head\n elif isinstance(values[0], list):\n return [lol_product(head + (x,), values[1:]) for x in values[0]]\n else:\n return lol_product(head + (values[0],), values[1:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_tuples_lol_tuples.if_ind_0_not_in_dummies_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_lol_tuples_lol_tuples.if_ind_0_not_in_dummies_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1131, "end_line": 1165, "span_ids": ["lol_tuples"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lol_tuples(head, ind, values, dummies):\n \"\"\"List of list of tuple keys\n\n Parameters\n ----------\n head : tuple\n The known tuple so far\n ind : Iterable\n An iterable of indices not yet covered\n values : dict\n Known values for non-dummy indices\n dummies : dict\n Ranges of values for dummy indices\n\n Examples\n --------\n >>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})\n ('x', 1, 0)\n\n >>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})\n [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]\n\n >>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE\n [[('x', 1, 0, 0), ('x', 1, 0, 1)],\n [('x', 1, 1, 0), ('x', 1, 1, 1)],\n [('x', 1, 2, 0), ('x', 1, 2, 1)]]\n \"\"\"\n if not ind:\n return head\n if ind[0] not in dummies:\n return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)\n else:\n return [\n lol_tuples(head + (v,), ind[1:], values, dummies) for v in dummies[ind[0]]\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_optimize_blockwise_optimize_blockwise.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_optimize_blockwise_optimize_blockwise.return.out", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1168, "end_line": 1197, "span_ids": ["optimize_blockwise"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize_blockwise(graph, keys=()):\n \"\"\"High level optimization of stacked Blockwise layers\n\n For operations that have multiple Blockwise operations one after the other, like\n ``x.T + 123`` we can fuse these into a single Blockwise operation. This happens\n before any actual tasks are generated, and so can reduce overhead.\n\n This finds groups of Blockwise operations that can be safely fused, and then\n passes them to ``rewrite_blockwise`` for rewriting.\n\n Parameters\n ----------\n graph : HighLevelGraph\n keys : Iterable\n The keys of all outputs of all collections.\n Used to make sure that we don't fuse a layer needed by an output\n\n Returns\n -------\n HighLevelGraph\n\n See Also\n --------\n rewrite_blockwise\n \"\"\"\n out = _optimize_blockwise(graph, keys=keys)\n while out.dependencies != graph.dependencies:\n graph = out\n out = _optimize_blockwise(graph, keys=keys)\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__optimize_blockwise__optimize_blockwise.return.HighLevelGraph_out_depen": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__optimize_blockwise__optimize_blockwise.return.HighLevelGraph_out_depen", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1200, "end_line": 1286, "span_ids": ["_optimize_blockwise"], "tokens": 601}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _optimize_blockwise(full_graph, keys=()):\n keep = {k[0] if type(k) is tuple else k for k in keys}\n layers = full_graph.layers\n dependents = reverse_dict(full_graph.dependencies)\n roots = {k for k in full_graph.layers if not dependents.get(k)}\n stack = list(roots)\n\n out = {}\n dependencies = {}\n seen = set()\n io_names = set()\n\n while stack:\n layer = stack.pop()\n if layer in seen or layer not in layers:\n continue\n seen.add(layer)\n\n # Outer loop walks through possible output Blockwise layers\n if isinstance(layers[layer], Blockwise):\n blockwise_layers = {layer}\n deps = set(blockwise_layers)\n io_names |= layers[layer].io_deps.keys()\n while deps: # we gather as many sub-layers as we can\n dep = deps.pop()\n\n if dep not in layers:\n stack.append(dep)\n continue\n if not isinstance(layers[dep], Blockwise):\n stack.append(dep)\n continue\n if dep != layer and dep in keep:\n stack.append(dep)\n continue\n if layers[dep].concatenate != layers[layer].concatenate:\n stack.append(dep)\n continue\n if (\n sum(k == dep for k, ind in layers[layer].indices if ind is not None)\n > 1\n ):\n stack.append(dep)\n continue\n if (\n blockwise_layers\n and layers[next(iter(blockwise_layers))].annotations\n != layers[dep].annotations\n ):\n stack.append(dep)\n 
continue\n\n # passed everything, proceed\n blockwise_layers.add(dep)\n\n # traverse further to this child's children\n for d in full_graph.dependencies.get(dep, ()):\n # Don't allow reductions to proceed\n output_indices = set(layers[dep].output_indices)\n input_indices = {\n i for _, ind in layers[dep].indices if ind for i in ind\n }\n\n if len(dependents[d]) <= 1 and output_indices.issuperset(\n input_indices\n ):\n deps.add(d)\n else:\n stack.append(d)\n\n # Merge these Blockwise layers into one\n new_layer = rewrite_blockwise([layers[l] for l in blockwise_layers])\n out[layer] = new_layer\n\n new_deps = set()\n for k, v in new_layer.indices:\n if v is None:\n new_deps |= keys_in_tasks(full_graph.dependencies, [k])\n elif k not in io_names:\n new_deps.add(k)\n dependencies[layer] = new_deps\n else:\n out[layer] = layers[layer]\n dependencies[layer] = full_graph.dependencies.get(layer, set())\n stack.extend(full_graph.dependencies.get(layer, ()))\n\n return HighLevelGraph(out, dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_zero_broadcast_dimensions_zero_broadcast_dimensions.return.homogeneous_deepmap_f_lo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_zero_broadcast_dimensions_zero_broadcast_dimensions.return.homogeneous_deepmap_f_lo", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1461, "end_line": 1480, "span_ids": ["zero_broadcast_dimensions"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@_deprecated()\ndef zero_broadcast_dimensions(lol, nblocks):\n \"\"\"\n >>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]\n >>> nblocks = (4, 1, 2) # note singleton dimension in second place\n >>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],\n ... [('x', 1, 1, 0), ('x', 1, 1, 1)],\n ... 
[('x', 1, 2, 0), ('x', 1, 2, 1)]]\n\n >>> zero_broadcast_dimensions(lol, nblocks) # doctest: +SKIP\n [[('x', 1, 0, 0), ('x', 1, 0, 1)],\n [('x', 1, 0, 0), ('x', 1, 0, 1)],\n [('x', 1, 0, 0), ('x', 1, 0, 1)]]\n\n See Also\n --------\n lol_tuples\n \"\"\"\n f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))\n return homogeneous_deepmap(f, lol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_broadcast_dimensions_broadcast_dimensions.return.toolz_valmap_toolz_first_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_broadcast_dimensions_broadcast_dimensions.return.toolz_valmap_toolz_first_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1483, "end_line": 1540, "span_ids": ["broadcast_dimensions"], "tokens": 552}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,)), consolidate=None):\n \"\"\"Find block dimensions from arguments\n\n Parameters\n ----------\n argpairs : iterable\n name, ijk index pairs\n numblocks : dict\n maps {name: number of blocks}\n sentinels : iterable (optional)\n values for singleton dimensions\n consolidate : func (optional)\n use this to reduce each set of common blocks into a smaller set\n\n Examples\n --------\n >>> argpairs = [('x', 'ij'), ('y', 'ji')]\n >>> numblocks = {'x': (2, 3), 'y': (3, 2)}\n >>> broadcast_dimensions(argpairs, numblocks)\n {'i': 2, 'j': 3}\n\n Supports numpy broadcasting rules\n\n >>> argpairs = [('x', 'ij'), ('y', 'ij')]\n >>> numblocks = {'x': (2, 1), 'y': (1, 3)}\n >>> broadcast_dimensions(argpairs, numblocks)\n {'i': 2, 'j': 3}\n\n Works in other contexts too\n\n >>> argpairs = [('x', 'ij'), ('y', 'ij')]\n >>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}\n >>> broadcast_dimensions(argpairs, d)\n {'i': 'Hello', 'j': (2, 3)}\n \"\"\"\n # List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]\n argpairs2 = [(a, ind) for a, ind in argpairs if ind is not None]\n L = toolz.concat(\n [\n zip(inds, dims)\n for (x, inds), (x, dims) in toolz.join(\n toolz.first, argpairs2, toolz.first, numblocks.items()\n )\n ]\n )\n\n g = toolz.groupby(0, L)\n g = {k: {d for i, d in v} for k, v in g.items()}\n\n g2 = {k: v - set(sentinels) if len(v) > 1 else v for k, v in g.items()}\n\n if consolidate:\n return toolz.valmap(consolidate, g2)\n\n if g2 and not set(map(len, g2.values())) == {1}:\n raise ValueError(\"Shapes do not align %s\" % g)\n\n return toolz.valmap(toolz.first, g2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes.if_blocksize_is_None__": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_read_bytes.if_blocksize_is_None__", "embedding": null, "metadata": {"file_path": "dask/bytes/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 193, "span_ids": ["read_bytes", "read_block_from_file"], "tokens": 742}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_bytes(\n urlpath,\n delimiter=None,\n not_zero=False,\n blocksize=\"128 MiB\",\n sample=\"10 kiB\",\n compression=None,\n include_path=False,\n **kwargs,\n):\n # ... other code\n\n if blocksize is None:\n offsets = [[0]] * len(paths)\n lengths = [[None]] * len(paths)\n else:\n offsets = []\n lengths = []\n for path in paths:\n if compression == \"infer\":\n comp = infer_compression(path)\n else:\n comp = compression\n if comp is not None:\n raise ValueError(\n \"Cannot do chunked reads on compressed files. \"\n \"To read, set blocksize=None\"\n )\n size = fs.info(path)[\"size\"]\n if size is None:\n raise ValueError(\n \"Backing filesystem couldn't determine file size, cannot \"\n \"do chunked reads. To read, set blocksize=None.\"\n )\n\n elif size == 0:\n # skip empty\n offsets.append([])\n lengths.append([])\n else:\n # shrink blocksize to give same number of parts\n if size % blocksize and size > blocksize:\n blocksize1 = size / (size // blocksize)\n else:\n blocksize1 = blocksize\n place = 0\n off = [0]\n length = []\n\n # figure out offsets, spreading around spare bytes\n while size - place > (blocksize1 * 2) - 1:\n place += blocksize1\n off.append(int(place))\n length.append(off[-1] - off[-2])\n length.append(size - off[-1])\n\n if not_zero:\n off[0] = 1\n length[0] -= 1\n offsets.append(off)\n lengths.append(length)\n\n delayed_read = delayed(read_block_from_file)\n\n out = []\n for path, offset, length in zip(paths, offsets, lengths):\n token = tokenize(fs_token, delimiter, path, fs.ukey(path), compression, offset)\n keys = [f\"read-block-{o}-{token}\" for o in offset]\n values = [\n delayed_read(\n OpenFile(fs, path, compression=compression),\n o,\n l,\n delimiter,\n dask_key_name=key,\n )\n for o, key, l in zip(offset, keys, length)\n ]\n out.append(values)\n\n if sample:\n if sample is True:\n sample = \"10 kiB\" # backwards compatibility\n if isinstance(sample, str):\n sample = parse_bytes(sample)\n with OpenFile(fs, paths[0], compression=compression) as f:\n # read block without seek (because we start at zero)\n if delimiter is None:\n sample = f.read(sample)\n else:\n sample_buff = f.read(sample)\n while True:\n new = f.read(sample)\n if not new:\n break\n if delimiter in new:\n sample_buff = (\n sample_buff + new.split(delimiter, 1)[0] + delimiter\n )\n break\n sample_buff = sample_buff + new\n sample = sample_buff\n if include_path:\n return sample, out, paths\n return sample, out\n\n\ndef read_block_from_file(lazy_file, off, bs, delimiter):\n with copy.copy(lazy_file) as f:\n if off == 0 and bs is None:\n return f.read()\n return read_block(f, off, bs, delimiter)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_io_test_read_block.for_ols_in_0_3_3_.assert_b_join_filter_No": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_io_test_read_block.for_ols_in_0_3_3_.assert_b_join_filter_No", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 32, "span_ids": ["imports", "test_read_block"], "tokens": 391}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport os\nimport pathlib\n\nimport pytest\nfrom fsspec.utils import (\n infer_storage_options,\n read_block,\n seek_delimiter,\n stringify_path,\n)\n\n\ndef test_read_block():\n delimiter = b\"\\n\"\n data = delimiter.join([b\"123\", b\"456\", b\"789\"])\n f = io.BytesIO(data)\n\n assert read_block(f, 1, 2) == b\"23\"\n assert read_block(f, 0, 1, delimiter=b\"\\n\") == b\"123\\n\"\n assert read_block(f, 0, 2, delimiter=b\"\\n\") == b\"123\\n\"\n assert read_block(f, 0, 3, delimiter=b\"\\n\") == b\"123\\n\"\n assert read_block(f, 0, 5, delimiter=b\"\\n\") == b\"123\\n456\\n\"\n assert read_block(f, 0, 8, delimiter=b\"\\n\") == b\"123\\n456\\n789\"\n assert read_block(f, 0, 100, delimiter=b\"\\n\") == b\"123\\n456\\n789\"\n assert read_block(f, 1, 1, delimiter=b\"\\n\") == b\"\"\n assert read_block(f, 1, 5, delimiter=b\"\\n\") == b\"456\\n\"\n assert read_block(f, 1, 8, delimiter=b\"\\n\") == b\"456\\n789\"\n\n for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)], [(0, 4), (4, 4), (8, 4)]]:\n out = [read_block(f, o, l, b\"\\n\") for o, l in ols]\n assert b\"\".join(filter(None, out)) == data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_seek_delimiter_endline_test_seek_delimiter_endline.assert_f_tell_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_seek_delimiter_endline_test_seek_delimiter_endline.assert_f_tell_7", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 36, "end_line": 60, "span_ids": ["test_seek_delimiter_endline"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_seek_delimiter_endline():\n f = io.BytesIO(b\"123\\n456\\n789\")\n\n # if at zero, stay at zero\n seek_delimiter(f, b\"\\n\", 5)\n assert f.tell() == 0\n\n # choose the first block\n for bs in [1, 5, 100]:\n f.seek(1)\n 
seek_delimiter(f, b\"\\n\", blocksize=bs)\n assert f.tell() == 4\n\n # handle long delimiters well, even with short blocksizes\n f = io.BytesIO(b\"123abc456abc789\")\n for bs in [1, 2, 3, 4, 5, 6, 10]:\n f.seek(1)\n seek_delimiter(f, b\"abc\", blocksize=bs)\n assert f.tell() == 6\n\n # End at the end\n f = io.BytesIO(b\"123\\n456\")\n f.seek(5)\n seek_delimiter(f, b\"\\n\", 5)\n assert f.tell() == 7", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_test_infer_storage_options.None_2.infer_storage_options_hd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_test_infer_storage_options.None_2.infer_storage_options_hd", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 63, "end_line": 115, "span_ids": ["test_infer_storage_options"], "tokens": 599}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_infer_storage_options():\n so = infer_storage_options(\"/mnt/datasets/test.csv\")\n assert so.pop(\"protocol\") == \"file\"\n assert so.pop(\"path\") == \"/mnt/datasets/test.csv\"\n assert not so\n\n assert infer_storage_options(\"./test.csv\")[\"path\"] == \"./test.csv\"\n assert infer_storage_options(\"../test.csv\")[\"path\"] == \"../test.csv\"\n\n so = infer_storage_options(\"C:\\\\test.csv\")\n assert so.pop(\"protocol\") == \"file\"\n assert so.pop(\"path\") == \"C:\\\\test.csv\"\n assert not so\n\n assert infer_storage_options(\"d:\\\\test.csv\")[\"path\"] == \"d:\\\\test.csv\"\n assert infer_storage_options(\"\\\\test.csv\")[\"path\"] == \"\\\\test.csv\"\n assert infer_storage_options(\".\\\\test.csv\")[\"path\"] == \".\\\\test.csv\"\n assert infer_storage_options(\"test.csv\")[\"path\"] == \"test.csv\"\n\n so = infer_storage_options(\n \"hdfs://username:pwd@Node:123/mnt/datasets/test.csv?q=1#fragm\",\n inherit_storage_options={\"extra\": \"value\"},\n )\n assert so.pop(\"protocol\") == \"hdfs\"\n assert so.pop(\"username\") == \"username\"\n assert so.pop(\"password\") == \"pwd\"\n assert so.pop(\"host\") == \"Node\"\n assert so.pop(\"port\") == 123\n assert so.pop(\"path\") == \"/mnt/datasets/test.csv#fragm\"\n assert so.pop(\"url_query\") == \"q=1\"\n assert so.pop(\"url_fragment\") == \"fragm\"\n assert so.pop(\"extra\") == \"value\"\n assert not so\n\n so = infer_storage_options(\"hdfs://User-name@Node-name.com/mnt/datasets/test.csv\")\n assert so.pop(\"username\") == \"User-name\"\n assert so.pop(\"host\") == \"Node-name.com\"\n\n u = \"http://127.0.0.1:8080/test.csv\"\n assert infer_storage_options(u) == {\"protocol\": \"http\", \"path\": u}\n\n # For s3 and gcs the netloc is actually the bucket name, so we want to\n # include it in the path. 
Test that:\n # - Parsing doesn't lowercase the bucket\n # - The bucket is included in path\n for protocol in [\"s3\", \"gcs\", \"gs\"]:\n options = infer_storage_options(\"%s://Bucket-name.com/test.csv\" % protocol)\n assert options[\"path\"] == \"Bucket-name.com/test.csv\"\n\n with pytest.raises(KeyError):\n infer_storage_options(\"file:///bucket/file.csv\", {\"path\": \"collide\"})\n with pytest.raises(KeyError):\n infer_storage_options(\"hdfs:///bucket/file.csv\", {\"protocol\": \"collide\"})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_c_test_infer_storage_options_c.assert_so_path_expe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_infer_storage_options_c_test_infer_storage_options_c.assert_so_path_expe", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 132, "span_ids": ["test_infer_storage_options_c"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"urlpath, expected_path\",\n (\n (r\"c:\\foo\\bar\", r\"c:\\foo\\bar\"),\n (r\"C:\\\\foo\\bar\", r\"C:\\\\foo\\bar\"),\n (r\"c:/foo/bar\", r\"c:/foo/bar\"),\n (r\"file:///c|\\foo\\bar\", r\"c:\\foo\\bar\"),\n (r\"file:///C|/foo/bar\", r\"C:/foo/bar\"),\n (r\"file:///C:/foo/bar\", r\"C:/foo/bar\"),\n ),\n)\ndef test_infer_storage_options_c(urlpath, expected_path):\n so = infer_storage_options(urlpath)\n assert so[\"protocol\"] == \"file\"\n assert so[\"path\"] == expected_path", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_stringify_path_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_bytes_utils.py_test_stringify_path_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_bytes_utils.py", "file_name": "test_bytes_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 135, "end_line": 158, "span_ids": ["test_stringify_path"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stringify_path():\n test_filepath = os.path.join(\"path\", \"to\", \"file.txt\")\n\n # Pathlib.path\n path = pathlib.Path(test_filepath)\n assert stringify_path(path) == test_filepath\n\n # fspath 
protocol\n class CustomFSPath:\n \"\"\"For testing fspath on unknown objects\"\"\"\n\n def __init__(self, path):\n self.path = path\n\n def __fspath__(self):\n return self.path\n\n path = CustomFSPath(test_filepath)\n assert stringify_path(path) == test_filepath\n\n # Non path-like input is unaffected\n path = (1, 2, 3)\n assert stringify_path(path) is path", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_compression.py_from_io_import_BytesIO_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_compression.py_from_io_import_BytesIO_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_compression.py", "file_name": "test_compression.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports", "test_files"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from io import BytesIO\n\nimport pytest\nfrom fsspec.compression import compr\n\nfrom dask.bytes.utils import compress\n\n\n@pytest.mark.parametrize(\"fmt,File\", compr.items())\ndef test_files(fmt, File):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n if fmt is None:\n return\n data = b\"1234\" * 1000\n compressed = compress[fmt](data)\n\n b = BytesIO(compressed)\n g = File(b, mode=\"rb\")\n data2 = g.read()\n g.close()\n assert data == data2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_os_require_pyarrow.pytest_mark_skipif_not_py": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_os_require_pyarrow.pytest_mark_skipif_not_py", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 53, "span_ids": ["imports", "impl:18", "hdfs"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport posixpath\n\nimport pytest\nfrom fsspec.core import get_fs_token_paths, open_files\nfrom tlz import concat\n\nimport dask\nimport dask.bag as db\nfrom dask.bytes.core import read_bytes\n\ntry:\n import distributed\n from distributed import Client\n from distributed.utils_test import cluster, loop # noqa: F401\nexcept (ImportError, SyntaxError):\n distributed = None\n\n\nif not os.environ.get(\"DASK_RUN_HDFS_TESTS\", \"\"):\n pytestmark = 
pytest.mark.skip(reason=\"HDFS tests not configured to run\")\n\npyarrow = pytest.importorskip(\"pyarrow\")\n\ntry:\n from pyarrow.hdfs import _connect, _maybe_set_hadoop_classpath\nexcept ImportError:\n try:\n from pyarrow._hdfs import _maybe_set_hadoop_classpath\n from pyarrow._hdfs import connect as _connect\n except ImportError:\n pyarrow = False\n\nbasedir = \"/tmp/test-dask\"\n\n\n@pytest.fixture\ndef hdfs(request):\n _maybe_set_hadoop_classpath()\n hdfs = _connect(host=\"localhost\", port=8020)\n\n if hdfs.exists(basedir):\n hdfs.rm(basedir, recursive=True)\n hdfs.mkdir(basedir)\n\n yield hdfs\n\n if hdfs.exists(basedir):\n hdfs.rm(basedir, recursive=True)\n\n\n# This mark doesn't check the minimum pyarrow version.\nrequire_pyarrow = pytest.mark.skipif(not pyarrow, reason=\"pyarrow not installed\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_test_read_bytes.assert_b_join_r_for_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_test_read_bytes.assert_b_join_r_for_r", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 61, "span_ids": ["test_read_bytes"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes(hdfs):\n nfiles = 10\n\n data = b\"a\" * int(1e3)\n\n for fn in [\"%s/file.%d\" % (basedir, i) for i in range(nfiles)]:\n with hdfs.open(fn, \"wb\", replication=1) as f:\n f.write(data)\n\n sample, values = read_bytes(\"hdfs://%s/file.*\" % basedir)\n\n (results,) = dask.compute(values)\n assert [b\"\".join(r) for r in results] == nfiles * [data]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_URL_test_read_bytes_URL.assert_b_join_r_for_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_URL_test_read_bytes_URL.assert_b_join_r_for_r", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 76, "span_ids": ["test_read_bytes_URL"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_URL(hdfs):\n nfiles = 10\n data = b\"a\" * int(1e3)\n\n 
for fn in [\"%s/file.%d\" % (basedir, i) for i in range(nfiles)]:\n with hdfs.open(fn, \"wb\", replication=1) as f:\n f.write(data)\n\n path = \"hdfs://localhost:8020%s/file.*\" % basedir\n sample, values = read_bytes(path)\n\n (results,) = dask.compute(values)\n assert [b\"\".join(r) for r in results] == nfiles * [data]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_big_file_test_read_bytes_big_file.for_r_in_results_.assert_set_r_decode_utf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_bytes_big_file_test_read_bytes_big_file.for_r_in_results_.assert_set_r_decode_utf_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 98, "span_ids": ["test_read_bytes_big_file"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_big_file(hdfs):\n fn = \"%s/file\" % basedir\n\n # Write 100 MB file\n nblocks = int(1e3)\n blocksize = int(1e5)\n data = b\"a\" * blocksize\n with hdfs.open(fn, \"wb\", replication=1) as f:\n for i in range(nblocks):\n f.write(data)\n\n sample, values = read_bytes(\"hdfs://\" + fn, blocksize=blocksize)\n\n assert sample[:5] == b\"aaaaa\"\n assert len(values[0]) == nblocks\n\n (results,) = dask.compute(values[0])\n assert sum(map(len, results)) == nblocks * blocksize\n for r in results:\n assert set(r.decode(\"utf-8\")) == {\"a\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_deterministic_key_names_test_deterministic_key_names.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_deterministic_key_names_test_deterministic_key_names.None_4", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 113, "span_ids": ["test_deterministic_key_names"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deterministic_key_names(hdfs):\n data = b\"abc\\n\" * int(1e3)\n fn = \"%s/file\" % basedir\n\n with hdfs.open(fn, \"wb\", replication=1) as fil:\n fil.write(data)\n\n _, x = read_bytes(\"hdfs://%s/*\" % basedir, delimiter=b\"\\n\", sample=False)\n _, y = 
read_bytes(\"hdfs://%s/*\" % basedir, delimiter=b\"\\n\", sample=False)\n _, z = read_bytes(\"hdfs://%s/*\" % basedir, delimiter=b\"c\", sample=False)\n\n assert [f.key for f in concat(x)] == [f.key for f in concat(y)]\n assert [f.key for f in concat(x)] != [f.key for f in concat(z)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_open_files_write_test_open_files_write.assert_data_results": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_open_files_write_test_open_files_write.assert_data_results", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 128, "span_ids": ["test_open_files_write"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_open_files_write(hdfs):\n path = \"hdfs://%s/\" % basedir\n data = [b\"test data %i\" % i for i in range(5)]\n\n files = open_files(path, num=len(data), mode=\"wb\")\n for fil, b in zip(files, data):\n with fil as f:\n f.write(b)\n\n sample, vals = read_bytes(\"hdfs://%s/*.part\" % basedir)\n\n (results,) = dask.compute(list(concat(vals)))\n assert data == results", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_csv_test_read_csv.assert_df_id_sum_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_csv_test_read_csv.assert_df_id_sum_comput", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 131, "end_line": 143, "span_ids": ["test_read_csv"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv(hdfs):\n dd = pytest.importorskip(\"dask.dataframe\")\n\n with hdfs.open(\"%s/1.csv\" % basedir, \"wb\") as f:\n f.write(b\"name,amount,id\\nAlice,100,1\\nBob,200,2\")\n\n with hdfs.open(\"%s/2.csv\" % basedir, \"wb\") as f:\n f.write(b\"name,amount,id\\nCharlie,300,3\\nDennis,400,4\")\n\n df = dd.read_csv(\"hdfs://%s/*.csv\" % basedir)\n\n assert isinstance(df, dd.DataFrame)\n assert df.id.sum().compute() == 1 + 2 + 3 + 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_unicode_test_read_text_unicode.assert_len_result_0_stri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_unicode_test_read_text_unicode.assert_len_result_0_stri", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 176, "end_line": 187, "span_ids": ["test_read_text_unicode"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_unicode(hdfs):\n data = b\"abcd\\xc3\\xa9\"\n fn = \"%s/data.txt\" % basedir\n with hdfs.open(fn, \"wb\") as f:\n f.write(b\"\\n\".join([data, data]))\n\n f = db.read_text(\"hdfs://\" + fn, collection=False)\n\n result = f[0].compute()\n assert len(result) == 2\n assert list(map(str.strip, result)) == [data.decode(\"utf-8\")] * 2\n assert len(result[0].strip()) == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_parquet_pyarrow_test_parquet_pyarrow._smoke_test_on_read": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_parquet_pyarrow_test_parquet_pyarrow._smoke_test_on_read", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 190, "end_line": 207, "span_ids": ["test_parquet_pyarrow"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@require_pyarrow\ndef test_parquet_pyarrow(hdfs):\n dd = pytest.importorskip(\"dask.dataframe\")\n import numpy as np\n import pandas as pd\n\n fn = \"%s/test.parquet\" % basedir\n hdfs_fn = \"hdfs://%s\" % fn\n df = pd.DataFrame(np.random.normal(size=(1000, 4)), columns=list(\"abcd\"))\n ddf = dd.from_pandas(df, npartitions=4)\n\n ddf.to_parquet(hdfs_fn, engine=\"pyarrow\")\n\n assert len(hdfs.ls(fn)) # Files are written\n\n ddf2 = dd.read_parquet(hdfs_fn, engine=\"pyarrow\")\n\n assert len(ddf2) == 1000 # smoke test on read", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_glob_test_glob.None_10": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_glob_test_glob.None_10", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 254, "span_ids": ["test_glob"], "tokens": 493}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_glob(hdfs):\n\n tree = {\n basedir: ([\"c\", \"c2\"], [\"a\", \"a1\", \"a2\", \"a3\", \"b1\"]),\n basedir + \"/c\": ([\"d\"], [\"x1\", \"x2\"]),\n basedir + \"/c2\": ([\"d\"], [\"x1\", \"x2\"]),\n basedir + \"/c/d\": ([], [\"x3\"]),\n }\n\n hdfs, _, _ = get_fs_token_paths(\"hdfs:///\")\n hdfs.makedirs(basedir + \"/c/d\")\n hdfs.makedirs(basedir + \"/c2/d/\")\n for fn in (\n posixpath.join(dirname, f)\n for (dirname, (_, fils)) in tree.items()\n for f in fils\n ):\n with hdfs.open(fn, mode=\"wb\") as f2:\n f2.write(b\"000\")\n\n assert set(hdfs.glob(basedir + \"/a*\")) == {\n basedir + p for p in [\"/a\", \"/a1\", \"/a2\", \"/a3\"]\n }\n\n assert set(hdfs.glob(basedir + \"/c/*\")) == {\n basedir + p for p in [\"/c/x1\", \"/c/x2\", \"/c/d\"]\n }\n\n assert set(hdfs.glob(basedir + \"/*/x*\")) == {\n basedir + p for p in [\"/c/x1\", \"/c/x2\", \"/c2/x1\", \"/c2/x2\"]\n }\n assert set(hdfs.glob(basedir + \"/*/x1\")) == {\n basedir + p for p in [\"/c/x1\", \"/c2/x1\"]\n }\n\n assert hdfs.find(\"/this-path-doesnt-exist\") == []\n assert hdfs.find(basedir + \"/missing/\") == []\n assert hdfs.find(basedir + \"/missing/x1\") == []\n assert hdfs.glob(basedir + \"/missing/*\") == []\n assert hdfs.glob(basedir + \"/*/missing\") == []\n\n assert set(hdfs.glob(basedir + \"/*\")) == {\n basedir + p for p in [\"/a\", \"/a1\", \"/a2\", \"/a3\", \"/b1\", \"/c\", \"/c2\"]\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_distributed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_distributed_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 257, "end_line": 273, "span_ids": ["test_distributed"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not distributed, reason=\"Skipped as distributed is not installed.\" # noqa: F811\n) # noqa: F811\ndef test_distributed(hdfs, loop): # noqa: F811\n dd = pytest.importorskip(\"dask.dataframe\")\n\n with hdfs.open(\"%s/1.csv\" % basedir, \"wb\") as f:\n f.write(b\"name,amount,id\\nAlice,100,1\\nBob,200,2\")\n\n with hdfs.open(\"%s/2.csv\" % basedir, \"wb\") as f:\n 
f.write(b\"name,amount,id\\nCharlie,300,3\\nDennis,400,4\")\n\n with cluster() as (s, [a, b]):\n with Client(s[\"address\"], loop=loop): # noqa: F811\n df = dd.read_csv(\"hdfs://%s/*.csv\" % basedir)\n assert df.id.sum().compute() == 1 + 2 + 3 + 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_dir_server_dir_server.with_tmpdir_as_d_.p_terminate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_dir_server_dir_server.with_tmpdir_as_d_.p_terminate_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 21, "end_line": 41, "span_ids": ["dir_server"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(scope=\"module\")\ndef dir_server():\n with tmpdir() as d:\n for fn in files:\n with open(os.path.join(d, fn), \"wb\") as f:\n f.write(b\"a\" * 10000)\n\n cmd = [sys.executable, \"-m\", \"http.server\", \"8999\"]\n p = subprocess.Popen(cmd, cwd=d)\n timeout = 10\n while True:\n try:\n requests.get(\"http://localhost:8999\")\n break\n except requests.exceptions.ConnectionError as e:\n time.sleep(0.1)\n timeout -= 0.1\n if timeout < 0:\n raise RuntimeError(\"Server did not appear\") from e\n yield d\n p.terminate()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_errors_test_errors.with_f_as_f_.with_pytest_raises_ValueE.f_seek_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_errors_test_errors.with_f_as_f_.with_pytest_raises_ValueE.f_seek_1_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 139, "span_ids": ["test_errors"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_errors(dir_server):\n f = open_files(\"http://localhost:8999/doesnotexist\")[0]\n with pytest.raises(errs):\n with f as f:\n f.read()\n f = open_files(\"http://nohost/\")[0]\n\n expected = FileNotFoundError\n\n with pytest.raises(expected):\n with f as f:\n f.read()\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn, mode=\"wb\")[0]\n with pytest.raises(NotImplementedError):\n with f:\n pass\n f = open_files(root + fn)[0]\n with f 
as f:\n with pytest.raises(ValueError):\n f.seek(-1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_files_test_open_glob.assert_fs_1_path_htt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_files_test_open_glob.assert_fs_1_path_htt", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 151, "end_line": 164, "span_ids": ["test_files", "test_open_glob"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_files(dir_server):\n root = \"http://localhost:8999/\"\n fs = open_files([root + f for f in files])\n for f, f2 in zip(fs, files):\n with f as f:\n with open(os.path.join(dir_server, f2), \"rb\") as expected:\n assert f.read() == expected.read()\n\n\ndef test_open_glob(dir_server):\n root = \"http://localhost:8999/\"\n fs = open_files(root + \"/*\")\n assert fs[0].path == \"http://localhost:8999/a\"\n assert fs[1].path == \"http://localhost:8999/b\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_parquet_test_parquet.assert_df_columns_tolist_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_parquet_test_parquet.assert_df_columns_tolist_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 159, "end_line": 173, "span_ids": ["test_parquet"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.network\n@pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/5042\", strict=False)\ndef test_parquet():\n pytest.importorskip(\"requests\", minversion=\"2.21.0\")\n dd = pytest.importorskip(\"dask.dataframe\")\n pytest.importorskip(\"fastparquet\") # no pyarrow compatibility FS yet\n df = dd.read_parquet(\n [\n \"https://github.com/Parquet/parquet-compatibility/raw/\"\n \"master/parquet-testdata/impala/1.1.1-NONE/\"\n \"nation.impala.parquet\"\n ]\n ).compute()\n assert df.n_nationkey.tolist() == list(range(25))\n assert df.columns.tolist() == [\"n_nationkey\", \"n_name\", \"n_regionkey\", \"n_comment\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_gzip_test_unordered_urlpath_errors.with_pytest_raises_TypeEr.read_bytes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_gzip_test_unordered_urlpath_errors.with_pytest_raises_TypeEr.read_bytes_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 60, "span_ids": ["imports", "test_unordered_urlpath_errors", "to_uri"], "tokens": 435}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import gzip\nimport os\nimport pathlib\nimport sys\nfrom functools import partial\nfrom time import sleep\n\nimport cloudpickle\nimport pytest\nfrom fsspec.compression import compr\nfrom fsspec.core import open_files\nfrom fsspec.implementations.local import LocalFileSystem\nfrom tlz import concat, valmap\n\nfrom dask import compute\nfrom dask.bytes.core import read_bytes\nfrom dask.bytes.utils import compress\nfrom dask.utils import filetexts\n\ncompute = partial(compute, scheduler=\"sync\")\n\nfiles = {\n \".test.accounts.1.json\": (\n b'{\"amount\": 100, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 200, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 400, \"name\": \"Dennis\"}\\n'\n ),\n \".test.accounts.2.json\": (\n b'{\"amount\": 500, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 600, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 800, \"name\": \"Dennis\"}\\n'\n ),\n}\n\n\ncsv_files = {\n \".test.fakedata.1.csv\": (b\"a,b\\n\" b\"1,2\\n\"),\n \".test.fakedata.2.csv\": (b\"a,b\\n\" b\"3,4\\n\"),\n \"subdir/.test.fakedata.2.csv\": (b\"a,b\\n\" b\"5,6\\n\"),\n}\n\n\ndef to_uri(path):\n return pathlib.Path(os.path.abspath(path)).as_uri()\n\n\ndef test_unordered_urlpath_errors():\n\n # Unordered urlpath argument\n with pytest.raises(TypeError):\n read_bytes(\n {\n \"sets/are.csv\",\n \"unordered/so/they.csv\",\n \"should/not/be.csv\",\n \"allowed.csv\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_test_read_bytes.with_filetexts_files_mod.assert_set_results_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_test_read_bytes.with_filetexts_files_mod.assert_set_results_se", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 74, "span_ids": ["test_read_bytes"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes():\n with filetexts(files, mode=\"b\"):\n sample, values = read_bytes(\".test.accounts.*\")\n assert isinstance(sample, bytes)\n assert sample[:5] == files[sorted(files)[0]][:5]\n assert sample.endswith(b\"\\n\")\n\n assert isinstance(values, (list, tuple))\n assert isinstance(values[0], (list, tuple))\n assert hasattr(values[0][0], \"dask\")\n\n assert sum(map(len, values)) >= len(files)\n results = compute(*concat(values))\n assert set(results) == set(files.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.with_filetexts_files_mod.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.with_filetexts_files_mod.None_5", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 84, "span_ids": ["test_read_bytes_sample_delimiter"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_sample_delimiter():\n with filetexts(files, mode=\"b\"):\n sample, values = read_bytes(\".test.accounts.*\", sample=80, delimiter=b\"\\n\")\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\".test.accounts.1.json\", sample=80, delimiter=b\"\\n\")\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\".test.accounts.1.json\", sample=2, delimiter=b\"\\n\")\n assert sample.endswith(b\"\\n\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_paths_test_with_paths.with_pytest_raises_OSErro.read_bytes_url_blocksize": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_with_paths_test_with_paths.with_pytest_raises_OSErro.read_bytes_url_blocksize", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 151, "span_ids": ["test_with_paths"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.skipif(sys.platform == \"win32\", reason=\"pathlib and moto clash on windows\")\ndef test_with_paths():\n with filetexts(files, mode=\"b\"):\n url = pathlib.Path(\"./.test.accounts.*\")\n sample, values = read_bytes(url, blocksize=None)\n assert sum(map(len, values)) == len(files)\n with pytest.raises(OSError):\n # relative path doesn't work\n url = pathlib.Path(\"file://.test.accounts.*\")\n read_bytes(url, blocksize=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_block_test_read_bytes_block.with_filetexts_files_mod.for_bs_in_5_15_45_150.assert_set_ourlines_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_block_test_read_bytes_block.with_filetexts_files_mod.for_bs_in_5_15_45_150.assert_set_ourlines_s", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 149, "end_line": 162, "span_ids": ["test_read_bytes_block"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_block():\n with filetexts(files, mode=\"b\"):\n for bs in [5, 15, 45, 1500]:\n sample, vals = read_bytes(\".test.account*\", blocksize=bs)\n assert list(map(len, vals)) == [\n max((len(v) // bs), 1) for v in files.values()\n ]\n\n results = compute(*concat(vals))\n assert sum(len(r) for r in results) == sum(len(v) for v in files.values())\n\n ourlines = b\"\".join(results).split(b\"\\n\")\n testlines = b\"\".join(files.values()).split(b\"\\n\")\n assert set(ourlines) == set(testlines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_delimited_test_read_bytes_delimited.with_filetexts_files_mod.for_bs_in_5_15_45_1_.assert_ours_test": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_read_bytes_delimited_test_read_bytes_delimited.with_filetexts_files_mod.for_bs_in_5_15_45_1_.assert_ours_test", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 168, "end_line": 191, "span_ids": ["test_read_bytes_delimited"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_delimited():\n with filetexts(files, 
mode=\"b\"):\n for bs in [5, 15, 45, \"1.5 kB\"]:\n _, values = read_bytes(\".test.accounts*\", blocksize=bs, delimiter=b\"\\n\")\n _, values2 = read_bytes(\".test.accounts*\", blocksize=bs, delimiter=b\"foo\")\n assert [a.key for a in concat(values)] != [b.key for b in concat(values2)]\n\n results = compute(*concat(values))\n res = [r for r in results if r]\n assert all(r.endswith(b\"\\n\") for r in res)\n ourlines = b\"\".join(res).split(b\"\\n\")\n testlines = b\"\".join(files[k] for k in sorted(files)).split(b\"\\n\")\n assert ourlines == testlines\n\n # delimiter not at the end\n d = b\"}\"\n _, values = read_bytes(\".test.accounts*\", blocksize=bs, delimiter=d)\n results = compute(*concat(values))\n res = [r for r in results if r]\n # All should end in } except EOF\n assert sum(r.endswith(b\"}\") for r in res) == len(res) - 2\n ours = b\"\".join(res)\n test = b\"\".join(files[v] for v in sorted(files))\n assert ours == test", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_fmt_bs_test_compression.with_filetexts_files2_mo.assert_b_join_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_fmt_bs_test_compression.with_filetexts_files2_mo.assert_b_join_results_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 191, "end_line": 219, "span_ids": ["test_compression", "impl:7"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "fmt_bs = [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr] # type: ignore\n\n\n@pytest.mark.parametrize(\"fmt,blocksize\", fmt_bs)\ndef test_compression(fmt, blocksize):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n files2 = valmap(compress[fmt], files)\n with filetexts(files2, mode=\"b\"):\n if fmt and blocksize:\n with pytest.raises(ValueError):\n read_bytes(\n \".test.accounts.*.json\",\n blocksize=blocksize,\n delimiter=b\"\\n\",\n compression=fmt,\n )\n return\n sample, values = read_bytes(\n \".test.accounts.*.json\",\n blocksize=blocksize,\n delimiter=b\"\\n\",\n compression=fmt,\n )\n assert sample[:5] == files[sorted(files)[0]][:5]\n assert sample.endswith(b\"\\n\")\n\n results = compute(*concat(values))\n assert b\"\".join(results) == b\"\".join([files[k] for k in sorted(files)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_test_open_files_text_mode.with_filetexts_files_mod.assert_list_data_fil": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_test_open_files_text_mode.with_filetexts_files_mod.assert_list_data_fil", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 225, "end_line": 244, "span_ids": ["test_open_files", "test_open_files_text_mode"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_open_files():\n with filetexts(files, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\")\n assert len(myfiles) == len(files)\n for lazy_file, data_file in zip(myfiles, sorted(files)):\n with lazy_file as f:\n x = f.read()\n assert x == files[data_file]\n\n\n@pytest.mark.parametrize(\"encoding\", [\"utf-8\", \"ascii\"])\ndef test_open_files_text_mode(encoding):\n with filetexts(files, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\", mode=\"rt\", encoding=encoding)\n assert len(myfiles) == len(files)\n data = []\n for file in myfiles:\n with file as f:\n data.append(f.read())\n assert list(data) == [files[k].decode(encoding) for k in sorted(files)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_compression_test_open_files_compression.with_filetexts_files2_mo.assert_list_data_sol": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_compression_test_open_files_compression.with_filetexts_files2_mo.assert_list_data_sol", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 262, "span_ids": ["test_open_files_compression"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mode\", [\"rt\", \"rb\"])\n@pytest.mark.parametrize(\"fmt\", list(compr))\ndef test_open_files_compression(mode, fmt):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n files2 = valmap(compress[fmt], files)\n with filetexts(files2, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\", mode=mode, compression=fmt)\n data = []\n for file in myfiles:\n with file as f:\n data.append(f.read())\n sol = [files[k] for k in sorted(files)]\n if mode == \"rt\":\n sol = [b.decode() for b in sol]\n assert list(data) == sol", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_bad_compression_test_names.with_filetexts_files_mod.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_bad_compression_test_names.with_filetexts_files_mod.None_4", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 295, "span_ids": ["test_bad_compression", "test_not_found", "test_names"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bad_compression():\n with filetexts(files, mode=\"b\"):\n for func in [read_bytes, open_files]:\n with pytest.raises(ValueError):\n sample, values = func(\".test.accounts.*\", compression=\"not-found\")\n\n\ndef test_not_found():\n fn = \"not-a-file\"\n with pytest.raises((FileNotFoundError, OSError), match=fn):\n read_bytes(fn)\n\n\n@pytest.mark.slow\ndef test_names():\n with filetexts(files, mode=\"b\"):\n _, a = read_bytes(\".test.accounts.*\")\n _, b = read_bytes(\".test.accounts.*\")\n a = list(concat(a))\n b = list(concat(b))\n\n assert [aa._key for aa in a] == [bb._key for bb in b]\n\n sleep(1)\n for fn in files:\n with open(fn, \"ab\") as f:\n f.write(b\"x\")\n\n _, c = read_bytes(\".test.accounts.*\")\n c = list(concat(c))\n assert [aa._key for aa in a] != [cc._key for cc in c]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_write_test_open_files_write.assert_d_b_000_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_open_files_write_test_open_files_write.assert_d_b_000_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 298, "end_line": 313, "span_ids": ["test_open_files_write"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression_opener\", [(None, open), (\"gzip\", gzip.open)])\ndef test_open_files_write(tmpdir, compression_opener):\n compression, opener = compression_opener\n tmpdir = str(tmpdir)\n files = open_files(tmpdir, num=2, mode=\"wb\", compression=compression)\n assert len(files) == 2\n assert {f.mode for f in files} == {\"wb\"}\n for fil in files:\n with fil as f:\n f.write(b\"000\")\n files = sorted(os.listdir(tmpdir))\n assert files == [\"0.part\", \"1.part\"]\n\n with opener(os.path.join(tmpdir, files[0]), \"rb\") as f:\n d = f.read()\n assert d == b\"000\"", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_io_endpoint_uri._http_127_0_0_1_5555_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_io_endpoint_uri._http_127_0_0_1_5555_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 73, "span_ids": ["imports", "ensure_safe_environment_variables", "impl:20", "s3so"], "tokens": 534}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport os\nimport shlex\nimport subprocess\nimport sys\nimport time\nfrom contextlib import contextmanager\nfrom functools import partial\n\nimport pytest\nfrom packaging.version import parse as parse_version\n\ns3fs = pytest.importorskip(\"s3fs\")\nboto3 = pytest.importorskip(\"boto3\")\nmoto = pytest.importorskip(\"moto\", minversion=\"1.3.14\")\npytest.importorskip(\"flask\") # server mode needs flask too\nrequests = pytest.importorskip(\"requests\")\n\nfrom fsspec.compression import compr\nfrom fsspec.core import get_fs_token_paths, open_files\nfrom s3fs import S3FileSystem as DaskS3FileSystem\nfrom tlz import concat, valmap\n\ntry:\n import fsspec.parquet as fsspec_parquet\nexcept ImportError:\n fsspec_parquet = None\n\nfrom dask import compute\nfrom dask.bytes.core import read_bytes\nfrom dask.bytes.utils import compress\n\ncompute = partial(compute, scheduler=\"sync\")\n\n\ntest_bucket_name = \"test\"\nfiles = {\n \"test/accounts.1.json\": (\n b'{\"amount\": 100, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 200, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 400, \"name\": \"Dennis\"}\\n'\n ),\n \"test/accounts.2.json\": (\n b'{\"amount\": 500, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 600, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 800, \"name\": \"Dennis\"}\\n'\n ),\n}\n\n\n@contextmanager\ndef ensure_safe_environment_variables():\n \"\"\"\n Get a context manager to safely set environment variables\n All changes will be undone on close, hence environment variables set\n within this contextmanager will neither persist nor change global state.\n \"\"\"\n saved_environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(saved_environ)\n\n\n@pytest.fixture\ndef s3so():\n return dict(client_kwargs={\"endpoint_url\": \"http://127.0.0.1:5555/\"})\n\n\nendpoint_uri = \"http://127.0.0.1:5555/\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_s3_context.try_.finally_.fs_rm_bucket_recursive_T": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_s3_context.try_.finally_.fs_rm_bucket_recursive_T", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 131, "span_ids": ["s3_context", "s3"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef s3(s3_base):\n with s3_context() as fs:\n yield fs\n\n\n@contextmanager\ndef s3_context(bucket=test_bucket_name, files=files):\n client = boto3.client(\"s3\", endpoint_url=endpoint_uri)\n client.create_bucket(Bucket=bucket, ACL=\"public-read-write\")\n for f, data in files.items():\n client.put_object(Bucket=bucket, Key=f, Body=data)\n fs = s3fs.S3FileSystem(\n anon=True, client_kwargs={\"endpoint_url\": \"http://127.0.0.1:5555/\"}\n )\n s3fs.S3FileSystem.clear_instance_cache()\n fs.invalidate_cache()\n try:\n yield fs\n finally:\n fs.rm(bucket, recursive=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata_s3_with_yellow_tripdata.data._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata_s3_with_yellow_tripdata.data._", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 130, "end_line": 201, "span_ids": ["s3_with_yellow_tripdata"], "tokens": 1171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture()\n@pytest.mark.slow\ndef s3_with_yellow_tripdata(s3):\n \"\"\"\n Fixture with sample yellowtrip CSVs loaded into S3.\n\n Provides the following CSVs:\n\n * s3://test/nyc-taxi/2015/yellow_tripdata_2015-01.csv\n * s3://test/nyc-taxi/2014/yellow_tripdata_2015-mm.csv\n for mm from 01 - 12.\n \"\"\"\n np = pytest.importorskip(\"numpy\")\n pd = pytest.importorskip(\"pandas\")\n\n data = {\n \"VendorID\": {0: 2, 1: 1, 2: 1, 3: 1, 4: 1},\n \"tpep_pickup_datetime\": {\n 0: \"2015-01-15 19:05:39\",\n 1: \"2015-01-10 20:33:38\",\n 2: \"2015-01-10 20:33:38\",\n 3: \"2015-01-10 20:33:39\",\n 4: \"2015-01-10 20:33:39\",\n },\n \"tpep_dropoff_datetime\": {\n 0: \"2015-01-15 19:23:42\",\n 1: \"2015-01-10 20:53:28\",\n 2: \"2015-01-10 20:43:41\",\n 3: \"2015-01-10 20:35:31\",\n 4: \"2015-01-10 20:52:58\",\n },\n \"passenger_count\": {0: 1, 1: 1, 2: 1, 3: 1, 4: 1},\n \"trip_distance\": {0: 1.59, 1: 3.3, 2: 1.8, 3: 0.5, 4: 3.0},\n \"pickup_longitude\": {\n 0: -73.993896484375,\n 1: -74.00164794921875,\n 2: -73.96334075927734,\n 3: -74.00908660888672,\n 4: 
-73.97117614746094,\n },\n \"pickup_latitude\": {\n 0: 40.7501106262207,\n 1: 40.7242431640625,\n 2: 40.80278778076172,\n 3: 40.71381759643555,\n 4: 40.762428283691406,\n },\n \"RateCodeID\": {0: 1, 1: 1, 2: 1, 3: 1, 4: 1},\n \"store_and_fwd_flag\": {0: \"N\", 1: \"N\", 2: \"N\", 3: \"N\", 4: \"N\"},\n \"dropoff_longitude\": {\n 0: -73.97478485107422,\n 1: -73.99441528320312,\n 2: -73.95182037353516,\n 3: -74.00432586669923,\n 4: -74.00418090820312,\n },\n \"dropoff_latitude\": {\n 0: 40.75061798095703,\n 1: 40.75910949707031,\n 2: 40.82441329956055,\n 3: 40.71998596191406,\n 4: 40.742652893066406,\n },\n \"payment_type\": {0: 1, 1: 1, 2: 2, 3: 2, 4: 2},\n \"fare_amount\": {0: 12.0, 1: 14.5, 2: 9.5, 3: 3.5, 4: 15.0},\n \"extra\": {0: 1.0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0.5},\n \"mta_tax\": {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0.5},\n \"tip_amount\": {0: 3.25, 1: 2.0, 2: 0.0, 3: 0.0, 4: 0.0},\n \"tolls_amount\": {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0},\n \"improvement_surcharge\": {0: 0.3, 1: 0.3, 2: 0.3, 3: 0.3, 4: 0.3},\n \"total_amount\": {0: 17.05, 1: 17.8, 2: 10.8, 3: 4.8, 4: 16.3},\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata.sample_s3_with_yellow_tripdata.yield": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_with_yellow_tripdata.sample_s3_with_yellow_tripdata.yield", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 219, "span_ids": ["s3_with_yellow_tripdata"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture()\n@pytest.mark.slow\ndef s3_with_yellow_tripdata(s3):\n # ... 
other code\n sample = pd.DataFrame(data)\n df = sample.take(np.arange(5).repeat(10000))\n file = io.BytesIO()\n sfile = io.TextIOWrapper(file)\n df.to_csv(sfile, index=False)\n\n key = \"nyc-taxi/2015/yellow_tripdata_2015-01.csv\"\n client = boto3.client(\"s3\", endpoint_url=\"http://127.0.0.1:5555/\")\n client.put_object(Bucket=test_bucket_name, Key=key, Body=file)\n key = \"nyc-taxi/2014/yellow_tripdata_2014-{:0>2d}.csv\"\n\n for i in range(1, 13):\n file.seek(0)\n client.put_object(Bucket=test_bucket_name, Key=key.format(i), Body=file)\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_get_s3_test_get_s3.None_1.DaskS3FileSystem_secret_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_get_s3_test_get_s3.None_1.DaskS3FileSystem_secret_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 234, "span_ids": ["test_get_s3"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_s3():\n s3 = DaskS3FileSystem(key=\"key\", secret=\"secret\")\n assert s3.key == \"key\"\n assert s3.secret == \"secret\"\n\n s3 = DaskS3FileSystem(username=\"key\", password=\"secret\")\n assert s3.key == \"key\"\n assert s3.secret == \"secret\"\n\n with pytest.raises(KeyError):\n DaskS3FileSystem(key=\"key\", username=\"key\")\n with pytest.raises(KeyError):\n DaskS3FileSystem(secret=\"key\", password=\"key\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_write_test_open_files_write.assert_set_list_files_val": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_write_test_open_files_write.assert_set_list_files_val", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 237, "end_line": 247, "span_ids": ["test_open_files_write"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_open_files_write(s3, s3so):\n paths = [\"s3://\" + test_bucket_name + \"/more/\" + f for f in files]\n fils = open_files(paths, mode=\"wb\", **s3so)\n for fil, data in zip(fils, files.values()):\n with fil as f:\n f.write(data)\n sample, values = read_bytes(\n 
\"s3://\" + test_bucket_name + \"/more/test/accounts.*\", **s3so\n )\n results = compute(*concat(values))\n assert set(list(files.values())) == set(results)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_test_read_bytes.assert_set_results_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_test_read_bytes.assert_set_results_se", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 250, "end_line": 262, "span_ids": ["test_read_bytes"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes(s3, s3so):\n sample, values = read_bytes(\"s3://\" + test_bucket_name + \"/test/accounts.*\", **s3so)\n assert isinstance(sample, bytes)\n assert sample[:5] == files[sorted(files)[0]][:5]\n assert sample.endswith(b\"\\n\")\n\n assert isinstance(values, (list, tuple))\n assert isinstance(values[0], (list, tuple))\n assert hasattr(values[0][0], \"dask\")\n\n assert sum(map(len, values)) >= len(files)\n results = compute(*concat(values))\n assert set(results) == set(files.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_sample_delimiter_test_read_bytes_sample_delimiter.None_5", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 262, "end_line": 283, "span_ids": ["test_read_bytes_sample_delimiter"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_sample_delimiter(s3, s3so):\n sample, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.*\",\n sample=80,\n delimiter=b\"\\n\",\n **s3so,\n )\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.1.json\",\n sample=80,\n delimiter=b\"\\n\",\n **s3so,\n )\n assert sample.endswith(b\"\\n\")\n sample, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.1.json\",\n sample=2,\n delimiter=b\"\\n\",\n **s3so,\n )\n assert sample.endswith(b\"\\n\")", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_non_existing_glob_test_read_bytes_blocksize_on_large_data.assert_len_L_12": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_non_existing_glob_test_read_bytes_blocksize_on_large_data.assert_len_L_12", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 286, "end_line": 313, "span_ids": ["test_read_bytes_blocksize_none", "test_read_bytes_non_existing_glob", "test_read_bytes_blocksize_on_large_data"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_bytes_non_existing_glob(s3, s3so):\n with pytest.raises(IOError):\n read_bytes(\"s3://\" + test_bucket_name + \"/non-existing/*\", **s3so)\n\n\ndef test_read_bytes_blocksize_none(s3, s3so):\n _, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts.*\", blocksize=None, **s3so\n )\n assert sum(map(len, values)) == len(files)\n\n\ndef test_read_bytes_blocksize_on_large_data(s3_with_yellow_tripdata, s3so):\n _, L = read_bytes(\n f\"s3://{test_bucket_name}/nyc-taxi/2015/yellow_tripdata_2015-01.csv\",\n blocksize=None,\n anon=True,\n **s3so,\n )\n assert len(L) == 1\n\n _, L = read_bytes(\n f\"s3://{test_bucket_name}/nyc-taxi/2014/*.csv\",\n blocksize=None,\n anon=True,\n **s3so,\n )\n assert len(L) == 12", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_block_test_read_bytes_block.assert_set_ourlines_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_block_test_read_bytes_block.assert_set_ourlines_s", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 316, "end_line": 330, "span_ids": ["test_read_bytes_block"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"blocksize\", [5, 15, 45, 1500])\ndef test_read_bytes_block(s3, blocksize, s3so):\n _, vals = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/account*\", blocksize=blocksize, **s3so\n )\n assert list(map(len, vals)) == [\n max((len(v) // blocksize), 1) for v in files.values()\n ]\n\n results = compute(*concat(vals))\n assert 
sum(len(r) for r in results) == sum(len(v) for v in files.values())\n\n ourlines = b\"\".join(results).split(b\"\\n\")\n testlines = b\"\".join(files.values()).split(b\"\\n\")\n assert set(ourlines) == set(testlines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_delimited_test_read_bytes_delimited.assert_ours_test": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_read_bytes_delimited_test_read_bytes_delimited.assert_ours_test", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 333, "end_line": 370, "span_ids": ["test_read_bytes_delimited"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"blocksize\", [5, 15, 45, 1500])\ndef test_read_bytes_delimited(s3, blocksize, s3so):\n _, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts*\",\n blocksize=blocksize,\n delimiter=b\"\\n\",\n **s3so,\n )\n _, values2 = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts*\",\n blocksize=blocksize,\n delimiter=b\"foo\",\n **s3so,\n )\n assert [a.key for a in concat(values)] != [b.key for b in concat(values2)]\n\n results = compute(*concat(values))\n res = [r for r in results if r]\n assert all(r.endswith(b\"\\n\") for r in res)\n ourlines = b\"\".join(res).split(b\"\\n\")\n testlines = b\"\".join(files[k] for k in sorted(files)).split(b\"\\n\")\n assert ourlines == testlines\n\n # delimiter not at the end\n d = b\"}\"\n _, values = read_bytes(\n \"s3://\" + test_bucket_name + \"/test/accounts*\",\n blocksize=blocksize,\n delimiter=d,\n **s3so,\n )\n results = compute(*concat(values))\n res = [r for r in results if r]\n # All should end in } except EOF\n assert sum(r.endswith(b\"}\") for r in res) == len(res) - 2\n ours = b\"\".join(res)\n test = b\"\".join(files[v] for v in sorted(files))\n assert ours == test", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_compression_test_compression.with_s3_context_compress.assert_b_join_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_compression_test_compression.with_s3_context_compress.assert_b_join_results_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 378, "end_line": 406, "span_ids": ["test_compression"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"fmt,blocksize\",\n [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr], # type: ignore\n)\ndef test_compression(s3, fmt, blocksize, s3so):\n if fmt not in compress:\n pytest.skip(\"compression function not provided\")\n s3._cache.clear()\n with s3_context(\"compress\", valmap(compress[fmt], files)):\n if fmt and blocksize:\n with pytest.raises(ValueError):\n read_bytes(\n \"s3://compress/test/accounts.*\",\n compression=fmt,\n blocksize=blocksize,\n **s3so,\n )\n return\n sample, values = read_bytes(\n \"s3://compress/test/accounts.*\",\n compression=fmt,\n blocksize=blocksize,\n **s3so,\n )\n assert sample.startswith(files[sorted(files)[0]][:10])\n assert sample.endswith(b\"\\n\")\n\n results = compute(*concat(values))\n assert b\"\".join(results) == b\"\".join([files[k] for k in sorted(files)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_double.lambda_x_x_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_open_files_double.lambda_x_x_2", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 409, "end_line": 422, "span_ids": ["test_open_files", "impl:22"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mode\", [\"rt\", \"rb\"])\ndef test_open_files(s3, mode, s3so):\n myfiles = open_files(\n \"s3://\" + test_bucket_name + \"/test/accounts.*\", mode=mode, **s3so\n )\n assert len(myfiles) == len(files)\n for lazy_file, path in zip(myfiles, sorted(files)):\n with lazy_file as f:\n data = f.read()\n sol = files[path]\n assert data == sol if mode == \"rb\" else sol.decode()\n\n\ndouble = lambda x: x * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_modification_time_read_bytes_test_modification_time_read_bytes.assert_aa__key_for_aa_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_modification_time_read_bytes_test_modification_time_read_bytes.assert_aa__key_for_aa_in", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 420, "end_line": 430, "span_ids": ["test_modification_time_read_bytes"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_modification_time_read_bytes(s3, s3so):\n with s3_context(\"compress\", files):\n _, a = read_bytes(\"s3://compress/test/accounts.*\", anon=True, **s3so)\n _, b = read_bytes(\"s3://compress/test/accounts.*\", anon=True, **s3so)\n\n assert [aa._key for aa in concat(a)] == [bb._key for bb in concat(b)]\n\n with s3_context(\"compress\", valmap(double, files)):\n _, c = read_bytes(\"s3://compress/test/accounts.*\", anon=True, **s3so)\n\n assert [aa._key for aa in concat(a)] != [cc._key for cc in concat(c)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_wstoragepars_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_wstoragepars_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 605, "end_line": 628, "span_ids": ["test_parquet_wstoragepars"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parquet_wstoragepars(s3, s3so):\n dd = pytest.importorskip(\"dask.dataframe\")\n pytest.importorskip(\"fastparquet\")\n pd = pytest.importorskip(\"pandas\")\n np = pytest.importorskip(\"numpy\")\n\n url = \"s3://%s/test.parquet\" % test_bucket_name\n\n data = pd.DataFrame({\"i32\": np.array([0, 5, 2, 5])})\n df = dd.from_pandas(data, chunksize=500)\n df.to_parquet(url, write_index=False, storage_options=s3so)\n\n dd.read_parquet(url, storage_options=dict(**s3so, **{\"default_fill_cache\": False}))\n assert s3.current().default_fill_cache is False\n dd.read_parquet(url, storage_options=dict(**s3so, **{\"default_fill_cache\": True}))\n assert s3.current().default_fill_cache is True\n\n dd.read_parquet(\n url, storage_options=dict(**s3so, **{\"default_block_size\": 2**20})\n )\n assert s3.current().default_block_size == 2**20\n with s3.current().open(url + \"/_metadata\") as f:\n assert f.blocksize == 2**20", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Cache._posttask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_Cache._posttask_", "embedding": null, "metadata": {"file_path": "dask/cache.py", "file_name": "cache.py", "file_type": "text/x-python", "category": "implementation", "start_line": 54, "end_line": 66, "span_ids": ["Cache._finish", "Cache._posttask"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Cache(Callback):\n\n def _posttask(self, key, value, dsk, state, id):\n duration = default_timer() - self.starttimes[key]\n deps = state[\"dependencies\"][key]\n if deps:\n duration += max(self.durations.get(k, 0) for k in deps)\n self.durations[key] = duration\n nb = self._nbytes(value) + overhead + sys.getsizeof(key) * 4\n self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)\n\n def _finish(self, dsk, state, errored):\n self.starttimes.clear()\n self.durations.clear()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_unpack_callbacks_normalize_callback.if_isinstance_cb_Callbac.else_.raise_TypeError_Callback": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_unpack_callbacks_normalize_callback.if_isinstance_cb_Callbac.else_.raise_TypeError_Callback", "embedding": null, "metadata": {"file_path": "dask/callbacks.py", "file_name": "callbacks.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 113, "span_ids": ["normalize_callback", "unpack_callbacks", "local_callbacks"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_callbacks(cbs):\n \"\"\"Take an iterable of callbacks, return a list of each callback.\"\"\"\n if cbs:\n return [[i for i in f if i] for f in zip(*cbs)]\n else:\n return [(), (), (), (), ()]\n\n\n@contextmanager\ndef local_callbacks(callbacks=None):\n \"\"\"Allows callbacks to work with nested schedulers.\n\n Callbacks will only be used by the first started scheduler they encounter.\n This means that only the outermost scheduler will use global callbacks.\"\"\"\n global_callbacks = callbacks is None\n if global_callbacks:\n callbacks, Callback.active = Callback.active, set()\n try:\n yield callbacks or ()\n finally:\n if global_callbacks:\n Callback.active = callbacks\n\n\ndef normalize_callback(cb):\n \"\"\"Normalizes a callback to a tuple\"\"\"\n if isinstance(cb, Callback):\n return cb._callback\n elif isinstance(cb, tuple):\n return cb\n else:\n raise TypeError(\"Callbacks must be either `Callback` or `tuple`\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_add_callbacks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_add_callbacks_", "embedding": null, "metadata": {"file_path": "dask/callbacks.py", "file_name": "callbacks.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 116, "end_line": 142, "span_ids": ["add_callbacks.__init__", "add_callbacks.__exit__", "add_callbacks.__enter__", "add_callbacks"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class add_callbacks:\n \"\"\"Context manager for callbacks.\n\n Takes several callbacks and applies them only in the enclosed context.\n Callbacks can either be represented as a ``Callback`` object, or as a tuple\n of length 4.\n\n Examples\n --------\n >>> def pretask(key, dsk, state):\n ... print(\"Now running {0}\").format(key)\n >>> callbacks = (None, pretask, None, None)\n >>> with add_callbacks(callbacks): # doctest: +SKIP\n ... res.compute()\n \"\"\"\n\n def __init__(self, *callbacks):\n self.callbacks = [normalize_callback(c) for c in callbacks]\n Callback.active.update(self.callbacks)\n\n def __enter__(self):\n return\n\n def __exit__(self, type, value, traceback):\n for c in self.callbacks:\n Callback.active.discard(c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_update.return.old": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_update.return.old", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 85, "end_line": 125, "span_ids": ["update"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def update(old: dict, new: Mapping, priority: Literal[\"old\", \"new\"] = \"new\") -> dict:\n \"\"\"Update a nested dictionary with values from another\n\n This is like dict.update except that it smoothly merges nested values\n\n This operates in-place and modifies old\n\n Parameters\n ----------\n priority: string {'old', 'new'}\n If new (default) then the new dictionary has preference.\n Otherwise the old dictionary does.\n\n Examples\n --------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b) # doctest: +SKIP\n {'x': 2, 'y': {'a': 2, 'b': 3}}\n\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'x': 2, 'y': {'b': 3}}\n >>> update(a, b, priority='old') # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.merge\n \"\"\"\n for k, v in new.items():\n k = canonical_name(k, old)\n\n if isinstance(v, Mapping):\n if k not in old or old[k] is None:\n old[k] = {}\n update(old[k], v, priority=priority)\n else:\n if priority == \"new\" or k not in old:\n old[k] = v\n\n return old", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_merge_merge.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_merge_merge.return.result", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 147, "span_ids": ["merge"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge(*dicts: Mapping) -> dict:\n \"\"\"Update a sequence of nested dictionaries\n\n This prefers the values in the latter dictionaries to those in the former\n\n Examples\n --------\n >>> a = {'x': 1, 'y': {'a': 2}}\n >>> b = {'y': {'b': 3}}\n >>> merge(a, b) # doctest: +SKIP\n {'x': 1, 'y': {'a': 2, 'b': 3}}\n\n See Also\n --------\n dask.config.update\n \"\"\"\n result: dict = {}\n for d in dicts:\n update(result, d)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_yaml_collect_yaml.return.configs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_yaml_collect_yaml.return.configs", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 150, "end_line": 188, "span_ids": ["collect_yaml"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collect_yaml(paths: Sequence[str] = paths) -> list[dict]:\n \"\"\"Collect configuration from yaml files\n\n This searches through a list of paths, expands to find all yaml or json\n files, and then parses each file.\n \"\"\"\n # Find all paths\n file_paths = []\n for path in paths:\n if os.path.exists(path):\n if os.path.isdir(path):\n try:\n file_paths.extend(\n sorted(\n os.path.join(path, p)\n for p in os.listdir(path)\n if os.path.splitext(p)[1].lower()\n in (\".json\", \".yaml\", \".yml\")\n )\n )\n except OSError:\n # Ignore permission errors\n pass\n else:\n file_paths.append(path)\n\n configs = []\n\n # Parse yaml files\n for path in file_paths:\n try:\n with open(path) as f:\n data = yaml.safe_load(f.read()) or {}\n configs.append(data)\n except OSError:\n # Ignore permission errors\n pass\n\n return configs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_env_collect_env.return.result": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_env_collect_env.return.result", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 191, "end_line": 224, "span_ids": ["collect_env"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collect_env(env: Mapping[str, str] | None = None) -> dict:\n \"\"\"Collect config from environment variables\n\n This grabs environment variables of the form \"DASK_FOO__BAR_BAZ=123\" and\n turns these into config variables of the form ``{\"foo\": {\"bar-baz\": 123}}``\n It transforms the key and value in the following way:\n\n - Lower-cases the key text\n - Treats ``__`` (double-underscore) as nested access\n - Calls ``ast.literal_eval`` on the value\n\n Any serialized config passed via ``DASK_INTERNAL_INHERIT_CONFIG`` is also set here.\n\n \"\"\"\n\n if env is None:\n env = os.environ\n\n if \"DASK_INTERNAL_INHERIT_CONFIG\" in env:\n d = deserialize(env[\"DASK_INTERNAL_INHERIT_CONFIG\"])\n else:\n d = {}\n\n for name, value in env.items():\n if name.startswith(\"DASK_\"):\n varname = name[5:].lower().replace(\"__\", \".\")\n try:\n d[varname] = ast.literal_eval(value)\n except (SyntaxError, ValueError):\n d[varname] = value\n\n result: dict = {}\n set(d, config=result)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set_set.__exit__.for_op_path_value_in_re.if_op_replace_.else_insert.for_key_in_path_1_.else_.d_pop_path_1_None_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set_set.__exit__.for_op_path_value_in_re.if_op_replace_.else_insert.for_key_in_path_1_.else_.d_pop_path_1_None_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 288, "end_line": 366, "span_ids": ["set.__enter__", "set.__init__", "set", "set.__exit__"], "tokens": 525}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class set:\n \"\"\"Temporarily set configuration values within a context manager\n\n Parameters\n ----------\n arg : mapping or None, optional\n A mapping of configuration key-value pairs to set.\n **kwargs :\n Additional key-value pairs to set. 
If ``arg`` is provided, values set\n in ``arg`` will be applied before those in ``kwargs``.\n Double-underscores (``__``) in keyword arguments will be replaced with\n ``.``, allowing nested values to be easily set.\n\n Examples\n --------\n >>> import dask\n\n Set ``'foo.bar'`` in a context, by providing a mapping.\n\n >>> with dask.config.set({'foo.bar': 123}):\n ... pass\n\n Set ``'foo.bar'`` in a context, by providing a keyword argument.\n\n >>> with dask.config.set(foo__bar=123):\n ... pass\n\n Set ``'foo.bar'`` globally.\n\n >>> dask.config.set(foo__bar=123) # doctest: +SKIP\n\n See Also\n --------\n dask.config.get\n \"\"\"\n\n config: dict\n # [(op, path, value), ...]\n _record: list[tuple[Literal[\"insert\", \"replace\"], tuple[str, ...], Any]]\n\n def __init__(\n self,\n arg: Mapping = None,\n config: dict = config,\n lock: threading.Lock = config_lock,\n **kwargs,\n ):\n with lock:\n self.config = config\n self._record = []\n\n if arg is not None:\n for key, value in arg.items():\n key = check_deprecations(key)\n self._assign(key.split(\".\"), value, config)\n if kwargs:\n for key, value in kwargs.items():\n key = key.replace(\"__\", \".\")\n key = check_deprecations(key)\n self._assign(key.split(\".\"), value, config)\n\n def __enter__(self):\n return self.config\n\n def __exit__(self, type, value, traceback):\n for op, path, value in reversed(self._record):\n d = self.config\n if op == \"replace\":\n for key in path[:-1]:\n d = d.setdefault(key, {})\n d[path[-1]] = value\n else: # insert\n for key in path[:-1]:\n try:\n d = d[key]\n except KeyError:\n break\n else:\n d.pop(path[-1], None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set._assign_set._assign.if_len_keys_1_.else_.self__assign_keys_1_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_set._assign_set._assign.if_len_keys_1_.else_.self__assign_keys_1_va", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 368, "end_line": 409, "span_ids": ["set._assign"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class set:\n\n def _assign(\n self,\n keys: Sequence[str],\n value: Any,\n d: dict,\n path: tuple[str, ...] 
= (),\n record: bool = True,\n ) -> None:\n \"\"\"Assign value into a nested configuration dictionary\n\n Parameters\n ----------\n keys : Sequence[str]\n The nested path of keys to assign the value.\n value : object\n d : dict\n The part of the nested dictionary into which we want to assign the\n value\n path : tuple[str], optional\n The path history up to this point.\n record : bool, optional\n Whether this operation needs to be recorded to allow for rollback.\n \"\"\"\n key = canonical_name(keys[0], d)\n\n path = path + (key,)\n\n if len(keys) == 1:\n if record:\n if key in d:\n self._record.append((\"replace\", path, d[key]))\n else:\n self._record.append((\"insert\", path, None))\n d[key] = value\n else:\n if key not in d:\n if record:\n self._record.append((\"insert\", path, None))\n d[key] = {}\n # No need to record subsequent operations after an insert\n record = False\n self._assign(keys[1:], value, d[key], path, record=record)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_collect.return.merge_configs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_collect_collect.return.merge_configs_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 412, "end_line": 438, "span_ids": ["collect"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collect(paths: list[str] = paths, env: Mapping[str, str] = None) -> dict:\n \"\"\"\n Collect configuration from paths and environment variables\n\n Parameters\n ----------\n paths : list[str]\n A list of paths to search for yaml config files\n\n env : Mapping[str, str]\n The system environment variables\n\n Returns\n -------\n config: dict\n\n See Also\n --------\n dask.config.refresh: collect configuration and update into primary config\n \"\"\"\n if env is None:\n env = os.environ\n\n configs = collect_yaml(paths=paths)\n configs.append(collect_env(env=env))\n\n return merge(*configs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_refresh_refresh.update_config_collect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_refresh_refresh.update_config_collect_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 472, "span_ids": ["refresh"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def refresh(\n config: dict = config, defaults: list[Mapping] = defaults, **kwargs\n) -> None:\n \"\"\"\n Update configuration by re-reading yaml files and env variables\n\n This mutates the global dask.config.config, or the config parameter if\n passed in.\n\n This goes through the following stages:\n\n 1. Clearing out all old configuration\n 2. Updating from the stored defaults from downstream libraries\n (see update_defaults)\n 3. Updating from yaml files and environment variables\n\n Note that some functionality only checks configuration once at startup and\n may not change behavior, even if configuration changes. It is recommended\n to restart your python process if convenient to ensure that new\n configuration changes take place.\n\n See Also\n --------\n dask.config.collect: for parameters\n dask.config.update_defaults\n \"\"\"\n config.clear()\n\n for d in defaults:\n update(config, d, priority=\"old\")\n\n update(config, collect(**kwargs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_get_get.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_get_get.return.result", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 475, "end_line": 524, "span_ids": ["get"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(\n key: str,\n default: Any = no_default,\n config: dict = config,\n override_with: Any | None = None,\n) -> Any:\n \"\"\"\n Get elements from global config\n\n If ``override_with`` is not None this value will be passed straight back.\n Useful for getting kwarg defaults from Dask config.\n\n Use '.' 
for nested access\n\n Examples\n --------\n >>> from dask import config\n >>> config.get('foo') # doctest: +SKIP\n {'x': 1, 'y': 2}\n\n >>> config.get('foo.x') # doctest: +SKIP\n 1\n\n >>> config.get('foo.x.y', default=123) # doctest: +SKIP\n 123\n\n >>> config.get('foo.y', override_with=None) # doctest: +SKIP\n 2\n\n >>> config.get('foo.y', override_with=3) # doctest: +SKIP\n 3\n\n See Also\n --------\n dask.config.set\n \"\"\"\n if override_with is not None:\n return override_with\n keys = key.split(\".\")\n result = config\n for k in keys:\n k = canonical_name(k, result)\n try:\n result = result[k]\n except (TypeError, IndexError, KeyError):\n if default is not no_default:\n return default\n else:\n raise\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/context.py___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/context.py___", "embedding": null, "metadata": {"file_path": "dask/context.py", "file_name": "context.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 67, "span_ids": ["GlobalMethod", "globalmethod", "GlobalMethod.__init__", "GlobalMethod.__get__", "docstring"], "tokens": 404}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nControl global computation context\n\"\"\"\nimport threading\nfrom functools import partial\n\nfrom . import config\n\n_globals = config.config\n\n\nthread_state = threading.local()\n\n\ndef globalmethod(default=None, key=None, falsey=None):\n \"\"\"Allow function to be taken over by globals\n\n This modifies a method so that occurrences of it may be taken over by\n functions registered in the global options. Can be used as a decorator or a\n function.\n\n Parameters\n ----------\n default : callable\n The default callable to use.\n key : str\n Key under which we register this function in the global parameters\n falsey : callable, None, optional\n A function to use if the option is falsey. If not provided, the default\n is used instead.\n\n Examples\n --------\n >>> import dask\n >>> class Foo:\n ... @globalmethod(key='bar', falsey=lambda: 3)\n ... def bar():\n ... return 1\n >>> f = Foo()\n >>> f.bar()\n 1\n >>> with dask.config.set(bar=lambda: 2):\n ... print(f.bar())\n 2\n >>> with dask.config.set(bar=False):\n ... 
print(f.bar())\n 3\n \"\"\"\n if default is None:\n return partial(globalmethod, key=key, falsey=falsey)\n return GlobalMethod(default=default, key=key, falsey=falsey)\n\n\nclass GlobalMethod:\n def __init__(self, default, key, falsey=None):\n self._default = default\n self._key = key\n self._falsey = falsey\n\n def __get__(self, instance, owner=None):\n if self._key in _globals:\n if _globals[self._key]:\n return _globals[self._key]\n elif self._falsey is not None:\n return self._falsey\n return self._default", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_from_collections_import_d_istask.return.type_x_is_tuple_and_x_an": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_from_collections_import_d_istask.return.type_x_is_tuple_and_x_an", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 40, "span_ids": ["imports", "ishashable", "istask"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import defaultdict\n\nfrom .utils_test import add, inc # noqa: F401\n\nno_default = \"__no_default__\"\n\n\ndef ishashable(x):\n \"\"\"Is x hashable?\n\n Examples\n --------\n\n >>> ishashable(1)\n True\n >>> ishashable([1])\n False\n \"\"\"\n try:\n hash(x)\n return True\n except TypeError:\n return False\n\n\ndef istask(x):\n \"\"\"Is x a runnable task?\n\n A task is a tuple with a callable first argument\n\n Examples\n --------\n\n >>> inc = lambda x: x + 1\n >>> istask((inc, 1))\n True\n >>> istask(1)\n False\n \"\"\"\n return type(x) is tuple and x and callable(x[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_has_tasks_lists_to_tuples.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_has_tasks_lists_to_tuples.return.res", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 81, "span_ids": ["lists_to_tuples", "has_tasks", "preorder_traversal"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def has_tasks(dsk, x):\n \"\"\"Whether ``x`` has anything to compute.\n\n Returns True if:\n - ``x`` is a task\n - ``x`` is a key in ``dsk``\n - ``x`` is a list that contains any tasks or keys\n \"\"\"\n if istask(x):\n return 
True\n try:\n if x in dsk:\n return True\n except Exception:\n pass\n if isinstance(x, list):\n for i in x:\n if has_tasks(dsk, i):\n return True\n return False\n\n\ndef preorder_traversal(task):\n \"\"\"A generator to preorder-traverse a task.\"\"\"\n\n for item in task:\n if istask(item):\n yield from preorder_traversal(item)\n elif isinstance(item, list):\n yield list\n yield from preorder_traversal(item)\n else:\n yield item\n\n\ndef lists_to_tuples(res, keys):\n if isinstance(keys, list):\n return tuple(lists_to_tuples(r, k) for r, k in zip(res, keys))\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__execute_task__execute_task.if_isinstance_arg_list_.else_.return.arg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__execute_task__execute_task.if_isinstance_arg_list_.else_.return.arg", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 127, "span_ids": ["_execute_task"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _execute_task(arg, cache, dsk=None):\n \"\"\"Do the actual work of collecting data and executing a function\n\n Examples\n --------\n\n >>> cache = {'x': 1, 'y': 2}\n\n Compute tasks against a cache\n >>> _execute_task((add, 'x', 1), cache) # Compute task in naive manner\n 2\n >>> _execute_task((add, (inc, 'x'), 1), cache) # Support nested computation\n 3\n\n Also grab data from cache\n >>> _execute_task('x', cache)\n 1\n\n Support nested lists\n >>> list(_execute_task(['x', 'y'], cache))\n [1, 2]\n\n >>> list(map(list, _execute_task([['x', 'y'], ['y', 'x']], cache)))\n [[1, 2], [2, 1]]\n\n >>> _execute_task('foo', cache) # Passes through on non-keys\n 'foo'\n \"\"\"\n if isinstance(arg, list):\n return [_execute_task(a, cache) for a in arg]\n elif istask(arg):\n func, args = arg[0], arg[1:]\n # Note: Don't assign the subtask results to a variable. 
numpy detects\n # temporaries by their reference count and can execute certain\n # operations in-place.\n return func(*(_execute_task(a, cache) for a in args))\n elif not ishashable(arg):\n return arg\n elif arg in cache:\n return cache[arg]\n else:\n return arg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_get.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_get.return.result", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 154, "span_ids": ["get"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(dsk, out, cache=None):\n \"\"\"Get value from Dask\n\n Examples\n --------\n\n >>> inc = lambda x: x + 1\n >>> d = {'x': 1, 'y': (inc, 'x')}\n\n >>> get(d, 'x')\n 1\n >>> get(d, 'y')\n 2\n \"\"\"\n for k in flatten(out) if isinstance(out, list) else [out]:\n if k not in dsk:\n raise KeyError(f\"{k} is not a key in the graph\")\n if cache is None:\n cache = {}\n for key in toposort(dsk):\n task = dsk[key]\n result = _execute_task(task, cache)\n cache[key] = result\n result = _execute_task(out, cache)\n if isinstance(out, list):\n result = lists_to_tuples(result, out)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_keys_in_tasks_keys_in_tasks.return.ret_if_as_list_else_set_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_keys_in_tasks_keys_in_tasks.return.ret_if_as_list_else_set_r", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 159, "end_line": 191, "span_ids": ["keys_in_tasks"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def keys_in_tasks(keys, tasks, as_list=False):\n \"\"\"Returns the keys in `keys` that are also in `tasks`\n\n Examples\n --------\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 
'a': (add, (inc, 'x'), 1)}\n\n >>> keys_in_tasks(dsk, ['x', 'y', 'j']) # doctest: +SKIP\n {'x', 'y'}\n \"\"\"\n ret = []\n while tasks:\n work = []\n for w in tasks:\n typ = type(w)\n if typ is tuple and w and callable(w[0]): # istask(w)\n work.extend(w[1:])\n elif typ is list:\n work.extend(w)\n elif typ is dict:\n work.extend(w.values())\n else:\n try:\n if w in keys:\n ret.append(w)\n except TypeError: # not hashable\n pass\n tasks = work\n return ret if as_list else set(ret)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_dependencies_get_dependencies.return.keys_in_tasks_dsk_arg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_dependencies_get_dependencies.return.keys_in_tasks_dsk_arg_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 194, "end_line": 230, "span_ids": ["get_dependencies"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_dependencies(dsk, key=None, task=no_default, as_list=False):\n \"\"\"Get the immediate tasks on which this task depends\n\n Examples\n --------\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 
'a': (add, (inc, 'x'), 1)}\n\n >>> get_dependencies(dsk, 'x')\n set()\n\n >>> get_dependencies(dsk, 'y')\n {'x'}\n\n >>> get_dependencies(dsk, 'z') # doctest: +SKIP\n {'x', 'y'}\n\n >>> get_dependencies(dsk, 'w') # Only direct dependencies\n {'z'}\n\n >>> get_dependencies(dsk, 'a') # Ignore non-keys\n {'x'}\n\n >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly\n {'x'}\n \"\"\"\n if key is not None:\n arg = dsk[key]\n elif task is not no_default:\n arg = task\n else:\n raise ValueError(\"Provide either key or task\")\n\n return keys_in_tasks(dsk, [arg], as_list=as_list)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_deps_get_deps.return.dependencies_dependents": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_get_deps_get_deps.return.dependencies_dependents", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 245, "span_ids": ["get_deps"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_deps(dsk):\n \"\"\"Get dependencies and dependents from dask dask graph\n\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> dependencies\n {'a': set(), 'b': {'a'}, 'c': {'b'}}\n >>> dependents # doctest: +SKIP\n {'a': {'b'}, 'b': {'c'}, 'c': set()}\n \"\"\"\n dependencies = {k: get_dependencies(dsk, task=v) for k, v in dsk.items()}\n dependents = reverse_dict(dependencies)\n return dependencies, dependents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_flatten_flatten.if_isinstance_seq_str_.else_.for_item_in_seq_.if_isinstance_item_conta.else_.yield_item": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_flatten_flatten.if_isinstance_seq_str_.else_.for_item_in_seq_.if_isinstance_item_conta.else_.yield_item", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 299, "span_ids": ["flatten"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def flatten(seq, container=list):\n \"\"\"\n\n >>> list(flatten([1]))\n [1]\n\n >>> list(flatten([[1, 2], [1, 2]]))\n [1, 2, 1, 2]\n\n >>> list(flatten([[[1], [2]], [[1], [2]]]))\n [1, 2, 1, 2]\n\n >>> list(flatten(((1, 2), (1, 
2)))) # Don't flatten tuples\n [(1, 2), (1, 2)]\n\n >>> list(flatten((1, 2, [3, 4]))) # support heterogeneous\n [1, 2, 3, 4]\n \"\"\"\n if isinstance(seq, str):\n yield seq\n else:\n for item in seq:\n if isinstance(item, container):\n yield from flatten(item, container=container)\n else:\n yield item", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_reverse_dict_reverse_dict.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_reverse_dict_reverse_dict.return.result", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 277, "end_line": 292, "span_ids": ["reverse_dict"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reverse_dict(d):\n \"\"\"\n\n >>> a, b, c = 'abc'\n >>> d = {a: [b, c], b: [c]}\n >>> reverse_dict(d) # doctest: +SKIP\n {'a': set([]), 'b': set(['a']), 'c': set(['a', 'b'])}\n \"\"\"\n result = defaultdict(set)\n _add = set.add\n for k, vals in d.items():\n result[k]\n for val in vals:\n _add(result[val], k)\n result.default_factory = None\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_subs_subs.return.task_1_tuple_newargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_subs_subs.return.task_1_tuple_newargs_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 320, "end_line": 354, "span_ids": ["subs"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def subs(task, key, val):\n \"\"\"Perform a substitution on a task\n\n Examples\n --------\n\n >>> subs((inc, 'x'), 'x', 1) # doctest: +ELLIPSIS\n (<function inc at ...>, 1)\n \"\"\"\n type_task = type(task)\n if not (type_task is tuple and task and callable(task[0])): # istask(task):\n try:\n if type_task is type(key) and task == key:\n return val\n except Exception:\n pass\n if type_task is list:\n return [subs(x, key, val) for x in task]\n return task\n newargs = []\n hash_key = {key}\n for arg in task[1:]:\n type_arg = type(arg)\n if type_arg is tuple and arg and callable(arg[0]): # istask(task):\n arg = subs(arg, key, val)\n elif type_arg is list:\n arg = [subs(x, key, val) for x in arg]\n else:\n try:\n if arg in hash_key: # Hash and equality match\n arg = val\n except TypeError: # 
not hashable\n pass\n newargs.append(arg)\n return task[:1] + tuple(newargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__toposort__toposort.return.ordered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py__toposort__toposort.return.ordered", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 357, "end_line": 423, "span_ids": ["_toposort"], "tokens": 491}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _toposort(dsk, keys=None, returncycle=False, dependencies=None):\n # Stack-based depth-first search traversal. This is based on Tarjan's\n # method for topological sorting (see wikipedia for pseudocode)\n if keys is None:\n keys = dsk\n elif not isinstance(keys, list):\n keys = [keys]\n if not returncycle:\n ordered = []\n\n # Nodes whose descendents have been completely explored.\n # These nodes are guaranteed to not be part of a cycle.\n completed = set()\n\n # All nodes that have been visited in the current traversal. Because\n # we are doing depth-first search, going \"deeper\" should never result\n # in visiting a node that has already been seen. 
The `seen` and\n # `completed` sets are mutually exclusive; it is okay to visit a node\n # that has already been added to `completed`.\n seen = set()\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k) for k in dsk}\n\n for key in keys:\n if key in completed:\n continue\n nodes = [key]\n while nodes:\n # Keep current node on the stack until all descendants are visited\n cur = nodes[-1]\n if cur in completed:\n # Already fully traversed descendants of cur\n nodes.pop()\n continue\n seen.add(cur)\n\n # Add direct descendants of cur to nodes stack\n next_nodes = []\n for nxt in dependencies[cur]:\n if nxt not in completed:\n if nxt in seen:\n # Cycle detected!\n cycle = [nxt]\n while nodes[-1] != nxt:\n cycle.append(nodes.pop())\n cycle.append(nodes.pop())\n cycle.reverse()\n if returncycle:\n return cycle\n else:\n cycle = \"->\".join(str(x) for x in cycle)\n raise RuntimeError(\"Cycle detected in Dask: %s\" % cycle)\n next_nodes.append(nxt)\n\n if next_nodes:\n nodes.extend(next_nodes)\n else:\n # cur has no more descendants to explore, so we're done with it\n if not returncycle:\n ordered.append(cur)\n completed.add(cur)\n seen.remove(cur)\n nodes.pop()\n if returncycle:\n return []\n return ordered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_toposort_getcycle.return._toposort_d_keys_keys_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_toposort_getcycle.return._toposort_d_keys_keys_r", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 429, "end_line": 452, "span_ids": ["getcycle", "toposort"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def toposort(dsk, dependencies=None):\n \"\"\"Return a list of keys of dask sorted in topological order.\"\"\"\n return _toposort(dsk, dependencies=dependencies)\n\n\ndef getcycle(d, keys):\n \"\"\"Return a list of nodes that form a cycle if Dask is not a DAG.\n\n Returns an empty list if no cycle is found.\n\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n\n >>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}\n >>> getcycle(d, 'x')\n ['x', 'z', 'y', 'x']\n\n See Also\n --------\n isdag\n \"\"\"\n return _toposort(d, keys=keys, returncycle=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_isdag_isdag.return.not_getcycle_d_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_isdag_isdag.return.not_getcycle_d_keys_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 427, "end_line": 445, "span_ids": ["isdag"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def isdag(d, keys):\n \"\"\"Does Dask form a directed acyclic graph when calculating keys?\n\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n\n >>> inc = lambda x: x + 1\n >>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')\n True\n >>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')\n False\n\n See Also\n --------\n getcycle\n \"\"\"\n return not getcycle(d, keys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_literal_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/core.py_literal_", "embedding": null, "metadata": {"file_path": "dask/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 473, "end_line": 503, "span_ids": ["literal.__call__", "literal.__init__", "literal", "literal.__reduce__", "quote", "literal.__repr__"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class literal:\n \"\"\"A small serializable object to wrap literal values without copying\"\"\"\n\n __slots__ = (\"data\",)\n\n def __init__(self, data):\n self.data = data\n\n def __repr__(self):\n return \"literal\" % type(self.data).__name__\n\n def __reduce__(self):\n return (literal, (self.data,))\n\n def __call__(self):\n return self.data\n\n\ndef quote(x):\n \"\"\"Ensure that this value remains this value in a dask graph\n\n Some values in dask graph take on special meaning. 
Sometimes we want to\n ensure that our data is not interpreted but remains literal.\n\n >>> quote((add, 1, 2))\n (literal<type=tuple>,)\n \"\"\"\n if istask(x) or type(x) is list or type(x) is dict:\n return (literal(x),)\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/__init__.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/__init__.py_try__", "embedding": null, "metadata": {"file_path": "dask/dataframe/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 63, "span_ids": ["impl"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n from ..base import compute\n from . import backends, dispatch, rolling\n from .core import (\n DataFrame,\n Index,\n Series,\n _Frame,\n map_partitions,\n repartition,\n to_datetime,\n to_timedelta,\n )\n from .groupby import Aggregation\n from .io import (\n demo,\n from_array,\n from_bcolz,\n from_dask_array,\n from_delayed,\n from_pandas,\n read_csv,\n read_fwf,\n read_hdf,\n read_json,\n read_sql,\n read_sql_query,\n read_sql_table,\n read_table,\n to_bag,\n to_csv,\n to_hdf,\n to_json,\n to_records,\n to_sql,\n )\n from .multi import concat, merge, merge_asof\n from .numeric import to_numeric\n from .optimize import optimize\n from .reshape import get_dummies, melt, pivot_table\n from .utils import assert_eq\n\n try:\n from .io import read_parquet, to_parquet\n except ImportError:\n pass\n try:\n from .io import read_orc, to_orc\n except ImportError:\n pass\n try:\n from .core import isna\n except ImportError:\n pass\nexcept ImportError as e:\n msg = (\n \"Dask dataframe requirements are not installed.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install dask # either conda install\\n\"\n ' python -m pip install \"dask[dataframe]\" --upgrade # or python -m pip install'\n )\n raise ImportError(msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_compat.py_string_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_compat.py_string_", "embedding": null, "metadata": {"file_path": "dask/dataframe/_compat.py", "file_name": "_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 85, "span_ids": ["assert_numpy_array_equal", "imports", "makeDateIndex", "makeMissingDataframe", "makeTimeDataFrame", "makeTimedeltaIndex", "makeTimeSeries", "assert_categorical_equal", "makeMixedDataFrame", "makeDataFrame"], "tokens": 735}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import string\n\nimport numpy as np\nimport pandas as pd\nfrom packaging.version import parse as parse_version\n\nPANDAS_VERSION = parse_version(pd.__version__)\nPANDAS_GT_104 = PANDAS_VERSION >= parse_version(\"1.0.4\")\nPANDAS_GT_110 = PANDAS_VERSION >= parse_version(\"1.1.0\")\nPANDAS_GT_120 = PANDAS_VERSION >= parse_version(\"1.2.0\")\nPANDAS_GT_121 = PANDAS_VERSION >= parse_version(\"1.2.1\")\nPANDAS_GT_130 = PANDAS_VERSION >= parse_version(\"1.3.0\")\nPANDAS_GT_131 = PANDAS_VERSION >= parse_version(\"1.3.1\")\nPANDAS_GT_133 = PANDAS_VERSION >= parse_version(\"1.3.3\")\nPANDAS_GT_140 = PANDAS_VERSION >= parse_version(\"1.4.0\")\n# FIXME: Using `.release` below as versions like `1.5.0.dev0+268.gbe8d1ec880`\n# are less than `1.5.0` with `packaging.version`. Update to use `parse_version(\"1.5.0\")`\n# below once `pandas=1.5.0` is released\nPANDAS_GT_150 = PANDAS_VERSION.release >= (1, 5, 0)\n\nimport pandas.testing as tm\n\n\ndef assert_categorical_equal(left, right, *args, **kwargs):\n tm.assert_extension_array_equal(left, right, *args, **kwargs)\n assert pd.api.types.is_categorical_dtype(\n left.dtype\n ), f\"{left} is not categorical dtype\"\n assert pd.api.types.is_categorical_dtype(\n right.dtype\n ), f\"{right} is not categorical dtype\"\n\n\ndef assert_numpy_array_equal(left, right):\n left_na = pd.isna(left)\n right_na = pd.isna(right)\n np.testing.assert_array_equal(left_na, right_na)\n\n left_valid = left[~left_na]\n right_valid = right[~right_na]\n np.testing.assert_array_equal(left_valid, right_valid)\n\n\ndef makeDataFrame():\n data = np.random.randn(30, 4)\n index = list(string.ascii_letters)[:30]\n return pd.DataFrame(data, index=index, columns=list(\"ABCD\"))\n\n\ndef makeTimeDataFrame():\n data = makeDataFrame()\n data.index = makeDateIndex()\n return data\n\n\ndef makeTimeSeries():\n return makeTimeDataFrame()[\"A\"]\n\n\ndef makeDateIndex(k=30, freq=\"B\"):\n return pd.date_range(\"2000\", periods=k, freq=freq)\n\n\ndef makeTimedeltaIndex(k=30, freq=\"D\"):\n return pd.timedelta_range(\"1 day\", periods=k, freq=freq)\n\n\ndef makeMissingDataframe():\n df = makeDataFrame()\n data = df.values\n data = np.where(data > 1, np.nan, data)\n return pd.DataFrame(data, index=df.index, columns=df.columns)\n\n\ndef makeMixedDataFrame():\n df = pd.DataFrame(\n {\n \"A\": [0.0, 1, 2, 3, 4],\n \"B\": [0.0, 1, 0, 1, 0],\n \"C\": [f\"foo{i}\" for i in range(5)],\n \"D\": pd.date_range(\"2009-01-01\", periods=5),\n }\n )\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_dtypes.py_pd_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/_dtypes.py_pd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/_dtypes.py", "file_name": "_dtypes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 34, "span_ids": ["__4", "__5", "__2", "imports", "__3", "__1", "_"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\n\nfrom .extensions import make_array_nonempty, make_scalar\n\n\n@make_array_nonempty.register(pd.DatetimeTZDtype)\ndef _(dtype):\n return pd.array([pd.Timestamp(1), pd.NaT], dtype=dtype)\n\n\n@make_scalar.register(pd.DatetimeTZDtype)\ndef _(x):\n return pd.Timestamp(1, tz=x.tz, unit=x.unit)\n\n\n@make_array_nonempty.register(pd.StringDtype)\ndef _(dtype):\n return pd.array([\"a\", pd.NA], dtype=dtype)\n\n\n@make_scalar.register(str)\ndef _(x):\n return \"s\"\n\n\n@make_array_nonempty.register(pd.BooleanDtype)\ndef _(dtype):\n return pd.array([True, pd.NA], dtype=dtype)\n\n\n@make_scalar.register(bool)\ndef _(x):\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._function_map_Accessor._function_map.return.self__series_map_partitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor._function_map_Accessor._function_map.return.self__series_map_partitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 59, "end_line": 75, "span_ids": ["Accessor._function_map"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Accessor:\n\n def _function_map(self, attr, *args, **kwargs):\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = self._delegate_method(\n self._series._meta_nonempty, self._accessor_name, attr, args, kwargs\n )\n token = f\"{self._accessor_name}-{attr}\"\n return self._series.map_partitions(\n self._delegate_method,\n self._accessor_name,\n attr,\n args,\n kwargs,\n meta=meta,\n token=token,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_from_collections_import_d__categorize_block.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_from_collections_import_d__categorize_block.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["imports", "_categorize_block"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import defaultdict\nfrom numbers import Integral\n\nimport pandas as pd\nfrom pandas.api.types import is_scalar\nfrom tlz import partition_all\n\nfrom ..base import compute_as_if_collection, tokenize\nfrom . import methods\nfrom .accessor import Accessor\nfrom .dispatch import ( # noqa: F401\n categorical_dtype,\n categorical_dtype_dispatch,\n is_categorical_dtype,\n)\nfrom .utils import clear_known_categories, has_known_categories\n\n\ndef _categorize_block(df, categories, index):\n \"\"\"Categorize a dataframe with given categories\n\n df: DataFrame\n categories: dict mapping column name to iterable of categories\n \"\"\"\n df = df.copy()\n for col, vals in categories.items():\n if is_categorical_dtype(df[col]):\n df[col] = df[col].cat.set_categories(vals)\n else:\n cat_dtype = categorical_dtype(meta=df[col], categories=vals, ordered=False)\n df[col] = df[col].astype(cat_dtype)\n if index is not None:\n if is_categorical_dtype(df.index):\n ind = df.index.set_categories(index)\n else:\n cat_dtype = categorical_dtype(\n meta=df.index, categories=index, ordered=False\n )\n ind = df.index.astype(dtype=cat_dtype)\n ind.name = df.index.name\n df.index = ind\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py__get_categories__get_categories_agg.return.res_res_ind_0_append_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py__get_categories__get_categories_agg.return.res_res_ind_0_append_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 73, "span_ids": ["_get_categories_agg", "_get_categories"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_categories(df, columns, index):\n res = {}\n for col in columns:\n x = df[col]\n if is_categorical_dtype(x):\n res[col] = x._constructor(x.cat.categories)\n else:\n res[col] = x.dropna().drop_duplicates()\n if index:\n if is_categorical_dtype(df.index):\n return res, df.index.categories\n return res, df.index.dropna().drop_duplicates()\n return res, None\n\n\ndef _get_categories_agg(parts):\n res = defaultdict(list)\n res_ind = []\n for p in parts:\n for k, v in p[0].items():\n res[k].append(v)\n res_ind.append(p[1])\n res = {\n k: methods.concat(v, ignore_index=True).drop_duplicates()\n for k, v in res.items()\n }\n if res_ind[0] is None:\n return res, None\n return res, res_ind[0].append(res_ind[1:]).drop_duplicates()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_categorize_categorize.return.df_map_partitions__catego": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_categorize_categorize.return.df_map_partitions__catego", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 76, "end_line": 153, "span_ids": ["categorize"], "tokens": 649}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def categorize(df, columns=None, index=None, split_every=None, **kwargs):\n \"\"\"Convert columns of the DataFrame to category dtype.\n\n Parameters\n ----------\n columns : list, optional\n A list of column names to convert to categoricals. By default any\n column with an object dtype is converted to a categorical, and any\n unknown categoricals are made known.\n index : bool, optional\n Whether to categorize the index. By default, object indices are\n converted to categorical, and unknown categorical indices are made\n known. Set True to always categorize the index, False to never.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used.\n Default is 16.\n kwargs\n Keyword arguments are passed on to compute.\n \"\"\"\n meta = df._meta\n if columns is None:\n columns = list(meta.select_dtypes([\"object\", \"category\"]).columns)\n elif is_scalar(columns):\n columns = [columns]\n\n # Filter out known categorical columns\n columns = [\n c\n for c in columns\n if not (is_categorical_dtype(meta[c]) and has_known_categories(meta[c]))\n ]\n\n if index is not False:\n if is_categorical_dtype(meta.index):\n index = not has_known_categories(meta.index)\n elif index is None:\n index = meta.index.dtype == object\n\n # Nothing to do\n if not len(columns) and index is False:\n return df\n\n if split_every is None:\n split_every = 16\n elif split_every is False:\n split_every = df.npartitions\n elif not isinstance(split_every, Integral) or split_every < 2:\n raise ValueError(\"split_every must be an integer >= 2\")\n\n token = tokenize(df, columns, index, split_every)\n a = \"get-categories-chunk-\" + token\n dsk = {\n (a, i): (_get_categories, key, columns, index)\n for (i, key) in enumerate(df.__dask_keys__())\n }\n\n prefix = \"get-categories-agg-\" + token\n k = df.npartitions\n depth = 0\n while k > split_every:\n b = prefix + str(depth)\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(b, part_i)] = (_get_categories_agg, [(a, i) for i in inds])\n k = part_i + 1\n a = b\n depth += 1\n\n dsk[(prefix, 0)] = (_get_categories_agg, [(a, i) for i in range(k)])\n dsk.update(df.dask)\n\n # Compute the categories\n categories, index = compute_as_if_collection(\n df.__class__, dsk, (prefix, 0), **kwargs\n )\n\n # Categorize each partition\n return df.map_partitions(_categorize_block, categories, index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor_CategoricalAccessor.known.return.has_known_categories_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor_CategoricalAccessor.known.return.has_known_categories_self", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 156, "end_line": 195, "span_ids": ["CategoricalAccessor.known", "CategoricalAccessor"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n \"\"\"\n Accessor object for categorical properties of the Series values.\n\n Examples\n --------\n >>> s.cat.categories # doctest: +SKIP\n\n Notes\n -----\n Attributes that depend only on metadata are eager\n\n * categories\n * ordered\n\n Attributes depending on the entire dataset are lazy\n\n * codes\n * ...\n\n So `df.a.cat.categories` <=> `df.a._meta.cat.categories`\n So `df.a.cat.codes` <=> `df.a.map_partitions(lambda x: x.cat.codes)`\n \"\"\"\n\n _accessor_name = \"cat\"\n _accessor_methods = (\n \"add_categories\",\n \"as_ordered\",\n \"as_unordered\",\n \"remove_categories\",\n \"rename_categories\",\n \"reorder_categories\",\n \"set_categories\",\n )\n _accessor_properties = ()\n\n @property\n def known(self):\n \"\"\"Whether the categories are fully known\"\"\"\n return has_known_categories(self._series)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_known_CategoricalAccessor.as_known.return.self_set_categories_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_known_CategoricalAccessor.as_known.return.self_set_categories_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 199, "span_ids": ["CategoricalAccessor.as_known"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n\n def as_known(self, **kwargs):\n \"\"\"Ensure the categories in this series are known.\n\n If the categories are known, this is a no-op. 
If unknown, the\n categories are computed, and a new series with known categories is\n returned.\n\n Parameters\n ----------\n kwargs\n Keywords to pass on to the call to `compute`.\n \"\"\"\n if self.known:\n return self._series\n categories = self._property_map(\"categories\").unique().compute(**kwargs)\n return self.set_categories(categories.values)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_unknown_CategoricalAccessor.codes.return.self__property_map_codes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.as_unknown_CategoricalAccessor.codes.return.self__property_map_codes", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 214, "end_line": 253, "span_ids": ["CategoricalAccessor.ordered", "CategoricalAccessor.codes", "CategoricalAccessor.categories", "CategoricalAccessor.as_unknown"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n\n def as_unknown(self):\n \"\"\"Ensure the categories in this series are unknown\"\"\"\n if not self.known:\n return self._series\n out = self._series.copy()\n out._meta = clear_known_categories(out._meta)\n return out\n\n @property\n def ordered(self):\n \"\"\"Whether the categories have an ordered relationship\"\"\"\n return self._delegate_property(self._series._meta, \"cat\", \"ordered\")\n\n @property\n def categories(self):\n \"\"\"The categories of this categorical.\n\n If categories are unknown, an error is raised\"\"\"\n if not self.known:\n msg = (\n \"`df.column.cat.categories` with unknown categories is not \"\n \"supported. Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known categories\"\n )\n raise NotImplementedError(msg)\n return self._delegate_property(self._series._meta, \"cat\", \"categories\")\n\n @property\n def codes(self):\n \"\"\"The codes of this categorical.\n\n If categories are unknown, an error is raised\"\"\"\n if not self.known:\n msg = (\n \"`df.column.cat.codes` with unknown categories is not \"\n \"supported. 
Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known categories\"\n )\n raise NotImplementedError(msg)\n return self._property_map(\"codes\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.remove_unused_categories_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/categorical.py_CategoricalAccessor.remove_unused_categories_", "embedding": null, "metadata": {"file_path": "dask/dataframe/categorical.py", "file_name": "categorical.py", "file_type": "text/x-python", "category": "implementation", "start_line": 242, "end_line": 277, "span_ids": ["CategoricalAccessor.remove_unused_categories"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CategoricalAccessor(Accessor):\n\n def remove_unused_categories(self):\n \"\"\"\n Removes categories which are not used\n\n Notes\n -----\n This method requires a full scan of the data to compute the\n unique values, which can be expensive.\n \"\"\"\n # get the set of used categories\n present = self._series.dropna().unique()\n present = pd.Index(present.compute())\n\n if isinstance(self._series._meta, pd.CategoricalIndex):\n meta_cat = self._series._meta\n else:\n meta_cat = self._series._meta.cat\n\n # Reorder to keep cat:code relationship, filtering unused (-1)\n ordered, mask = present.reindex(meta_cat.categories)\n if mask is None:\n # PANDAS-23963: old and new categories match.\n return self._series\n\n new_categories = ordered[mask != -1]\n meta = meta_cat.set_categories(new_categories, ordered=meta_cat.ordered)\n return self._series.map_partitions(\n self._delegate_method,\n \"cat\",\n \"set_categories\",\n (),\n {\"new_categories\": new_categories},\n meta=meta,\n token=\"cat-set_categories\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__concat_finalize.return._concat_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__concat_finalize.return._concat_results_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 83, "end_line": 106, "span_ids": ["finalize", "_concat"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _concat(args, ignore_index=False):\n if not args:\n return args\n if 
isinstance(first(core.flatten(args)), np.ndarray):\n return da.core.concatenate3(args)\n if not has_parallel_type(args[0]):\n try:\n return pd.Series(args)\n except Exception:\n return args\n # We filter out empty partitions here because pandas frequently has\n # inconsistent dtypes in results between empty and non-empty frames.\n # Ideally this would be handled locally for each operation, but in practice\n # this seems easier. TODO: don't do this.\n args2 = [i for i in args if len(i)]\n return (\n args[0]\n if not args2\n else methods.concat(args2, uniform=True, ignore_index=ignore_index)\n )\n\n\ndef finalize(results):\n return _concat(results)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__scalar_binary__scalar_binary.if_return_type_is_not_Sca.else_.return.Scalar_graph_name_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__scalar_binary__scalar_binary.if_return_type_is_not_Sca.else_.return.Scalar_graph_name_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 262, "end_line": 292, "span_ids": ["_scalar_binary"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _scalar_binary(op, self, other, inv=False):\n name = f\"{funcname(op)}-{tokenize(self, other)}\"\n dependencies = [self]\n\n dsk = {}\n return_type = get_parallel_type(other)\n\n if isinstance(other, Scalar):\n dependencies.append(other)\n other_key = (other._name, 0)\n elif is_dask_collection(other):\n return NotImplemented\n else:\n other_key = other\n\n dsk[(name, 0)] = (\n (op, other_key, (self._name, 0)) if inv else (op, (self._name, 0), other_key)\n )\n\n other_meta = make_meta(other, parent_meta=self._parent_meta)\n other_meta_nonempty = meta_nonempty(other_meta)\n if inv:\n meta = op(other_meta_nonempty, self._meta_nonempty)\n else:\n meta = op(self._meta_nonempty, other_meta_nonempty)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n if return_type is not Scalar:\n return return_type(graph, name, meta, [other.index.min(), other.index.max()])\n else:\n return Scalar(graph, name, meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._elemwise__Frame.__repr__.return._str_fmt_format_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._elemwise__Frame.__repr__.return._str_fmt_format_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 494, "end_line": 524, "span_ids": 
["_Frame._repr_divisions", "_Frame.__repr__", "_Frame._elemwise", "_Frame._repr_data"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def _elemwise(self):\n return elemwise\n\n def _repr_data(self):\n raise NotImplementedError\n\n @property\n def _repr_divisions(self):\n name = f\"npartitions={self.npartitions}\"\n if self.known_divisions:\n divisions = pd.Index(self.divisions, name=name)\n else:\n # avoid to be converted to NaN\n divisions = pd.Index([\"\"] * (self.npartitions + 1), name=name)\n return divisions\n\n def __repr__(self):\n data = self._repr_data().to_string(max_rows=5, show_dimensions=False)\n _str_fmt = \"\"\"Dask {klass} Structure:\n{data}\nDask Name: {name}, {task} tasks\"\"\"\n if len(self.columns) == 0:\n data = data.partition(\"\\n\")[-1].replace(\"Index\", \"Divisions\")\n _str_fmt = f\"Empty {_str_fmt}\"\n return _str_fmt.format(\n klass=self.__class__.__name__,\n data=data,\n name=key_split(self._name),\n task=len(self.dask),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.drop_duplicates__Frame.drop_duplicates.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.drop_duplicates__Frame.drop_duplicates.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 502, "end_line": 532, "span_ids": ["_Frame.drop_duplicates"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def drop_duplicates(\n self, subset=None, split_every=None, split_out=1, ignore_index=False, **kwargs\n ):\n if subset is not None:\n # Let pandas error on bad inputs\n self._meta_nonempty.drop_duplicates(subset=subset, **kwargs)\n kwargs[\"subset\"] = subset\n split_out_setup = split_out_on_cols\n split_out_setup_kwargs = {\"cols\": subset}\n else:\n self._meta_nonempty.drop_duplicates(**kwargs)\n split_out_setup = split_out_setup_kwargs = None\n\n if kwargs.get(\"keep\", True) is False:\n raise NotImplementedError(\"drop_duplicates with keep=False\")\n\n chunk = M.drop_duplicates\n return aca(\n self,\n chunk=chunk,\n aggregate=chunk,\n meta=self._meta,\n token=\"drop-duplicates\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_setup,\n split_out_setup_kwargs=split_out_setup_kwargs,\n ignore_index=ignore_index,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__len____Frame.__complex__.return.self__scalarfunc_complex_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__len____Frame.__complex__.return.self__scalarfunc_complex_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 625, "end_line": 653, "span_ids": ["_Frame.__int__", "_Frame._scalarfunc", "_Frame:9", "_Frame.__bool__", "_Frame.__complex__", "_Frame:7", "_Frame.__float__", "_Frame.__len__"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def __len__(self):\n return self.reduction(\n len, np.sum, token=\"len\", meta=int, split_every=False\n ).compute()\n\n def __bool__(self):\n raise ValueError(\n f\"The truth value of a {self.__class__.__name__} is ambiguous. \"\n \"Use a.any() or a.all().\"\n )\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n def wrapper():\n raise TypeError(f\"cannot convert the series to {cast_type}\")\n\n return wrapper\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __complex__(self):\n return self._scalarfunc(complex)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_partitions__Frame.map_partitions.return.map_partitions_func_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_partitions__Frame.map_partitions.return.map_partitions_func_self", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 655, "end_line": 770, "span_ids": ["_Frame.map_partitions"], "tokens": 1134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def map_partitions(self, func, *args, **kwargs):\n \"\"\"Apply Python function on each DataFrame partition.\n\n Note that the index and divisions are assumed to remain unchanged.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n args, kwargs :\n Arguments and keywords to pass to the function. The partition will\n be the first argument, and these will be passed *after*. 
Arguments\n and keywords may contain ``Scalar``, ``Delayed``, ``partition_info``\n or regular python objects. DataFrame-like args (both dask and\n pandas) will be repartitioned to align (if necessary) before\n applying the function (see ``align_dataframes`` to control).\n enforce_metadata : bool, default True\n Whether to enforce at runtime that the structure of the DataFrame\n produced by ``func`` actually matches the structure of ``meta``.\n This will rename and reorder columns for each partition,\n and will raise an error if this doesn't work or types don't match.\n transform_divisions : bool, default True\n Whether to apply the function onto the divisions and apply those\n transformed divisions to the output.\n align_dataframes : bool, default True\n Whether to repartition DataFrame- or Series-like args\n (both dask and pandas) so their divisions align before applying\n the function. This requires all inputs to have known divisions.\n Single-partition inputs will be split into multiple partitions.\n\n If False, all inputs must have either the same number of partitions\n or a single partition. Single-partition inputs will be broadcast to\n every partition of multi-partition inputs.\n $META\n\n Examples\n --------\n Given a DataFrame, Series, or Index, such as:\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],\n ... 'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n One can use ``map_partitions`` to apply a function on each partition.\n Extra arguments and keywords can optionally be provided, and will be\n passed to the function after the partition.\n\n Here we apply a function with arguments and keywords to a DataFrame,\n resulting in a Series:\n\n >>> def myadd(df, a, b=1):\n ... return df.x + df.y + a + b\n >>> res = ddf.map_partitions(myadd, 1, b=2)\n >>> res.dtype\n dtype('float64')\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with no name, and dtype\n ``float64``:\n\n >>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))\n\n Here we map a function that takes in a DataFrame, and returns a\n DataFrame with a new column:\n\n >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))\n >>> res.dtypes\n x int64\n y float64\n z float64\n dtype: object\n\n As before, the output metadata can also be specified manually. This\n time we pass in a ``dict``, as the output is a DataFrame:\n\n >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),\n ... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ddf.map_partitions(lambda df: df.head(), meta=ddf)\n\n Also note that the index and divisions are assumed to remain unchanged.\n If the function you're mapping changes the index/divisions, you'll need\n to clear them afterwards:\n\n >>> ddf.map_partitions(func).clear_divisions() # doctest: +SKIP\n\n Your map function gets information about where it is in the dataframe by\n accepting a special ``partition_info`` keyword argument.\n\n >>> def func(partition, partition_info=None):\n ... 
pass\n\n This will receive the following information:\n\n >>> partition_info # doctest: +SKIP\n {'number': 1, 'division': 3}\n\n For each argument and keyword arguments that are dask dataframes you will\n receive the number (n) which represents the nth partition of the dataframe\n and the division (the first index value in the partition). If divisions\n are not known (for instance if the index is not sorted) then you will get\n None as the division.\n \"\"\"\n return map_partitions(func, self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_overlap__Frame.map_overlap.return.map_overlap_func_self_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.map_overlap__Frame.map_overlap.return.map_overlap_func_self_b", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 655, "end_line": 765, "span_ids": ["_Frame.map_overlap"], "tokens": 1184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def map_overlap(self, func, before, after, *args, **kwargs):\n \"\"\"Apply a function to each partition, sharing rows with adjacent partitions.\n\n This can be useful for implementing windowing functions such as\n ``df.rolling(...).mean()`` or ``df.diff()``.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n before : int\n The number of rows to prepend to partition ``i`` from the end of\n partition ``i - 1``.\n after : int\n The number of rows to append to partition ``i`` from the beginning\n of partition ``i + 1``.\n args, kwargs :\n Arguments and keywords to pass to the function. The partition will\n be the first argument, and these will be passed *after*.\n $META\n\n Notes\n -----\n Given positive integers ``before`` and ``after``, and a function\n ``func``, ``map_overlap`` does the following:\n\n 1. Prepend ``before`` rows to each partition ``i`` from the end of\n partition ``i - 1``. The first partition has no rows prepended.\n\n 2. Append ``after`` rows to each partition ``i`` from the beginning of\n partition ``i + 1``. The last partition has no rows appended.\n\n 3. Apply ``func`` to each partition, passing in any extra ``args`` and\n ``kwargs`` if provided.\n\n 4. Trim ``before`` rows from the beginning of all but the first\n partition.\n\n 5. Trim ``after`` rows from the end of all but the last partition.\n\n Note that the index and divisions are assumed to remain unchanged.\n\n Examples\n --------\n Given a DataFrame, Series, or Index, such as:\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],\n ... 
'y': [1., 2., 3., 4., 5.]})\n        >>> ddf = dd.from_pandas(df, npartitions=2)\n\n        A rolling sum with a trailing moving window of size 2 can be computed by\n        overlapping 2 rows before each partition, and then mapping calls to\n        ``df.rolling(2).sum()``:\n\n        >>> ddf.compute()\n            x    y\n        0   1  1.0\n        1   2  2.0\n        2   4  3.0\n        3   7  4.0\n        4  11  5.0\n        >>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()\n              x    y\n        0   NaN  NaN\n        1   3.0  3.0\n        2   6.0  5.0\n        3  11.0  7.0\n        4  18.0  9.0\n\n        The pandas ``diff`` method computes a discrete difference shifted by a\n        number of periods (can be positive or negative). This can be\n        implemented by mapping calls to ``df.diff`` to each partition after\n        prepending/appending that many rows, depending on sign:\n\n        >>> def diff(df, periods=1):\n        ...     before, after = (periods, 0) if periods > 0 else (0, -periods)\n        ...     return df.map_overlap(lambda df, periods=1: df.diff(periods),\n        ...                           before, after, periods=periods)\n        >>> diff(ddf, 1).compute()\n             x    y\n        0  NaN  NaN\n        1  1.0  1.0\n        2  2.0  1.0\n        3  3.0  1.0\n        4  4.0  1.0\n\n        If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-\n        based windows.\n\n        >>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))\n        >>> dts = dd.from_pandas(ts, npartitions=2)\n        >>> dts.map_overlap(lambda df: df.rolling('2D').sum(),\n        ...                 pd.Timedelta('2D'), 0).compute()\n        2017-01-01     0.0\n        2017-01-02     1.0\n        2017-01-03     3.0\n        2017-01-04     5.0\n        2017-01-05     7.0\n        2017-01-06     9.0\n        2017-01-07    11.0\n        2017-01-08    13.0\n        2017-01-09    15.0\n        2017-01-10    17.0\n        Freq: D, dtype: float64\n        \"\"\"\n        from .rolling import map_overlap\n\n        return map_overlap(func, self, before, after, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.memory_usage_per_partition__Frame.memory_usage_per_partition.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.memory_usage_per_partition__Frame.memory_usage_per_partition.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 756, "end_line": 777, "span_ids": ["_Frame.memory_usage_per_partition"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n    def memory_usage_per_partition(self, index=True, deep=False):\n        \"\"\"Return the memory usage of each partition\n\n        Parameters\n        ----------\n        index : bool, default True\n            Specifies whether to include the memory usage of the index in\n            returned Series.\n        deep : bool, default False\n            If True, introspect the data deeply by interrogating\n            ``object`` dtypes for system-level memory consumption, and include\n            it in the returned values.\n\n        Returns\n        -------\n        Series\n            A Series whose index is the partition number and whose values\n            are the memory usage of each partition in bytes.\n        \"\"\"\n        return self.map_partitions(\n
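A compact sketch of the overlap mechanics the notes above describe (the series, window size, and partition count are illustrative):

import pandas as pd
import dask.dataframe as dd

s = dd.from_pandas(pd.Series(range(8), dtype="float64"), npartitions=4)
# Share 2 trailing rows with each partition so every window of size 3 is
# complete; nothing is needed from the following partition (after=0).
rolled = s.map_overlap(lambda x: x.rolling(3).mean(), before=2, after=0)
print(rolled.compute())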
total_mem_usage, index=index, deep=deep\n ).clear_divisions()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction__Frame.reduction._Generic_row_wise_reduc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction__Frame.reduction._Generic_row_wise_reduc", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 779, "end_line": 891, "span_ids": ["_Frame.reduction"], "tokens": 986}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def reduction(\n self,\n chunk,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n split_every=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n **kwargs,\n ):\n \"\"\"Generic row-wise reductions.\n\n Parameters\n ----------\n chunk : callable\n Function to operate on each partition. Should return a\n ``pandas.DataFrame``, ``pandas.Series``, or a scalar.\n aggregate : callable, optional\n Function to operate on the concatenated result of ``chunk``. If not\n specified, defaults to ``chunk``. Used to do the final aggregation\n in a tree reduction.\n\n The input to ``aggregate`` depends on the output of ``chunk``.\n If the output of ``chunk`` is a:\n\n - scalar: Input is a Series, with one row per partition.\n - Series: Input is a DataFrame, with one row per partition. Columns\n are the rows in the output series.\n - DataFrame: Input is a DataFrame, with one row per partition.\n Columns are the columns in the output dataframes.\n\n Should return a ``pandas.DataFrame``, ``pandas.Series``, or a\n scalar.\n combine : callable, optional\n Function to operate on intermediate concatenated results of\n ``chunk`` in a tree-reduction. If not provided, defaults to\n ``aggregate``. The input/output requirements should match that of\n ``aggregate`` described above.\n $META\n token : str, optional\n The name to use for the output keys.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used,\n and all intermediates will be concatenated and passed to\n ``aggregate``. Default is 8.\n chunk_kwargs : dict, optional\n Keyword arguments to pass on to ``chunk`` only.\n aggregate_kwargs : dict, optional\n Keyword arguments to pass on to ``aggregate`` only.\n combine_kwargs : dict, optional\n Keyword arguments to pass on to ``combine`` only.\n kwargs :\n All remaining keywords will be passed to ``chunk``, ``combine``,\n and ``aggregate``.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})\n >>> ddf = dd.from_pandas(df, npartitions=4)\n\n Count the number of rows in a DataFrame. 
To do this, count the number\n of rows in each partition, then sum the results:\n\n >>> res = ddf.reduction(lambda x: x.count(),\n ... aggregate=lambda x: x.sum())\n >>> res.compute()\n x 50\n y 50\n dtype: int64\n\n Count the number of rows in a Series with elements greater than or\n equal to a value (provided via a keyword).\n\n >>> def count_greater(x, value=0):\n ... return (x >= value).sum()\n >>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),\n ... chunk_kwargs={'value': 25})\n >>> res.compute()\n 25\n\n Aggregate both the sum and count of a Series at the same time:\n\n >>> def sum_and_count(x):\n ... return pd.Series({'count': x.count(), 'sum': x.sum()},\n ... index=['count', 'sum'])\n >>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())\n >>> res.compute()\n count 50\n sum 1225\n dtype: int64\n\n Doing the same, but for a DataFrame. Here ``chunk`` returns a\n DataFrame, meaning the input to ``aggregate`` is a DataFrame with an\n index with non-unique entries for both 'x' and 'y'. We groupby the\n index, and sum each group to get the final result.\n\n >>> def sum_and_count(x):\n ... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},\n ... columns=['count', 'sum'])\n >>> res = ddf.reduction(sum_and_count,\n ... aggregate=lambda x: x.groupby(level=0).sum())\n >>> res.compute()\n count sum\n x 50 1225\n y 50 3725\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction.if_aggregate_is_None___Frame.reduction.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reduction.if_aggregate_is_None___Frame.reduction.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 892, "end_line": 922, "span_ids": ["_Frame.reduction"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @insert_meta_param_description(pad=12)\n def reduction(\n self,\n chunk,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n split_every=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n **kwargs,\n ):\n if aggregate is None:\n aggregate = chunk\n\n if combine is None:\n if combine_kwargs:\n raise ValueError(\"`combine_kwargs` provided with no `combine`\")\n combine = aggregate\n combine_kwargs = aggregate_kwargs\n\n chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}\n chunk_kwargs[\"aca_chunk\"] = chunk\n\n combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}\n combine_kwargs[\"aca_combine\"] = combine\n\n aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}\n aggregate_kwargs[\"aca_aggregate\"] = aggregate\n\n return aca(\n self,\n chunk=_reduction_chunk,\n aggregate=_reduction_aggregate,\n combine=_reduction_combine,\n meta=meta,\n 
token=token,\n split_every=split_every,\n chunk_kwargs=chunk_kwargs,\n aggregate_kwargs=aggregate_kwargs,\n combine_kwargs=combine_kwargs,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.pipe__Frame.pipe.if_isinstance_func_tuple.else_.return.func_self_args_kwarg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.pipe__Frame.pipe.if_isinstance_func_tuple.else_.return.func_self_args_kwarg", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 924, "end_line": 937, "span_ids": ["_Frame.pipe"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def pipe(self, func, *args, **kwargs):\n # Taken from pandas:\n # https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.random_split__Frame.random_split.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.random_split__Frame.random_split.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 939, "end_line": 989, "span_ids": ["_Frame.random_split"], "tokens": 446}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def random_split(self, frac, random_state=None, shuffle=False):\n \"\"\"Pseudorandomly split dataframe into different pieces row-wise\n\n Parameters\n ----------\n frac : list\n List of floats that should sum to one.\n random_state : int or np.random.RandomState\n If int create a new RandomState with this as the seed.\n Otherwise draw from the passed RandomState.\n shuffle : bool, default False\n If set to True, the dataframe is shuffled (within partition)\n before the split.\n\n Examples\n --------\n\n 50/50 split\n\n >>> a, b = 
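To make the chunk/combine/aggregate flow above concrete, a hedged sketch of a custom tree reduction (the data and lambdas are invented for illustration): ``chunk`` runs once per partition, and ``aggregate`` folds the concatenated per-partition results.

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(100)}), npartitions=4)
# chunk returns a scalar per partition, so aggregate receives a Series
# with one row per partition and reduces it to a single value.
largest = ddf.x.reduction(chunk=lambda s: s.max(), aggregate=lambda s: s.max())
print(largest.compute())  # 99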
df.random_split([0.5, 0.5]) # doctest: +SKIP\n\n 80/10/10 split, consistent random_state\n\n >>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP\n\n See Also\n --------\n dask.DataFrame.sample\n \"\"\"\n if not np.allclose(sum(frac), 1):\n raise ValueError(\"frac should sum to 1\")\n state_data = random_state_data(self.npartitions, random_state)\n token = tokenize(self, frac, random_state)\n name = \"split-\" + token\n layer = {\n (name, i): (pd_split, (self._name, i), frac, state, shuffle)\n for i, state in enumerate(state_data)\n }\n\n out = []\n for i in range(len(frac)):\n name2 = \"split-%d-%s\" % (i, token)\n dsk2 = {\n (name2, j): (getitem, (name, j), i) for j in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(\n name2, merge(dsk2, layer), dependencies=[self]\n )\n out_df = type(self)(graph, name2, self._meta, self.divisions)\n out.append(out_df)\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.head__Frame.head.return.self__head_n_n_npartitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.head__Frame.head.return.self__head_n_n_npartitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1119, "end_line": 1138, "span_ids": ["_Frame.head"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def head(self, n=5, npartitions=1, compute=True):\n \"\"\"First n rows of the dataset\n\n Parameters\n ----------\n n : int, optional\n The number of rows to return. Default is 5.\n npartitions : int, optional\n Elements are only taken from the first ``npartitions``, with a\n default of 1. If there are fewer than ``n`` rows in the first\n ``npartitions`` a warning will be raised and any found rows\n returned. 
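A runnable version of the split shown in the docstring above (data and fractions are illustrative; a fixed ``random_state`` makes the split repeatable):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(100)}), npartitions=4)
# Fractions must sum to 1, otherwise a ValueError is raised.
train, test = ddf.random_split([0.8, 0.2], random_state=123)
print(len(train.compute()), len(test.compute()))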
Pass -1 to use all partitions.\n compute : bool, optional\n Whether to compute the result, default is True.\n \"\"\"\n if npartitions <= -1:\n npartitions = self.npartitions\n # No need to warn if we're already looking at all partitions\n safe = npartitions != self.npartitions\n return self._head(n=n, npartitions=npartitions, compute=compute, safe=safe)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._head__Frame._head.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._head__Frame._head.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1140, "end_line": 1173, "span_ids": ["_Frame._head"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _head(self, n, npartitions, compute, safe):\n if npartitions <= -1:\n npartitions = self.npartitions\n if npartitions > self.npartitions:\n raise ValueError(\n f\"only {self.npartitions} partitions, head received {npartitions}\"\n )\n\n name = f\"head-{npartitions}-{n}-{self._name}\"\n if safe:\n head = safe_head\n else:\n head = M.head\n\n if npartitions > 1:\n name_p = f\"head-partial-{n}-{self._name}\"\n\n dsk = {}\n for i in range(npartitions):\n dsk[(name_p, i)] = (M.head, (self._name, i), n)\n\n concat = (_concat, [(name_p, i) for i in range(npartitions)])\n dsk[(name, 0)] = (head, concat, n)\n else:\n dsk = {(name, 0): (head, (self._name, 0), n)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n result = new_dd_object(\n graph, name, self._meta, [self.divisions[0], self.divisions[npartitions]]\n )\n\n if compute:\n result = result.compute()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.tail__Frame.loc.return._LocIndexer_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.tail__Frame.loc.return._LocIndexer_self_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1042, "end_line": 1066, "span_ids": ["_Frame.loc", "_Frame.tail"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
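A short sketch of the ``npartitions`` behaviour described above (data sizes are illustrative):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(10)}), npartitions=2)
print(ddf.head(3))                  # searches only the first partition
print(ddf.head(3, npartitions=-1))  # searches all partitions, never warns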
_Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n    def tail(self, n=5, compute=True):\n        \"\"\"Last n rows of the dataset\n\n        Caveat: this only checks the last n rows of the last partition.\n        \"\"\"\n        name = \"tail-%d-%s\" % (n, self._name)\n        dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}\n\n        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n        result = new_dd_object(graph, name, self._meta, self.divisions[-2:])\n\n        if compute:\n            result = result.compute()\n        return result\n\n    @property\n    def loc(self):\n        \"\"\"Purely label-location based indexer for selection by label.\n\n        >>> df.loc[\"b\"]  # doctest: +SKIP\n        >>> df.loc[\"b\":\"d\"]  # doctest: +SKIP\n        \"\"\"\n        from .indexing import _LocIndexer\n\n        return _LocIndexer(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._partitions__Frame._partitions.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._partitions__Frame._partitions.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1068, "end_line": 1084, "span_ids": ["_Frame._partitions"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n    def _partitions(self, index):\n        if not isinstance(index, tuple):\n            index = (index,)\n        from ..array.slicing import normalize_index\n\n        index = normalize_index(index, (self.npartitions,))\n        index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n        name = \"blocks-\" + tokenize(self, index)\n        new_keys = np.array(self.__dask_keys__(), dtype=object)[index].tolist()\n\n        divisions = [self.divisions[i] for _, i in new_keys] + [\n            self.divisions[new_keys[-1][1] + 1]\n        ]\n        dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}\n\n        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n        return new_dd_object(graph, name, self._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.partitions__Frame._Note_iloc_is_implement": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.partitions__Frame._Note_iloc_is_implement", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1086, "end_line": 1107, "span_ids": ["_Frame.partitions"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date",
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def partitions(self):\n \"\"\"Slice dataframe by partitions\n\n This allows partitionwise slicing of a Dask Dataframe. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along partitions so, for example, ``df.partitions[:5]`` produces a new\n Dask Dataframe of the first five partitions.\n\n Examples\n --------\n >>> df.partitions[0] # doctest: +SKIP\n >>> df.partitions[:3] # doctest: +SKIP\n >>> df.partitions[::10] # doctest: +SKIP\n\n Returns\n -------\n A Dask DataFrame\n \"\"\"\n return IndexCallable(self._partitions)\n\n # Note: iloc is implemented only on DataFrame", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.repartition__Frame.repartition.if_partition_size_is_not_.elif_freq_is_not_None_.return.repartition_freq_self_fr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.repartition__Frame.repartition.if_partition_size_is_not_.elif_freq_is_not_None_.return.repartition_freq_self_fr", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1297, "end_line": 1386, "span_ids": ["_Frame.repartition"], "tokens": 832}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def repartition(\n self,\n divisions=None,\n npartitions=None,\n partition_size=None,\n freq=None,\n force=False,\n ):\n \"\"\"Repartition dataframe along new divisions\n\n Parameters\n ----------\n divisions : list, optional\n The \"dividing lines\" used to split the dataframe into partitions.\n For ``divisions=[0, 10, 50, 100]``, there would be three output partitions,\n where the new index contained [0, 10), [10, 50), and [50, 100), respectively.\n See https://docs.dask.org/en/latest/dataframe-design.html#partitions.\n Only used if npartitions and partition_size isn't specified.\n For convenience if given an integer this will defer to npartitions\n and if given a string it will defer to partition_size (see below)\n npartitions : int, optional\n Number of partitions of output. Only used if partition_size\n isn't specified.\n partition_size: int or string, optional\n Max number of bytes of memory for each partition. Use numbers or\n strings like 5MB. If specified npartitions and divisions will be\n ignored.\n\n .. 
warning::\n\n This keyword argument triggers computation to determine\n the memory size of each partition, which may be expensive.\n\n freq : str, pd.Timedelta\n A period on which to partition timeseries data like ``'7D'`` or\n ``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions' lower and upper bounds must be\n the same as the old divisions'.\n\n Notes\n -----\n Exactly one of `divisions`, `npartitions`, `partition_size`, or `freq`\n should be specified. A ``ValueError`` will be raised when that is\n not the case.\n\n Also note that ``len(divisons)`` is equal to ``npartitions + 1``. This is because ``divisions``\n represents the upper and lower bounds of each partition. The first item is the\n lower bound of the first partition, the second item is the lower bound of the\n second partition and the upper bound of the first partition, and so on.\n The second-to-last item is the lower bound of the last partition, and the last\n (extra) item is the upper bound of the last partition.\n\n Examples\n --------\n >>> df = df.repartition(npartitions=10) # doctest: +SKIP\n >>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP\n >>> df = df.repartition(freq='7d') # doctest: +SKIP\n \"\"\"\n if isinstance(divisions, int):\n npartitions = divisions\n divisions = None\n if isinstance(divisions, str):\n partition_size = divisions\n divisions = None\n if (\n sum(\n [\n partition_size is not None,\n divisions is not None,\n npartitions is not None,\n freq is not None,\n ]\n )\n != 1\n ):\n raise ValueError(\n \"Please provide exactly one of ``npartitions=``, ``freq=``, \"\n \"``divisions=``, ``partition_size=`` keyword arguments\"\n )\n\n if partition_size is not None:\n return repartition_size(self, partition_size)\n elif npartitions is not None:\n return repartition_npartitions(self, npartitions)\n elif divisions is not None:\n return repartition(self, divisions, force=force)\n elif freq is not None:\n return repartition_freq(self, freq=freq)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shuffle__Frame.shuffle.return.dd_shuffle_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shuffle__Frame.shuffle.return.dd_shuffle_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1190, "end_line": 1243, "span_ids": ["_Frame.shuffle"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def shuffle(\n self,\n on,\n npartitions=None,\n max_branch=None,\n shuffle=None,\n ignore_index=False,\n compute=None,\n ):\n \"\"\"Rearrange DataFrame into new partitions\n\n Uses hashing of `on` to map rows to output partitions. 
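A hedged sketch of the mutually exclusive keywords described above (the frame and division values are illustrative; the explicit divisions keep the original lower and upper bounds, so ``force`` is not needed):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(100)}), npartitions=4)
# Exactly one of npartitions / divisions / partition_size / freq is allowed.
coarser = ddf.repartition(npartitions=2)
explicit = ddf.repartition(divisions=[0, 50, 99])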
After this\n operation, rows with the same value of `on` will be in the same\n partition.\n\n Parameters\n ----------\n on : str, list of str, or Series, Index, or DataFrame\n Column(s) or index to be used to map rows to output partitions\n npartitions : int, optional\n Number of partitions of output. Partition count will not be\n changed by default.\n max_branch: int, optional\n The maximum number of splits per input partition. Used within\n the staged shuffling algorithm.\n shuffle: {'disk', 'tasks'}, optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n ignore_index: bool, default False\n Ignore index during shuffle. If ``True``, performance may improve,\n but index values will not be preserved.\n compute: bool\n Whether or not to trigger an immediate computation. Defaults to False.\n\n Notes\n -----\n This does not preserve a meaningful index/partitioning scheme. This\n is not deterministic if done in parallel.\n\n Examples\n --------\n >>> df = df.shuffle(df.columns[0]) # doctest: +SKIP\n \"\"\"\n from .shuffle import shuffle as dd_shuffle\n\n return dd_shuffle(\n self,\n on,\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n ignore_index=ignore_index,\n compute=compute,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.fillna__Frame.fillna.return.parts_map_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.fillna__Frame.fillna.return.parts_map_overlap_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1245, "end_line": 1308, "span_ids": ["_Frame.fillna"], "tokens": 499}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def fillna(self, value=None, method=None, limit=None, axis=None):\n axis = self._validate_axis(axis)\n if method is None and limit is not None:\n raise NotImplementedError(\"fillna with set limit and method=None\")\n if isinstance(value, _Frame):\n test_value = value._meta_nonempty.values[0]\n elif isinstance(value, Scalar):\n test_value = value._meta_nonempty\n else:\n test_value = value\n meta = self._meta_nonempty.fillna(\n value=test_value, method=method, limit=limit, axis=axis\n )\n\n if axis == 1 or method is None:\n # Control whether or not dask's partition alignment happens.\n # We don't want for a pandas Series.\n # We do want it for a dask Series\n if is_series_like(value) and not is_dask_collection(value):\n args = ()\n kwargs = {\"value\": value}\n else:\n args = (value,)\n kwargs = {}\n return self.map_partitions(\n M.fillna,\n *args,\n method=method,\n limit=limit,\n axis=axis,\n meta=meta,\n enforce_metadata=False,\n **kwargs,\n )\n\n if method in (\"pad\", \"ffill\"):\n method = \"ffill\"\n skip_check = 
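A short sketch of the hashing behaviour just described (column name and data are illustrative):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": [1, 2, 1, 3, 2, 1]}), npartitions=3)
# After hashing on "x", rows with equal values of "x" share a partition.
shuffled = ddf.shuffle("x", npartitions=3)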
0\n before, after = 1 if limit is None else limit, 0\n else:\n method = \"bfill\"\n skip_check = self.npartitions - 1\n before, after = 0, 1 if limit is None else limit\n\n if limit is None:\n name = \"fillna-chunk-\" + tokenize(self, method)\n dsk = {\n (name, i): (\n methods.fillna_check,\n (self._name, i),\n method,\n i != skip_check,\n )\n for i in range(self.npartitions)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n parts = new_dd_object(graph, name, meta, self.divisions)\n else:\n parts = self\n\n return parts.map_overlap(\n M.fillna, before, after, method=method, limit=limit, meta=meta\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.ffill__Frame.sample.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.ffill__Frame.sample.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1310, "end_line": 1365, "span_ids": ["_Frame.sample", "_Frame.bfill", "_Frame.ffill"], "tokens": 443}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def ffill(self, axis=None, limit=None):\n return self.fillna(method=\"ffill\", limit=limit, axis=axis)\n\n @derived_from(pd.DataFrame)\n def bfill(self, axis=None, limit=None):\n return self.fillna(method=\"bfill\", limit=limit, axis=axis)\n\n def sample(self, n=None, frac=None, replace=False, random_state=None):\n \"\"\"Random sample of items\n\n Parameters\n ----------\n n : int, optional\n Number of items to return is not supported by dask. Use frac\n instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : boolean, optional\n Sample with or without replacement. Default = False.\n random_state : int or ``np.random.RandomState``\n If int we create a new RandomState with this as the seed\n Otherwise we draw from the passed RandomState\n\n See Also\n --------\n DataFrame.random_split\n pandas.DataFrame.sample\n \"\"\"\n if n is not None:\n msg = (\n \"sample does not support the number of sampled items \"\n \"parameter, 'n'. 
Please use the 'frac' parameter instead.\"\n )\n if isinstance(n, Number) and 0 <= n <= 1:\n warnings.warn(msg)\n frac = n\n else:\n raise ValueError(msg)\n\n if frac is None:\n raise ValueError(\"frac must not be None\")\n\n if random_state is None:\n random_state = np.random.RandomState()\n\n name = \"sample-\" + tokenize(self, frac, replace, random_state)\n\n state_data = random_state_data(self.npartitions, random_state)\n dsk = {\n (name, i): (methods.sample, (self._name, i), state, frac, replace)\n for i, state in enumerate(state_data)\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self._meta, self.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.replace__Frame.to_dask_array.return.arr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.replace__Frame.to_dask_array.return.arr", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1565, "end_line": 1611, "span_ids": ["_Frame.to_dask_array", "_Frame.replace"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def replace(self, to_replace=None, value=None, regex=False):\n # In PANDAS_GT_140 pandas starts using no_default instead of None\n value_kwarg = {\"value\": value} if value is not None else {}\n return self.map_partitions(\n M.replace,\n to_replace=to_replace,\n **value_kwarg,\n regex=regex,\n enforce_metadata=False,\n )\n\n def to_dask_array(self, lengths=None, meta=None):\n \"\"\"Convert a dask DataFrame to a dask array.\n\n Parameters\n ----------\n lengths : bool or Sequence of ints, optional\n How to determine the chunks sizes for the output array.\n By default, the output array will have unknown chunk lengths\n along the first axis, which can cause some later operations\n to fail.\n\n * True : immediately compute the length of each partition\n * Sequence : a sequence of integers to use for the chunk sizes\n on the first axis. 
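A runnable illustration of the fractional-only sampling described above (data and fraction are illustrative; sampling is applied per partition, so the row count is approximate):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(100)}), npartitions=4)
# Only fractional sampling is supported; asking for n rows raises/warns.
tenth = ddf.sample(frac=0.1, random_state=42)
print(len(tenth.compute()))  # roughly 10 rows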
These values are *not* validated for\n              correctness, beyond ensuring that the number of items\n              matches the number of partitions.\n        meta : object, optional\n            An optional `meta` parameter can be passed for dask to override the\n            default metadata on the underlying dask array.\n\n        Returns\n        -------\n        arr : dask.array.Array\n            A dask array constructed from the partitions of this object.\n        \"\"\"\n        if lengths is True:\n            lengths = tuple(self.map_partitions(len, enforce_metadata=False).compute())\n\n        arr = self.values\n\n        chunks = self._validate_chunks(arr, lengths)\n        arr._chunks = chunks\n\n        if meta is not None:\n            arr._meta = meta\n\n        return arr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_hdf__Frame.to_sql.return.to_sql_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_hdf__Frame.to_sql.return.to_sql_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1613, "end_line": 1657, "span_ids": ["_Frame.to_sql", "_Frame.to_hdf", "_Frame.to_csv"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n    def to_hdf(self, path_or_buf, key, mode=\"a\", append=False, **kwargs):\n        \"\"\"See dd.to_hdf docstring for more information\"\"\"\n        from .io import to_hdf\n\n        return to_hdf(self, path_or_buf, key, mode, append, **kwargs)\n\n    def to_csv(self, filename, **kwargs):\n        \"\"\"See dd.to_csv docstring for more information\"\"\"\n        from .io import to_csv\n\n        return to_csv(self, filename, **kwargs)\n\n    def to_sql(\n        self,\n        name: str,\n        uri: str,\n        schema=None,\n        if_exists: str = \"fail\",\n        index: bool = True,\n        index_label=None,\n        chunksize=None,\n        dtype=None,\n        method=None,\n        compute=True,\n        parallel=False,\n        engine_kwargs=None,\n    ):\n        \"\"\"See dd.to_sql docstring for more information\"\"\"\n        from .io import to_sql\n\n        return to_sql(\n            self,\n            name=name,\n            uri=uri,\n            schema=schema,\n            if_exists=if_exists,\n            index=index,\n            index_label=index_label,\n            chunksize=chunksize,\n            dtype=dtype,\n            method=method,\n            compute=compute,\n            parallel=parallel,\n            engine_kwargs=engine_kwargs,\n        )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_json__Frame._get_binary_operator.if_inv_.else_.return.lambda_self_other_elemw": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.to_json__Frame._get_binary_operator.if_inv_.else_.return.lambda_self_other_elemw", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1590, "end_line": 1631, "span_ids":
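A minimal sketch of the ``lengths=True`` behaviour described above (data shape is illustrative; ``lengths=True`` triggers a computation so the chunk sizes along the first axis are known up front):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(10), "y": range(10)}), npartitions=2)
arr = ddf.to_dask_array(lengths=True)
print(arr.chunks)  # ((5, 5), (2,)) rather than unknown (nan) chunk lengths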
["_Frame.to_delayed", "_Frame._get_unary_operator", "_Frame.to_json", "_Frame._get_binary_operator"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def to_json(self, filename, *args, **kwargs):\n \"\"\"See dd.to_json docstring for more information\"\"\"\n from .io import to_json\n\n return to_json(self, filename, *args, **kwargs)\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a list of ``dask.delayed`` objects, one per partition.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n Examples\n --------\n >>> partitions = df.to_delayed() # doctest: +SKIP\n\n See Also\n --------\n dask.dataframe.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n layer = self.__dask_layers__()[0]\n if optimize_graph:\n graph = self.__dask_optimize__(graph, self.__dask_keys__())\n layer = \"delayed-\" + self._name\n graph = HighLevelGraph.from_collections(layer, graph, dependencies=())\n return [Delayed(k, graph, layer=layer) for k in keys]\n\n @classmethod\n def _get_unary_operator(cls, op):\n return lambda self: elemwise(op, self)\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n if inv:\n return lambda self, other: elemwise(op, other, self)\n else:\n return lambda self, other: elemwise(op, self, other)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.rolling__Frame.rolling.return.Rolling_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.rolling__Frame.rolling.return.Rolling_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1493, "end_line": 1542, "span_ids": ["_Frame.rolling"], "tokens": 373}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def rolling(self, window, min_periods=None, center=False, win_type=None, axis=0):\n \"\"\"Provides rolling transformations.\n\n Parameters\n ----------\n window : int, str, offset\n Size of the moving window. This is the number of observations used\n for calculating the statistic. When not using a ``DatetimeIndex``,\n the window size must not be so large as to span more than one\n adjacent partition. If using an offset or offset alias like '5D',\n the data must have a ``DatetimeIndex``\n\n .. 
versionchanged:: 0.15.0\n\n Now accepts offsets and string offset aliases\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n center : boolean, default False\n Set the labels at the center of the window.\n win_type : string, default None\n Provide a window type. The recognized window types are identical\n to pandas.\n axis : int, default 0\n\n Returns\n -------\n a Rolling object on which to call a method to compute a statistic\n \"\"\"\n from dask.dataframe.rolling import Rolling\n\n if isinstance(window, Integral):\n if window < 0:\n raise ValueError(\"window must be >= 0\")\n\n if min_periods is not None:\n if not isinstance(min_periods, Integral):\n raise ValueError(\"min_periods must be an integer\")\n if min_periods < 0:\n raise ValueError(\"min_periods must be >= 0\")\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n axis=axis,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.diff__Frame.diff.return.self_map_overlap_M_diff_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.diff__Frame.diff.return.self_map_overlap_M_diff_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1544, "end_line": 1565, "span_ids": ["_Frame.diff"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def diff(self, periods=1, axis=0):\n \"\"\"\n .. note::\n\n Pandas currently uses an ``object``-dtype column to represent\n boolean data with missing values. This can cause issues for\n boolean-specific operations, like ``|``. 
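A short sketch of an integer rolling window as described above (series and window size are illustrative; remember the window must not span more than one adjacent partition):

import pandas as pd
import dask.dataframe as dd

s = dd.from_pandas(pd.Series(range(10), dtype="float64"), npartitions=2)
means = s.rolling(3, min_periods=1).mean()
print(means.compute())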
To enable boolean-\n            specific operations, at the cost of metadata that doesn't match\n            pandas, use ``.astype(bool)`` after the ``diff``.\n        \"\"\"\n        axis = self._validate_axis(axis)\n        if not isinstance(periods, Integral):\n            raise TypeError(\"periods must be an integer\")\n\n        if axis == 1:\n            return self.map_partitions(\n                M.diff, token=\"diff\", periods=periods, axis=1, enforce_metadata=False\n            )\n\n        before, after = (periods, 0) if periods > 0 else (0, -periods)\n        return self.map_overlap(M.diff, before, after, token=\"diff\", periods=periods)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shift__Frame.shift.return.maybe_shift_divisions_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.shift__Frame.shift.return.maybe_shift_divisions_out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1567, "end_line": 1600, "span_ids": ["_Frame.shift"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n    @derived_from(pd.DataFrame)\n    def shift(self, periods=1, freq=None, axis=0):\n        axis = self._validate_axis(axis)\n        if not isinstance(periods, Integral):\n            raise TypeError(\"periods must be an integer\")\n\n        if axis == 1:\n            return self.map_partitions(\n                M.shift,\n                token=\"shift\",\n                periods=periods,\n                freq=freq,\n                axis=1,\n                enforce_metadata=False,\n            )\n\n        if freq is None:\n            before, after = (periods, 0) if periods > 0 else (0, -periods)\n            return self.map_overlap(\n                M.shift, before, after, token=\"shift\", periods=periods\n            )\n\n        # Let pandas error on invalid arguments\n        meta = self._meta_nonempty.shift(periods, freq=freq)\n        out = self.map_partitions(\n            M.shift,\n            token=\"shift\",\n            periods=periods,\n            freq=freq,\n            meta=meta,\n            enforce_metadata=False,\n            transform_divisions=False,\n        )\n        return maybe_shift_divisions(out, periods, freq=freq)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._reduction_agg__Frame._reduction_agg.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._reduction_agg__Frame._reduction_agg.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1602, "end_line": 1625, "span_ids": ["_Frame._reduction_agg"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date",
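A compact sketch of the overlap-based ``diff`` and ``shift`` just shown (data and period values are illustrative; positive periods borrow rows from the previous partition via ``map_overlap``):

import pandas as pd
import dask.dataframe as dd

s = dd.from_pandas(pd.Series([1.0, 2.0, 4.0, 7.0, 11.0]), npartitions=2)
print(s.diff(periods=1).compute())
print(s.shift(periods=2).compute())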
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):\n axis = self._validate_axis(axis)\n\n meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)\n token = self._token_prefix + name\n\n method = getattr(M, name)\n if axis == 1:\n result = self.map_partitions(\n method, meta=meta, token=token, skipna=skipna, axis=axis\n )\n return handle_out(out, result)\n else:\n result = self.reduction(\n method,\n meta=meta,\n token=token,\n skipna=skipna,\n axis=axis,\n split_every=split_every,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sum__Frame.sum.if_min_count_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sum__Frame.sum.if_min_count_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1799, "end_line": 1823, "span_ids": ["_Frame.sum"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def sum(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n min_count=None,\n numeric_only=None,\n ):\n result = self._reduction_agg(\n \"sum\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n if min_count:\n cond = self.notnull().sum(axis=axis) >= min_count\n if is_series_like(cond):\n return result.where(cond, other=np.NaN)\n else:\n return _scalar_binary(\n lambda x, y: result if x is y else np.NaN, cond, True\n )\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.prod__Frame.prod.if_min_count_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.prod__Frame.prod.if_min_count_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1825, "end_line": 1849, "span_ids": ["_Frame.prod"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def prod(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n min_count=None,\n numeric_only=None,\n ):\n result = self._reduction_agg(\n \"prod\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n if min_count:\n cond = self.notnull().sum(axis=axis) >= min_count\n if is_series_like(cond):\n return result.where(cond, other=np.NaN)\n else:\n return _scalar_binary(\n lambda x, y: result if x is y else np.NaN, cond, True\n )\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmax__Frame.idxmax.if_axis_1_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmax__Frame.idxmax.if_axis_1_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1705, "end_line": 1736, "span_ids": ["_Frame.idxmax"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def idxmax(self, axis=None, skipna=True, split_every=False):\n fn = \"idxmax\"\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)\n if axis == 1:\n return map_partitions(\n M.idxmax,\n self,\n meta=meta,\n token=self._token_prefix + fn,\n skipna=skipna,\n axis=axis,\n enforce_metadata=False,\n )\n else:\n scalar = not is_series_like(meta)\n result = aca(\n [self],\n chunk=idxmaxmin_chunk,\n aggregate=idxmaxmin_agg,\n combine=idxmaxmin_combine,\n meta=meta,\n aggregate_kwargs={\"scalar\": scalar},\n token=self._token_prefix + fn,\n split_every=split_every,\n skipna=skipna,\n fn=fn,\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmin__Frame.idxmin.if_axis_1_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.idxmin__Frame.idxmin.if_axis_1_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 1738, "end_line": 1769, "span_ids": ["_Frame.idxmin"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def idxmin(self, axis=None, skipna=True, split_every=False):\n fn = \"idxmin\"\n axis = self._validate_axis(axis)\n meta = self._meta_nonempty.idxmax(axis=axis)\n if axis == 1:\n return map_partitions(\n M.idxmin,\n self,\n meta=meta,\n token=self._token_prefix + fn,\n skipna=skipna,\n axis=axis,\n enforce_metadata=False,\n )\n else:\n scalar = not is_series_like(meta)\n result = aca(\n [self],\n chunk=idxmaxmin_chunk,\n aggregate=idxmaxmin_agg,\n combine=idxmaxmin_combine,\n meta=meta,\n aggregate_kwargs={\"scalar\": scalar},\n token=self._token_prefix + fn,\n split_every=split_every,\n skipna=skipna,\n fn=fn,\n )\n if isinstance(self, DataFrame):\n result.divisions = (min(self.columns), max(self.columns))\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.count__Frame.mode.return.mode_series": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.count__Frame.mode.return.mode_series", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2006, "end_line": 2042, "span_ids": ["_Frame.mode", "_Frame.count"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def count(self, axis=None, split_every=False, numeric_only=None):\n axis = self._validate_axis(axis)\n token = self._token_prefix + \"count\"\n if axis == 1:\n meta = self._meta_nonempty.count(axis=axis)\n return self.map_partitions(\n M.count, meta=meta, token=token, axis=axis, enforce_metadata=False\n )\n else:\n meta = self._meta_nonempty.count()\n\n # Need the astype(int) for empty dataframes, which sum to float dtype\n result = self.reduction(\n M.count,\n aggregate=_count_aggregate,\n meta=meta,\n token=token,\n split_every=split_every,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return result\n\n @derived_from(pd.DataFrame)\n def mode(self, dropna=True, split_every=False):\n mode_series = self.reduction(\n chunk=M.value_counts,\n combine=M.sum,\n aggregate=_mode_aggregate,\n split_every=split_every,\n chunk_kwargs={\"dropna\": dropna},\n aggregate_kwargs={\"dropna\": dropna},\n )\n mode_series.name = self.name\n return mode_series", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.mean__Frame.mean.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.mean__Frame.mean.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1974, "end_line": 2015, "span_ids": ["_Frame.mean"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def mean(\n self,\n axis=None,\n skipna=True,\n split_every=False,\n dtype=None,\n out=None,\n numeric_only=None,\n ):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"mean\")\n meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.mean,\n self,\n meta=meta,\n token=self._token_prefix + \"mean\",\n axis=axis,\n skipna=skipna,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n num = self._get_numeric_data()\n s = num.sum(skipna=skipna, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"mean-%s\" % tokenize(self, axis, skipna)\n result = map_partitions(\n methods.mean_aggregate,\n s,\n n,\n token=name,\n meta=meta,\n enforce_metadata=False,\n parent_meta=self._meta,\n )\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.var__Frame.var.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.var__Frame.var.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2017, "end_line": 2054, "span_ids": ["_Frame.var"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def var(\n self,\n axis=None,\n skipna=True,\n ddof=1,\n split_every=False,\n dtype=None,\n out=None,\n numeric_only=None,\n ):\n axis = self._validate_axis(axis)\n 
_raise_if_object_series(self, \"var\")\n meta = self._meta_nonempty.var(axis=axis, skipna=skipna)\n if axis == 1:\n result = map_partitions(\n M.var,\n self,\n meta=meta,\n token=self._token_prefix + \"var\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n if self.ndim == 1:\n result = self._var_1d(self, skipna, ddof, split_every)\n return handle_out(out, result)\n\n # pandas 1.0+ does not implement var on timedelta\n result = self._var_numeric(skipna, ddof, split_every)\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_numeric__Frame._var_numeric.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_numeric__Frame._var_numeric.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1881, "end_line": 1904, "span_ids": ["_Frame._var_numeric"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_numeric(self, skipna=True, ddof=1, split_every=False):\n num = self.select_dtypes(include=[\"number\", \"bool\"], exclude=[np.timedelta64])\n\n values_dtype = num.values.dtype\n array_values = num.values\n\n if not np.issubdtype(values_dtype, np.number):\n array_values = num.values.astype(\"f8\")\n\n var = da.nanvar if skipna or skipna is None else da.var\n array_var = var(array_values, axis=0, ddof=ddof, split_every=split_every)\n\n name = self._token_prefix + \"var-numeric\" + tokenize(num, split_every)\n cols = num._meta.columns if is_dataframe_like(num) else None\n\n var_shape = num._meta_nonempty.values.var(axis=0).shape\n array_var_name = (array_var._name,) + (0,) * len(var_shape)\n\n layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])\n\n return new_dd_object(\n graph, name, num._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_timedeltas__Frame._var_timedeltas.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_timedeltas__Frame._var_timedeltas.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 1906, "end_line": 1932, "span_ids": ["_Frame._var_timedeltas"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):\n timedeltas = self.select_dtypes(include=[np.timedelta64])\n\n var_timedeltas = [\n self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)\n for col_idx in timedeltas._meta.columns\n ]\n var_timedelta_names = [(v._name, 0) for v in var_timedeltas]\n\n name = (\n self._token_prefix + \"var-timedeltas-\" + tokenize(timedeltas, split_every)\n )\n\n layer = {\n (name, 0): (\n methods.wrap_var_reduction,\n var_timedelta_names,\n timedeltas._meta.columns,\n )\n }\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=var_timedeltas\n )\n\n return new_dd_object(\n graph, name, timedeltas._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_mixed__Frame._var_mixed.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_mixed__Frame._var_mixed.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1934, "end_line": 1956, "span_ids": ["_Frame._var_mixed"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_mixed(self, skipna=True, ddof=1, split_every=False):\n data = self.select_dtypes(include=[\"number\", \"bool\", np.timedelta64])\n\n timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)\n numeric_vars = self._var_numeric(skipna, ddof, split_every)\n\n name = self._token_prefix + \"var-mixed-\" + tokenize(data, split_every)\n\n layer = {\n (name, 0): (\n methods.var_mixed_concat,\n (numeric_vars._name, 0),\n (timedelta_vars._name, 0),\n data._meta.columns,\n )\n }\n\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[numeric_vars, timedelta_vars]\n )\n return new_dd_object(\n graph, name, self._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_1d__Frame._var_1d.return.new_dd_object_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._var_1d__Frame._var_1d.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1994, "end_line": 2021, "span_ids": ["_Frame._var_1d"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _var_1d(self, column, skipna=True, ddof=1, split_every=False):\n is_timedelta = is_timedelta64_dtype(column._meta)\n\n if is_timedelta:\n if not skipna:\n is_nan = column.isna()\n column = column.astype(\"i8\")\n column = column.mask(is_nan)\n else:\n column = column.dropna().astype(\"i8\")\n\n if pd.Int64Dtype.is_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"var-1d-\" + tokenize(column, split_every)\n\n var = da.nanvar if skipna or skipna is None else da.var\n array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)\n\n layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])\n\n return new_dd_object(\n graph, name, column._meta_nonempty.var(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sem__Frame.sem.if_axis_1_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.sem__Frame.sem.if_axis_1_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2426, "end_line": 2459, "span_ids": ["_Frame.sem"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def sem(self, axis=None, skipna=True, ddof=1, split_every=False, numeric_only=None):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"sem\")\n meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)\n if axis == 1:\n return map_partitions(\n M.sem,\n self,\n meta=meta,\n token=self._token_prefix + \"sem\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n parent_meta=self._meta,\n )\n else:\n num = self._get_numeric_data()\n v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)\n n = num.count(split_every=split_every)\n name = self._token_prefix + \"sem\"\n result = 
map_partitions(\n np.sqrt,\n v / n,\n meta=meta,\n token=name,\n enforce_metadata=False,\n parent_meta=self._meta,\n )\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.quantile__Frame.quantile.if_axis_1_.else_.if_isinstance_quantiles_0.else_.return.DataFrame_graph_keyname_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.quantile__Frame.quantile.if_axis_1_.else_.if_isinstance_quantiles_0.else_.return.DataFrame_graph_keyname_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2339, "end_line": 2392, "span_ids": ["_Frame.quantile"], "tokens": 550}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def quantile(self, q=0.5, axis=0, method=\"default\"):\n \"\"\"Approximate row-wise and precise column-wise quantiles of DataFrame\n\n Parameters\n ----------\n q : list/array of floats, default 0.5 (50%)\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n axis : {0, 1, 'index', 'columns'} (default 0)\n 0 or 'index' for row-wise, 1 or 'columns' for column-wise\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). 
If set to ``'tdigest'`` will use tdigest\n for floats and ints and fallback to the ``'dask'`` otherwise.\n \"\"\"\n axis = self._validate_axis(axis)\n keyname = \"quantiles-concat--\" + tokenize(self, q, axis)\n\n if axis == 1:\n if isinstance(q, list):\n # Not supported, the result will have current index as columns\n raise ValueError(\"'q' must be scalar when axis=1 is specified\")\n return map_partitions(\n M.quantile,\n self,\n q,\n axis,\n token=keyname,\n enforce_metadata=False,\n meta=(q, \"f8\"),\n parent_meta=self._meta,\n )\n else:\n _raise_if_object_series(self, \"quantile\")\n meta = self._meta.quantile(q, axis=axis)\n num = self._get_numeric_data()\n quantiles = tuple(quantile(self[c], q, method) for c in num.columns)\n\n qnames = [(_q._name, 0) for _q in quantiles]\n\n if isinstance(quantiles[0], Scalar):\n layer = {\n (keyname, 0): (type(meta), qnames, num.columns, None, meta.name)\n }\n graph = HighLevelGraph.from_collections(\n keyname, layer, dependencies=quantiles\n )\n divisions = (min(num.columns), max(num.columns))\n return Series(graph, keyname, meta, divisions)\n else:\n layer = {(keyname, 0): (methods.concat, qnames, 1)}\n graph = HighLevelGraph.from_collections(\n keyname, layer, dependencies=quantiles\n )\n return DataFrame(graph, keyname, meta, quantiles[0].divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.describe__Frame.describe.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.describe__Frame.describe.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2516, "end_line": 2599, "span_ids": ["_Frame.describe"], "tokens": 627}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def describe(\n self,\n split_every=False,\n percentiles=None,\n percentiles_method=\"default\",\n include=None,\n exclude=None,\n datetime_is_numeric=False,\n ):\n\n if PANDAS_GT_110:\n datetime_is_numeric_kwarg = {\"datetime_is_numeric\": datetime_is_numeric}\n elif datetime_is_numeric:\n raise NotImplementedError(\n \"datetime_is_numeric=True is only supported for pandas >= 1.1.0\"\n )\n else:\n datetime_is_numeric_kwarg = {}\n\n if self._meta.ndim == 1:\n\n meta = self._meta_nonempty.describe(\n percentiles=percentiles,\n include=include,\n exclude=exclude,\n **datetime_is_numeric_kwarg,\n )\n output = self._describe_1d(\n self, split_every, percentiles, percentiles_method, datetime_is_numeric\n )\n output._meta = meta\n return output\n elif (include is None) and (exclude is None):\n _include = [np.number, np.timedelta64]\n if datetime_is_numeric:\n _include.append(np.datetime64)\n data = self._meta.select_dtypes(include=_include)\n\n # when some numerics/timedeltas are found, by default 
keep them\n if len(data.columns) == 0:\n chosen_columns = self._meta.columns\n else:\n # check if there are timedelta, boolean, or datetime columns\n _include = [np.timedelta64, bool]\n if datetime_is_numeric:\n _include.append(np.datetime64)\n bools_and_times = self._meta.select_dtypes(include=_include)\n if len(bools_and_times.columns) == 0:\n return self._describe_numeric(\n self,\n split_every,\n percentiles,\n percentiles_method,\n )\n else:\n chosen_columns = data.columns\n elif include == \"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n chosen_columns = self._meta.columns\n else:\n chosen_columns = self._meta.select_dtypes(include=include, exclude=exclude)\n\n stats = [\n self._describe_1d(\n self[col_idx],\n split_every,\n percentiles,\n percentiles_method,\n datetime_is_numeric,\n )\n for col_idx in chosen_columns\n ]\n stats_names = [(s._name, 0) for s in stats]\n\n name = \"describe--\" + tokenize(self, split_every)\n layer = {(name, 0): (methods.describe_aggregate, stats_names)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = self._meta_nonempty.describe(\n include=include, exclude=exclude, **datetime_is_numeric_kwarg\n )\n return new_dd_object(graph, name, meta, divisions=[None, None])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_1d__Frame._describe_1d.if_is_bool_dtype_data__me.else_.return.self__describe_nonnumeric": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_1d__Frame._describe_1d.if_is_bool_dtype_data__me.else_.return.self__describe_nonnumeric", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2601, "end_line": 2639, "span_ids": ["_Frame._describe_1d"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _describe_1d(\n self,\n data,\n split_every=False,\n percentiles=None,\n percentiles_method=\"default\",\n datetime_is_numeric=False,\n ):\n if is_bool_dtype(data._meta):\n return self._describe_nonnumeric_1d(\n data, split_every=split_every, datetime_is_numeric=datetime_is_numeric\n )\n elif is_numeric_dtype(data._meta):\n return self._describe_numeric(\n data,\n split_every=split_every,\n percentiles=percentiles,\n percentiles_method=percentiles_method,\n )\n elif is_timedelta64_dtype(data._meta):\n return self._describe_numeric(\n data.dropna(),\n split_every=split_every,\n percentiles=percentiles,\n percentiles_method=percentiles_method,\n is_timedelta_column=True,\n )\n elif is_datetime64_any_dtype(data._meta) and datetime_is_numeric:\n return self._describe_numeric(\n data.dropna(),\n split_every=split_every,\n percentiles=percentiles,\n percentiles_method=percentiles_method,\n is_datetime_column=True,\n )\n else:\n return 
self._describe_nonnumeric_1d(\n data, split_every=split_every, datetime_is_numeric=datetime_is_numeric\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_numeric__Frame._describe_numeric.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_numeric__Frame._describe_numeric.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2641, "end_line": 2694, "span_ids": ["_Frame._describe_numeric"], "tokens": 444}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _describe_numeric(\n self,\n data,\n split_every=False,\n percentiles=None,\n percentiles_method=\"default\",\n is_timedelta_column=False,\n is_datetime_column=False,\n ):\n from .numeric import to_numeric\n\n if is_timedelta_column or is_datetime_column:\n num = to_numeric(data)\n else:\n num = data._get_numeric_data()\n\n if data.ndim == 2 and len(num.columns) == 0:\n raise ValueError(\"DataFrame contains only non-numeric data.\")\n elif data.ndim == 1 and data.dtype == \"object\":\n raise ValueError(\"Cannot compute ``describe`` on object dtype.\")\n if percentiles is None:\n percentiles = [0.25, 0.5, 0.75]\n else:\n # always include the the 50%tle to calculate the median\n # unique removes duplicates and sorts quantiles\n percentiles = np.array(percentiles)\n percentiles = np.append(percentiles, 0.5)\n percentiles = np.unique(percentiles)\n percentiles = list(percentiles)\n stats = [\n num.count(split_every=split_every),\n num.mean(split_every=split_every),\n num.std(split_every=split_every),\n num.min(split_every=split_every),\n num.quantile(percentiles, method=percentiles_method),\n num.max(split_every=split_every),\n ]\n stats_names = [(s._name, 0) for s in stats]\n\n colname = data._meta.name if is_series_like(data._meta) else None\n\n name = \"describe-numeric--\" + tokenize(num, split_every)\n layer = {\n (name, 0): (\n methods.describe_numeric_aggregate,\n stats_names,\n colname,\n is_timedelta_column,\n is_datetime_column,\n )\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n meta = num._meta_nonempty.describe()\n return new_dd_object(graph, name, meta, divisions=[None, None])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_nonnumeric_1d__Frame._describe_nonnumeric_1d.return.new_dd_object_graph_name": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._describe_nonnumeric_1d__Frame._describe_nonnumeric_1d.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2696, "end_line": 2738, "span_ids": ["_Frame._describe_nonnumeric_1d"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _describe_nonnumeric_1d(\n self, data, split_every=False, datetime_is_numeric=False\n ):\n from .numeric import to_numeric\n\n vcounts = data.value_counts(split_every=split_every)\n count_nonzero = vcounts[vcounts != 0]\n count_unique = count_nonzero.size\n\n stats = [\n # nunique\n count_unique,\n # count\n data.count(split_every=split_every),\n # most common value\n vcounts._head(1, npartitions=1, compute=False, safe=False),\n ]\n\n if is_datetime64_any_dtype(data._meta) and not datetime_is_numeric:\n min_ts = to_numeric(data.dropna()).min(split_every=split_every)\n max_ts = to_numeric(data.dropna()).max(split_every=split_every)\n stats.extend([min_ts, max_ts])\n\n stats_names = [(s._name, 0) for s in stats]\n colname = data._meta.name\n\n name = \"describe-nonnumeric-1d--\" + tokenize(data, split_every)\n layer = {\n (name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)\n\n if PANDAS_GT_110:\n datetime_is_numeric_kwarg = {\"datetime_is_numeric\": datetime_is_numeric}\n elif datetime_is_numeric:\n raise NotImplementedError(\n \"datetime_is_numeric=True is only supported for pandas >= 1.1.0\"\n )\n else:\n datetime_is_numeric_kwarg = {}\n\n meta = data._meta_nonempty.describe(**datetime_is_numeric_kwarg)\n return new_dd_object(graph, name, meta, divisions=[None, None])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._cum_agg__Frame._cum_agg.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._cum_agg__Frame._cum_agg.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2740, "end_line": 2792, "span_ids": ["_Frame._cum_agg"], "tokens": 470}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _cum_agg(\n self, op_name, chunk, aggregate, axis, skipna=True, 
chunk_kwargs=None, out=None\n ):\n \"\"\"Wrapper for cumulative operation\"\"\"\n\n axis = self._validate_axis(axis)\n\n if axis == 1:\n name = f\"{self._token_prefix}{op_name}(axis=1)\"\n result = self.map_partitions(chunk, token=name, **chunk_kwargs)\n return handle_out(out, result)\n else:\n # cumulate each partitions\n name1 = f\"{self._token_prefix}{op_name}-map\"\n cumpart = map_partitions(\n chunk, self, token=name1, meta=self, **chunk_kwargs\n )\n\n name2 = f\"{self._token_prefix}{op_name}-take-last\"\n cumlast = map_partitions(\n _take_last,\n cumpart,\n skipna,\n meta=pd.Series([], dtype=\"float\"),\n token=name2,\n )\n\n suffix = tokenize(self)\n name = f\"{self._token_prefix}{op_name}-{suffix}\"\n cname = f\"{self._token_prefix}{op_name}-cum-last-{suffix}\"\n\n # aggregate cumulated partisions and its previous last element\n layer = {}\n layer[(name, 0)] = (cumpart._name, 0)\n\n for i in range(1, self.npartitions):\n # store each cumulative step to graph to reduce computation\n if i == 1:\n layer[(cname, i)] = (cumlast._name, i - 1)\n else:\n # aggregate with previous cumulation results\n layer[(cname, i)] = (\n methods._cum_aggregate_apply,\n aggregate,\n (cname, i - 1),\n (cumlast._name, i - 1),\n )\n layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[cumpart, cumlast]\n )\n result = new_dd_object(graph, name, chunk(self._meta), self.divisions)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.cumsum__Frame.isna.if_hasattr_pd_isna_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.cumsum__Frame.isna.if_hasattr_pd_isna_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2451, "end_line": 2526, "span_ids": ["_Frame.where", "_Frame.isnull", "_Frame.cumsum", "_Frame.cummin", "_Frame.isna", "_Frame.cumprod", "_Frame.cummax", "_Frame.mask", "_Frame.notnull"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def cumsum(self, axis=None, skipna=True, dtype=None, out=None):\n return self._cum_agg(\n \"cumsum\",\n chunk=M.cumsum,\n aggregate=methods.cumsum_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cumprod(self, axis=None, skipna=True, dtype=None, out=None):\n return self._cum_agg(\n \"cumprod\",\n chunk=M.cumprod,\n aggregate=methods.cumprod_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cummax(self, axis=None, skipna=True, out=None):\n return self._cum_agg(\n \"cummax\",\n 
chunk=M.cummax,\n aggregate=methods.cummax_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def cummin(self, axis=None, skipna=True, out=None):\n return self._cum_agg(\n \"cummin\",\n chunk=M.cummin,\n aggregate=methods.cummin_aggregate,\n axis=axis,\n skipna=skipna,\n chunk_kwargs=dict(axis=axis, skipna=skipna),\n out=out,\n )\n\n @derived_from(pd.DataFrame)\n def where(self, cond, other=np.nan):\n # cond and other may be dask instance,\n # passing map_partitions via keyword will not be aligned\n return map_partitions(M.where, self, cond, other, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def mask(self, cond, other=np.nan):\n return map_partitions(M.mask, self, cond, other, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def notnull(self):\n return self.map_partitions(M.notnull, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def isnull(self):\n return self.map_partitions(M.isnull, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def isna(self):\n if hasattr(pd, \"isna\"):\n return self.map_partitions(M.isna, enforce_metadata=False)\n else:\n raise NotImplementedError(\n \"Need more recent version of Pandas \"\n \"to support isna. \"\n \"Please use isnull instead.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.isin__Frame.isin.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.isin__Frame.isin.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2381, "end_line": 2396, "span_ids": ["_Frame.isin"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def isin(self, values):\n if is_dataframe_like(self._meta):\n # DataFrame.isin does weird alignment stuff\n bad_types = (_Frame, pd.Series, pd.DataFrame)\n else:\n bad_types = (_Frame,)\n if isinstance(values, bad_types):\n raise NotImplementedError(\"Passing a %r to `isin`\" % typename(type(values)))\n meta = self._meta_nonempty.isin(values)\n # We wrap values in a delayed for two reasons:\n # - avoid serializing data in every task\n # - avoid cost of traversal of large list in optimizations\n return self.map_partitions(\n M.isin, delayed(values), meta=meta, enforce_metadata=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.align__Frame.align.return.result1_result2": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.align__Frame.align.return.result1_result2", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2435, "end_line": 2467, "span_ids": ["_Frame.align"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def align(self, other, join=\"outer\", axis=None, fill_value=None):\n meta1, meta2 = _emulate(\n M.align, self, other, join, axis=axis, fill_value=fill_value\n )\n aligned = self.map_partitions(\n M.align,\n other,\n join=join,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n token = tokenize(self, other, join, axis, fill_value)\n\n name1 = \"align1-\" + token\n dsk1 = {\n (name1, i): (getitem, key, 0)\n for i, key in enumerate(aligned.__dask_keys__())\n }\n dsk1.update(aligned.dask)\n result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)\n\n name2 = \"align2-\" + token\n dsk2 = {\n (name2, i): (getitem, key, 1)\n for i, key in enumerate(aligned.__dask_keys__())\n }\n dsk2.update(aligned.dask)\n result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)\n\n return result1, result2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.combine__Frame.resample.return.Resampler_self_rule_clo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.combine__Frame.resample.return.Resampler_self_rule_clo", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2786, "end_line": 2805, "span_ids": ["_Frame.resample", "_Frame.combine", "_Frame._bind_operator_method", "_Frame.combine_first"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def combine(self, other, func, fill_value=None, overwrite=True):\n return self.map_partitions(\n M.combine, other, func, fill_value=fill_value, overwrite=overwrite\n )\n\n @derived_from(pd.DataFrame)\n def combine_first(self, other):\n return self.map_partitions(M.combine_first, other)\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.DataFrame):\n \"\"\"bind operator method like DataFrame.add to this class\"\"\"\n raise NotImplementedError\n\n @derived_from(pd.DataFrame)\n def resample(self, rule, closed=None, label=None):\n from .tseries.resample import Resampler\n\n return 
Resampler(self, rule, closed=closed, label=label)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.first__Frame.first.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.first__Frame.first.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3000, "end_line": 3032, "span_ids": ["_Frame.first"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def first(self, offset):\n # Let pandas error on bad args\n self._meta_nonempty.first(offset)\n\n if not self.known_divisions:\n raise ValueError(\"`first` is not implemented for unknown divisions\")\n\n offset = pd.tseries.frequencies.to_offset(offset)\n date = self.divisions[0] + offset\n end = self.loc._get_partitions(date)\n\n is_anchored = offset.is_anchored()\n\n include_right = is_anchored or not hasattr(offset, \"delta\")\n\n if end == self.npartitions - 1:\n divs = self.divisions\n else:\n divs = self.divisions[: end + 1] + (date,)\n\n name = \"first-\" + tokenize(self, offset)\n dsk = {(name, i): (self._name, i) for i in range(end)}\n dsk[(name, end)] = (\n methods.boundary_slice,\n (self._name, end),\n None,\n date,\n include_right,\n True,\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self, divs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.last__Frame.last.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.last__Frame.last.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3034, "end_line": 3065, "span_ids": ["_Frame.last"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def last(self, offset):\n # Let pandas error on bad args\n self._meta_nonempty.first(offset)\n\n if not self.known_divisions:\n raise ValueError(\"`last` is not implemented for unknown 
divisions\")\n\n offset = pd.tseries.frequencies.to_offset(offset)\n date = self.divisions[-1] - offset\n start = self.loc._get_partitions(date)\n\n if start == 0:\n divs = self.divisions\n else:\n divs = (date,) + self.divisions[start + 1 :]\n\n name = \"last-\" + tokenize(self, offset)\n dsk = {\n (name, i + 1): (self._name, j + 1)\n for i, j in enumerate(range(start, self.npartitions))\n }\n dsk[(name, 0)] = (\n methods.boundary_slice,\n (self._name, start),\n date,\n None,\n True,\n False,\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, self, divs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.nunique_approx__Frame.nunique_approx.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.nunique_approx__Frame.nunique_approx.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2562, "end_line": 2590, "span_ids": ["_Frame.nunique_approx"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def nunique_approx(self, split_every=None):\n \"\"\"Approximate number of unique rows.\n\n This method uses the HyperLogLog algorithm for cardinality\n estimation to compute the approximate number of unique rows.\n The approximate error is 0.406%.\n\n Parameters\n ----------\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used.\n Default is 8.\n\n Returns\n -------\n a float representing the approximate number of elements\n \"\"\"\n from . 
import hyperloglog # here to avoid circular import issues\n\n return aca(\n [self],\n chunk=hyperloglog.compute_hll_array,\n combine=hyperloglog.reduce_state,\n aggregate=hyperloglog.estimate_count,\n split_every=split_every,\n b=16,\n meta=float,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.values__Frame._validate_chunks.return.arr__chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.values__Frame._validate_chunks.return.arr__chunks", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3097, "end_line": 3128, "span_ids": ["_Frame.values", "_Frame._validate_chunks"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def values(self):\n \"\"\"Return a dask.array of the values of this dataframe\n\n Warning: This creates a dask.array without precise shape information.\n Operations that depend on shape information, like slicing or reshaping,\n will not work.\n \"\"\"\n return self.map_partitions(methods.values)\n\n def _validate_chunks(self, arr, lengths):\n from dask.array.core import normalize_chunks\n\n if isinstance(lengths, Sequence):\n lengths = tuple(lengths)\n\n if len(lengths) != self.npartitions:\n raise ValueError(\n \"The number of items in 'lengths' does not match the number of \"\n f\"partitions. 
{len(lengths)} != {self.npartitions}\"\n )\n\n if self.ndim == 1:\n chunks = normalize_chunks((lengths,))\n else:\n chunks = normalize_chunks((lengths, (len(self.columns),)))\n\n return chunks\n elif lengths is not None:\n raise ValueError(f\"Unexpected value for 'lengths': '{lengths}'\")\n\n return arr._chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._is_index_level_reference__raise_if_object_series.if_isinstance_x_Series_.raise_ValueError_s_no": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._is_index_level_reference__raise_if_object_series.if_isinstance_x_Series_.raise_ValueError_s_no", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2626, "end_line": 2657, "span_ids": ["_raise_if_object_series", "_Frame._contains_index_name", "_Frame._is_index_level_reference"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _is_index_level_reference(self, key):\n \"\"\"\n Test whether a key is an index level reference\n\n To be considered an index level reference, `key` must match the index name\n and must NOT match the name of any column (if a dataframe).\n \"\"\"\n return (\n self.index.name is not None\n and not is_dask_collection(key)\n and (np.isscalar(key) or isinstance(key, tuple))\n and key == self.index.name\n and key not in getattr(self, \"columns\", ())\n )\n\n def _contains_index_name(self, columns_or_index):\n \"\"\"\n Test whether the input contains a reference to the index of the DataFrame/Series\n \"\"\"\n if isinstance(columns_or_index, list):\n return any(self._is_index_level_reference(n) for n in columns_or_index)\n else:\n return self._is_index_level_reference(columns_or_index)\n\n\ndef _raise_if_object_series(x, funcname):\n \"\"\"\n Utility function to raise an error if an object column does not support\n a certain operation like `mean`.\n \"\"\"\n if isinstance(x, Series) and hasattr(x, \"dtype\") and x.dtype == object:\n raise ValueError(\"`%s` not supported with object series\" % funcname)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series_Series._repr_data.return._repr_data_series_self__m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series_Series._repr_data.return._repr_data_series_self__m", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3309, "end_line": 3413, "span_ids": 
["Series.shape", "Series.axes", "Series._repr_data", "Series.name", "Series.name_3", "Series.dtype", "Series.__dir__", "Series.ndim", "Series.nbytes", "Series:11", "Series.__array_wrap__", "Series"], "tokens": 679}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n \"\"\"Parallel Pandas Series\n\n Do not use this class directly. Instead use functions like\n ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.\n\n Parameters\n ----------\n\n dsk: dict\n The dask graph to compute this Series\n _name: str\n The key prefix that specifies which keys in the dask comprise this\n particular Series\n meta: pandas.Series\n An empty ``pandas.Series`` with names, dtypes, and index matching the\n expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n\n See Also\n --------\n dask.dataframe.DataFrame\n \"\"\"\n\n _partition_type = pd.Series\n _is_partition_type = staticmethod(is_series_like)\n _token_prefix = \"series-\"\n _accessors = set()\n\n def __array_wrap__(self, array, context=None):\n if isinstance(context, tuple) and len(context) > 0:\n if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():\n index = None\n else:\n index = context[1][0].index\n\n return pd.Series(array, index=index, name=self.name)\n\n @property\n def axes(self):\n return [self.index]\n\n @property\n def name(self):\n return self._meta.name\n\n @name.setter\n def name(self, name):\n self._meta.name = name\n renamed = _rename_dask(self, name)\n # update myself\n self.dask = renamed.dask\n self._name = renamed._name\n\n @property\n def ndim(self):\n \"\"\"Return dimensionality\"\"\"\n return 1\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of a Series.\n\n The single element of the tuple is a Delayed result.\n\n Examples\n --------\n >>> series.shape # doctest: +SKIP\n (dd.Scalar,)\n \"\"\"\n return (self.size,)\n\n @property\n def dtype(self):\n \"\"\"Return data type\"\"\"\n return self._meta.dtype\n\n dt = CachedAccessor(\"dt\", DatetimeAccessor)\n\n cat = CachedAccessor(\"cat\", CategoricalAccessor)\n\n str = CachedAccessor(\"str\", StringAccessor)\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n # Remove the `cat` and `str` accessors if not available. 
We can't\n # decide this statically for the `dt` accessor, as it works on\n # datetime-like things as well.\n for accessor in [\"cat\", \"str\"]:\n if not hasattr(self._meta, accessor):\n o.remove(accessor)\n return list(o)\n\n @property\n def nbytes(self):\n \"\"\"Number of bytes\"\"\"\n return self.reduction(\n methods.nbytes, np.sum, token=\"nbytes\", meta=int, split_every=False\n )\n\n def _repr_data(self):\n return _repr_data_series(self._meta, self._repr_divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__repr___Series.__repr__.return._Dask_klass_Structure": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__repr___Series.__repr__.return._Dask_klass_Structure", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3278, "end_line": 3294, "span_ids": ["Series.__repr__"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n def __repr__(self):\n \"\"\"have to overwrite footer\"\"\"\n if self.name is not None:\n footer = f\"Name: {self.name}, dtype: {self.dtype}\"\n else:\n footer = f\"dtype: {self.dtype}\"\n\n return \"\"\"Dask {klass} Structure:\n{data}\n{footer}\nDask Name: {name}, {task} tasks\"\"\".format(\n klass=self.__class__.__name__,\n data=self.to_string(),\n footer=footer,\n name=key_split(self._name),\n task=len(self.dask),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.rename_Series.rename.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.rename_Series.rename.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3296, "end_line": 3369, "span_ids": ["Series.rename"], "tokens": 592}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n def rename(self, index=None, inplace=False, sorted_index=False):\n \"\"\"Alter Series index labels or name\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. 
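As a quick illustration of the ``Series.rename`` semantics quoted in this chunk (dict labels not covered by the mapping are left as-is, extra keys are ignored), a minimal sketch; the series ``ds`` and its labels are invented here:

.. code-block:: python

    import pandas as pd
    import dask.dataframe as dd

    s = pd.Series([1, 2, 3], index=["a", "b", "c"], name="x")
    ds = dd.from_pandas(s, npartitions=2)

    # "a" is remapped, "b" and "c" are left as-is, and the extra key "z" is ignored
    renamed = ds.rename({"a": "A", "z": "Z"})
    print(renamed.compute().index.tolist())  # ['A', 'b', 'c']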
Extra labels listed don't throw an\n error.\n\n Alternatively, change ``Series.name`` with a scalar value.\n\n Parameters\n ----------\n index : scalar, hashable sequence, dict-like or callable, optional\n If dict-like or callable, the transformation is applied to the\n index. Scalar or hashable sequence-like will alter the\n ``Series.name`` attribute.\n inplace : boolean, default False\n Whether to return a new Series or modify this one inplace.\n sorted_index : bool, default False\n If true, the output ``Series`` will have known divisions inferred\n from the input series and the transformation. Ignored for\n non-callable/dict-like ``index`` or when the input series has\n unknown divisions. Note that this may only be set to ``True`` if\n you know that the transformed index is monotonically increasing. Dask\n will check that transformed divisions are monotonic, but cannot\n check all the values between divisions, so incorrectly setting this\n can result in bugs.\n\n Returns\n -------\n renamed : Series\n\n See Also\n --------\n pandas.Series.rename\n \"\"\"\n from pandas.api.types import is_dict_like, is_list_like, is_scalar\n\n import dask.dataframe as dd\n\n if is_scalar(index) or (\n is_list_like(index)\n and not is_dict_like(index)\n and not isinstance(index, dd.Series)\n ):\n\n if inplace:\n warnings.warn(\n \"'inplace' argument for dask series will be removed in future versions\",\n PendingDeprecationWarning,\n )\n res = self if inplace else self.copy()\n res.name = index\n else:\n res = self.map_partitions(M.rename, index, enforce_metadata=False)\n if self.known_divisions:\n if sorted_index and (callable(index) or is_dict_like(index)):\n old = pd.Series(range(self.npartitions + 1), index=self.divisions)\n new = old.rename(index).index\n if not new.is_monotonic_increasing:\n msg = (\n \"sorted_index=True, but the transformed index \"\n \"isn't monotonic_increasing\"\n )\n raise ValueError(msg)\n res.divisions = tuple(methods.tolist(new))\n else:\n res = res.clear_divisions()\n if inplace:\n self.dask = res.dask\n self._name = res._name\n self.divisions = res.divisions\n self._meta = res._meta\n res = self\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.round_Series.quantile.return.quantile_self_q_method_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.round_Series.quantile.return.quantile_self_q_method_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2858, "end_line": 2880, "span_ids": ["Series.quantile", "Series.round", "Series.to_timestamp"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def round(self, decimals=0):\n return elemwise(M.round, self, decimals)\n\n @derived_from(pd.DataFrame)\n def to_timestamp(self, freq=None, how=\"start\", axis=0):\n 
df = elemwise(M.to_timestamp, self, freq, how, axis)\n df.divisions = tuple(pd.Index(self.divisions).to_timestamp())\n return df\n\n def quantile(self, q=0.5, method=\"default\"):\n \"\"\"Approximate quantiles of Series\n\n Parameters\n ----------\n q : list/array of floats, default 0.5 (50%)\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest\n for floats and ints and fall back to the ``'dask'`` method otherwise.\n \"\"\"\n return quantile(self, q, method=method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.value_counts_Series.value_counts.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.value_counts_Series.value_counts.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3261, "end_line": 3303, "span_ids": ["Series.value_counts"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def value_counts(\n self,\n sort=None,\n ascending=False,\n dropna=None,\n normalize=False,\n split_every=None,\n split_out=1,\n ):\n \"\"\"\n Note: dropna is only supported in pandas >= 1.1.0, in which case it defaults to\n True.\n \"\"\"\n kwargs = {\"sort\": sort, \"ascending\": ascending}\n\n if dropna is not None:\n if not PANDAS_GT_110:\n raise NotImplementedError(\n \"dropna is not a valid argument for dask.dataframe.value_counts \"\n f\"if pandas < 1.1.0. 
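The ``quantile`` chunk above approximates quantiles rather than computing them exactly. A minimal usage sketch (the series contents are made up; results are approximate by design):

.. code-block:: python

    import pandas as pd
    import dask.dataframe as dd

    ds = dd.from_pandas(pd.Series(range(100), name="x"), npartitions=4)

    print(ds.quantile(0.5).compute())           # a single approximate median, ~49.5
    print(ds.quantile([0.25, 0.75]).compute())  # a Series indexed by the q values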
Pandas version is {pd.__version__}\"\n )\n kwargs[\"dropna\"] = dropna\n\n aggregate_kwargs = {\"normalize\": normalize}\n if split_out > 1:\n aggregate_kwargs[\"total_length\"] = (\n len(self) if dropna is False else len(self.dropna())\n )\n\n return aca(\n self,\n chunk=M.value_counts,\n aggregate=methods.value_counts_aggregate,\n combine=methods.value_counts_combine,\n meta=self._meta.value_counts(normalize=normalize),\n token=\"value-counts\",\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n aggregate_kwargs=aggregate_kwargs,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.map_Series.map.return.type_self_graph_name_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.map_Series.map.return.type_self_graph_name_m", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3573, "end_line": 3602, "span_ids": ["Series.map"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @insert_meta_param_description(pad=12)\n @derived_from(pd.Series)\n def map(self, arg, na_action=None, meta=no_default):\n if is_series_like(arg) and is_dask_collection(arg):\n return series_map(self, arg)\n if not (\n isinstance(arg, dict)\n or callable(arg)\n or is_series_like(arg)\n and not is_dask_collection(arg)\n ):\n raise TypeError(\n f\"arg must be pandas.Series, dict or callable. 
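To make the ``value_counts`` chunk above concrete, a small sketch (the values are hypothetical; ``dropna`` is only accepted on pandas >= 1.1.0, per the docstring):

.. code-block:: python

    import pandas as pd
    import dask.dataframe as dd

    ds = dd.from_pandas(pd.Series(["a", "b", "a", None, "a"]), npartitions=2)

    print(ds.value_counts().compute())                # counts per value, NaN excluded
    print(ds.value_counts(normalize=True).compute())  # relative frequencies instead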
Got {type(arg)}\"\n )\n name = \"map-\" + tokenize(self, arg, na_action)\n dsk = {\n (name, i): (M.map, k, arg, na_action)\n for i, k in enumerate(self.__dask_keys__())\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n if meta is no_default:\n meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)\n else:\n meta = make_meta(\n meta,\n index=getattr(make_meta(self), \"index\", None),\n parent_meta=self._meta,\n )\n\n return type(self)(graph, name, meta, self.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.dropna_Series.to_string.return.self__repr_data_to_stri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.dropna_Series.to_string.return.self__repr_data_to_stri", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3604, "end_line": 3665, "span_ids": ["Series.clip", "Series.combine", "Series.to_bag", "Series.align", "Series.squeeze", "Series.to_string", "Series.to_frame", "Series.between", "Series.combine_first", "Series.clip_upper", "Series.clip_lower", "Series.dropna"], "tokens": 475}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def dropna(self):\n return self.map_partitions(M.dropna, enforce_metadata=False)\n\n @derived_from(pd.Series)\n def between(self, left, right, inclusive=\"both\"):\n return self.map_partitions(\n M.between, left=left, right=right, inclusive=inclusive\n )\n\n @derived_from(pd.Series)\n def clip(self, lower=None, upper=None, out=None):\n if out is not None:\n raise ValueError(\"'out' must be None\")\n # np.clip may pass out\n return self.map_partitions(\n M.clip, lower=lower, upper=upper, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def clip_lower(self, threshold):\n return self.map_partitions(\n M.clip_lower, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def clip_upper(self, threshold):\n return self.map_partitions(\n M.clip_upper, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.Series)\n def align(self, other, join=\"outer\", axis=None, fill_value=None):\n return super().align(other, join=join, axis=axis, fill_value=fill_value)\n\n @derived_from(pd.Series)\n def combine(self, other, func, fill_value=None):\n return self.map_partitions(M.combine, other, func, fill_value=fill_value)\n\n @derived_from(pd.Series)\n def squeeze(self):\n return self\n\n @derived_from(pd.Series)\n def combine_first(self, other):\n return self.map_partitions(M.combine_first, other)\n\n def to_bag(self, index=False, format=\"tuple\"):\n \"\"\"Create a Dask Bag from a Series\"\"\"\n from .io import to_bag\n\n return to_bag(self, index, format=format)\n\n @derived_from(pd.Series)\n def to_frame(self, name=None):\n args = [] if name is None else [name]\n return 
self.map_partitions(M.to_frame, *args, meta=self._meta.to_frame(*args))\n\n @derived_from(pd.Series)\n def to_string(self, max_rows=5):\n # option_context doesn't affect\n return self._repr_data().to_string(max_rows=max_rows)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_operator_method_Series._bind_operator_method.setattr_cls_name_derive": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_operator_method_Series._bind_operator_method.setattr_cls_name_derive", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3461, "end_line": 3475, "span_ids": ["Series._bind_operator_method"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.Series):\n \"\"\"bind operator method like Series.add to this class\"\"\"\n\n def meth(self, other, level=None, fill_value=None, axis=0):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)\n return map_partitions(\n op, self, other, meta=meta, axis=axis, fill_value=fill_value\n )\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_comparison_method_Series._bind_comparison_method.setattr_cls_name_derive": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._bind_comparison_method_Series._bind_comparison_method.setattr_cls_name_derive", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3477, "end_line": 3492, "span_ids": ["Series._bind_comparison_method"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @classmethod\n def _bind_comparison_method(cls, name, comparison, original=pd.Series):\n \"\"\"bind comparison method like Series.eq to this class\"\"\"\n\n def meth(self, other, level=None, fill_value=None, axis=0):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = 
self._validate_axis(axis)\n if fill_value is None:\n return elemwise(comparison, self, other, axis=axis)\n else:\n op = partial(comparison, fill_value=fill_value)\n return elemwise(op, self, other, axis=axis)\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.apply_Series.apply.return.map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.apply_Series.apply.return.map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3150, "end_line": 3219, "span_ids": ["Series.apply"], "tokens": 548}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @insert_meta_param_description(pad=12)\n def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):\n \"\"\"Parallel version of pandas.Series.apply\n\n Parameters\n ----------\n func : function\n Function to apply\n convert_dtype : boolean, default True\n Try to find better dtype for elementwise function results.\n If False, leave as dtype=object.\n $META\n args : tuple\n Positional arguments to pass to function in addition to the value.\n\n Additional keyword arguments will be passed as keywords to the function.\n\n Returns\n -------\n applied : Series or DataFrame if func returns a Series.\n\n Examples\n --------\n >>> import dask.dataframe as dd\n >>> s = pd.Series(range(5), name='x')\n >>> ds = dd.from_pandas(s, npartitions=2)\n\n Apply a function elementwise across the Series, passing in extra\n arguments in ``args`` and ``kwargs``:\n\n >>> def myadd(x, a, b=1):\n ... return x + a + b\n >>> res = ds.apply(myadd, args=(2,), b=1.5) # doctest: +SKIP\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. 
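Since the ``apply`` docstring above is mid-way through explaining the ``meta`` keyword, here is a minimal sketch of supplying it explicitly (the series name and dtype string are illustrative):

.. code-block:: python

    import pandas as pd
    import dask.dataframe as dd

    ds = dd.from_pandas(pd.Series(range(5), name="x"), npartitions=2)

    # Passing meta=(name, dtype) skips the sample-run inference and its warning
    res = ds.apply(lambda x: x + 1, meta=("x", "i8"))
    print(res.compute().tolist())  # [1, 2, 3, 4, 5]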
This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with name ``'x'``, and dtype\n ``float64``:\n\n >>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ds.apply(lambda x: x + 1, meta=ds)\n\n See Also\n --------\n dask.Series.map_partitions\n \"\"\"\n if meta is no_default:\n meta = _emulate(\n M.apply,\n self._meta_nonempty,\n func,\n convert_dtype=convert_dtype,\n args=args,\n udf=True,\n **kwds,\n )\n warnings.warn(meta_warning(meta))\n\n return map_partitions(\n M.apply, self, func, convert_dtype, args, meta=meta, **kwds\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.cov_Series.corr.return.cov_corr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.cov_Series.corr.return.cov_corr_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3221, "end_line": 3241, "span_ids": ["Series.corr", "Series.cov"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def cov(self, other, min_periods=None, split_every=False):\n from .multi import concat\n\n if not isinstance(other, Series):\n raise TypeError(\"other must be a dask.dataframe.Series\")\n df = concat([self, other], axis=1)\n return cov_corr(df, min_periods, scalar=True, split_every=split_every)\n\n @derived_from(pd.Series)\n def corr(self, other, method=\"pearson\", min_periods=None, split_every=False):\n from .multi import concat\n\n if not isinstance(other, Series):\n raise TypeError(\"other must be a dask.dataframe.Series\")\n if method != \"pearson\":\n raise NotImplementedError(\"Only Pearson correlation has been implemented\")\n df = concat([self, other], axis=1)\n return cov_corr(\n df, min_periods, corr=True, scalar=True, split_every=split_every\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index_Index.__array_wrap__.return.pd_Index_array_name_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index_Index.__array_wrap__.return.pd_Index_array_name_self", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3849, "end_line": 3919, "span_ids": ["Index.index", "Index.__getattr__", "Index.__dir__", "Index.__array_wrap__", "Index"], "tokens": 414}, 
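The ``Series.cov``/``Series.corr`` chunk above concatenates the two series and defers to ``cov_corr``. A usage sketch with invented data (only Pearson correlation is implemented, per the code):

.. code-block:: python

    import pandas as pd
    import dask.dataframe as dd

    a = dd.from_pandas(pd.Series([1.0, 2.0, 3.0, 4.0], name="a"), npartitions=2)
    b = dd.from_pandas(pd.Series([2.0, 4.0, 6.0, 8.0], name="b"), npartitions=2)

    print(a.corr(b).compute())  # ~1.0 for an exactly linear relationship
    print(a.cov(b).compute())   # sample covariance, returned as a scalar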
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n _partition_type = pd.Index\n _is_partition_type = staticmethod(is_index_like)\n _token_prefix = \"index-\"\n _accessors = set()\n\n _dt_attributes = {\n \"nanosecond\",\n \"microsecond\",\n \"millisecond\",\n \"dayofyear\",\n \"minute\",\n \"hour\",\n \"day\",\n \"dayofweek\",\n \"second\",\n \"week\",\n \"weekday\",\n \"weekofyear\",\n \"month\",\n \"quarter\",\n \"year\",\n }\n\n _cat_attributes = {\n \"known\",\n \"as_known\",\n \"as_unknown\",\n \"add_categories\",\n \"categories\",\n \"remove_categories\",\n \"reorder_categories\",\n \"as_ordered\",\n \"codes\",\n \"remove_unused_categories\",\n \"set_categories\",\n \"as_unordered\",\n \"ordered\",\n \"rename_categories\",\n }\n\n _monotonic_attributes = {\n \"is_monotonic\",\n \"is_monotonic_increasing\",\n \"is_monotonic_decreasing\",\n }\n\n def __getattr__(self, key):\n if is_categorical_dtype(self.dtype) and key in self._cat_attributes:\n return getattr(self.cat, key)\n elif key in self._dt_attributes:\n return getattr(self.dt, key)\n elif key in self._monotonic_attributes:\n return getattr(self, key)\n raise AttributeError(\"'Index' object has no attribute %r\" % key)\n\n def __dir__(self):\n out = super().__dir__()\n out.extend(self._dt_attributes)\n if is_categorical_dtype(self.dtype):\n out.extend(self._cat_attributes)\n return out\n\n @property\n def index(self):\n raise AttributeError(\n f\"{self.__class__.__name__!r} object has no attribute 'index'\"\n )\n\n def __array_wrap__(self, array, context=None):\n return pd.Index(array, name=self.name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.head_Index.head.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.head_Index.head.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3331, "end_line": 3344, "span_ids": ["Index.head"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n def head(self, n=5, compute=True):\n \"\"\"First n items of the Index.\n\n Caveat, this only checks the first partition.\n \"\"\"\n name = \"head-%d-%s\" % (n, self._name)\n dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n\n result = new_dd_object(graph, name, self._meta, self.divisions[:2])\n\n if compute:\n result = result.compute()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.max_Index.count.return.self_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.max_Index.count.return.self_reduction_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3346, "end_line": 3371, "span_ids": ["Index.max", "Index.min", "Index.count"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @derived_from(pd.Index)\n def max(self, split_every=False):\n return self.reduction(\n M.max,\n meta=self._meta_nonempty.max(),\n token=self._token_prefix + \"max\",\n split_every=split_every,\n )\n\n @derived_from(pd.Index)\n def min(self, split_every=False):\n return self.reduction(\n M.min,\n meta=self._meta_nonempty.min(),\n token=self._token_prefix + \"min\",\n split_every=split_every,\n )\n\n def count(self, split_every=False):\n return self.reduction(\n methods.index_count,\n np.sum,\n token=\"index-count\",\n meta=int,\n split_every=split_every,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.shift_Index.shift.return.maybe_shift_divisions_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.shift_Index.shift.return.maybe_shift_divisions_out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3373, "end_line": 3395, "span_ids": ["Index.shift"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @derived_from(pd.Index)\n def shift(self, periods=1, freq=None):\n if isinstance(self._meta, pd.PeriodIndex):\n if freq is not None:\n raise ValueError(\"PeriodIndex doesn't accept `freq` argument\")\n meta = self._meta_nonempty.shift(periods)\n out = self.map_partitions(\n M.shift, periods, meta=meta, token=\"shift\", transform_divisions=False\n )\n else:\n # Pandas will raise for other index types that don't implement shift\n meta = self._meta_nonempty.shift(periods, freq=freq)\n out = self.map_partitions(\n M.shift,\n periods,\n token=\"shift\",\n meta=meta,\n freq=freq,\n transform_divisions=False,\n )\n if freq is None:\n freq = meta.freq\n return maybe_shift_divisions(out, periods, freq=freq)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.iloc_DataFrame.iloc.return._iLocIndexer_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.iloc_DataFrame.iloc.return._iLocIndexer_self_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3486, "end_line": 3502, "span_ids": ["DataFrame.iloc"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @property\n def iloc(self):\n \"\"\"Purely integer-location based indexing for selection by position.\n\n Only indexing the column positions is supported. Trying to select\n row positions will raise a ValueError.\n\n See :ref:`dataframe.indexing` for more.\n\n Examples\n --------\n >>> df.iloc[:, [2, 0, 1]] # doctest: +SKIP\n \"\"\"\n from .indexing import _iLocIndexer\n\n # For dataframes with unique column names, this will be transformed into a __getitem__ call\n return _iLocIndexer(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__len___DataFrame.empty.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__len___DataFrame.empty.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3504, "end_line": 3522, "span_ids": ["DataFrame.__contains__", "DataFrame.__len__", "DataFrame.empty"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __len__(self):\n try:\n s = self.iloc[:, 0]\n except IndexError:\n return super().__len__()\n else:\n return len(s)\n\n def __contains__(self, key):\n return key in self._meta\n\n @property\n def empty(self):\n raise NotImplementedError(\n \"Checking whether a Dask DataFrame has any rows may be expensive. \"\n \"However, checking the number of columns is fast. 
\"\n \"Depending on which of these results you need, use either \"\n \"`len(df.index) == 0` or `len(df.columns) == 0`\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__getitem___DataFrame.__getitem__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__getitem___DataFrame.__getitem__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3894, "end_line": 3952, "span_ids": ["DataFrame.__getitem__"], "tokens": 594}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __getitem__(self, key):\n name = \"getitem-%s\" % tokenize(self, key)\n if np.isscalar(key) or isinstance(key, (tuple, str)):\n\n if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):\n if key not in self._meta.columns:\n if PANDAS_GT_120:\n warnings.warn(\n \"Indexing a DataFrame with a datetimelike index using a single \"\n \"string to slice the rows, like `frame[string]`, is deprecated \"\n \"and will be removed in a future version. Use `frame.loc[string]` \"\n \"instead.\",\n FutureWarning,\n )\n return self.loc[key]\n\n # error is raised from pandas\n meta = self._meta[_extract_meta(key)]\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, meta, self.divisions)\n elif isinstance(key, slice):\n from pandas.api.types import is_float_dtype\n\n is_integer_slice = any(\n isinstance(i, Integral) for i in (key.start, key.step, key.stop)\n )\n # Slicing with integer labels is always iloc based except for a\n # float indexer for some reason\n if is_integer_slice and not is_float_dtype(self.index.dtype):\n # NOTE: this always fails currently, as iloc is mostly\n # unsupported, but we call it anyway here for future-proofing\n # and error-attribution purposes\n return self.iloc[key]\n else:\n return self.loc[key]\n\n if isinstance(key, (np.ndarray, list)) or (\n not is_dask_collection(key) and (is_series_like(key) or is_index_like(key))\n ):\n # error is raised from pandas\n meta = self._meta[_extract_meta(key)]\n\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return new_dd_object(graph, name, meta, self.divisions)\n if isinstance(key, Series):\n # do not perform dummy calculation, as columns will not be changed.\n if self.divisions != key.divisions:\n from .multi import _maybe_align_partitions\n\n self, key = _maybe_align_partitions([self, key])\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])\n return new_dd_object(graph, name, self, self.divisions)\n if isinstance(key, DataFrame):\n 
return self.where(key, np.nan)\n\n raise NotImplementedError(key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__setitem___DataFrame.__setitem__.self.divisions.df_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__setitem___DataFrame.__setitem__.self.divisions.df_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4223, "end_line": 4244, "span_ids": ["DataFrame.__setitem__"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __setitem__(self, key, value):\n if isinstance(key, (tuple, list)) and isinstance(value, DataFrame):\n df = self.assign(**{k: value[c] for k, c in zip(key, value.columns)})\n\n elif isinstance(key, pd.Index) and not isinstance(value, DataFrame):\n key = list(key)\n df = self.assign(**{k: value for k in key})\n elif (\n is_dataframe_like(key)\n or is_series_like(key)\n or isinstance(key, (DataFrame, Series))\n ):\n df = self.where(~key, value)\n elif not isinstance(key, str):\n raise NotImplementedError(f\"Item assignment with {type(key)} not supported\")\n else:\n df = self.assign(**{key: value})\n\n self.dask = df.dask\n self._name = df._name\n self._meta = df._meta\n self.divisions = df.divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__delitem___DataFrame.ndim.return.2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.__delitem___DataFrame.ndim.return.2", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3973, "end_line": 4012, "span_ids": ["DataFrame.__dir__", "DataFrame.__delitem__", "DataFrame._ipython_key_completions_", "DataFrame.__setattr__", "DataFrame.ndim", "DataFrame.__iter__", "DataFrame.__getattr__"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def __delitem__(self, key):\n result = self.drop([key], axis=1)\n self.dask = result.dask\n self._name = result._name\n self._meta = result._meta\n\n def __setattr__(self, key, value):\n try:\n columns = object.__getattribute__(self, \"_meta\").columns\n except AttributeError:\n columns = 
()\n\n # exclude protected attributes from setitem\n if key in columns and key not in [\"divisions\", \"dask\", \"_name\", \"_meta\"]:\n self[key] = value\n else:\n object.__setattr__(self, key, value)\n\n def __getattr__(self, key):\n if key in self.columns:\n return self[key]\n else:\n raise AttributeError(\"'DataFrame' object has no attribute %r\" % key)\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n o.update(c for c in self.columns if (isinstance(c, str) and c.isidentifier()))\n return list(o)\n\n def __iter__(self):\n return iter(self._meta)\n\n def _ipython_key_completions_(self):\n return methods.tolist(self.columns)\n\n @property\n def ndim(self):\n \"\"\"Return dimensionality\"\"\"\n return 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.shape_DataFrame.shape.return._row_size_col_size_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.shape_DataFrame.shape.return._row_size_col_size_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3629, "end_line": 3646, "span_ids": ["DataFrame.shape"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n The number of rows is a Delayed result. 
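The ``DataFrame.shape`` property being documented here mixes lazy and concrete values; a small sketch (data invented):

.. code-block:: python

    import pandas as pd
    import dask.dataframe as dd

    ddf = dd.from_pandas(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}), npartitions=2)

    nrows, ncols = ddf.shape
    print(ncols)            # 2 -- known from metadata without computing
    print(nrows.compute())  # 3 -- the row count is a Delayed result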
The number of columns\n is a concrete integer.\n\n Examples\n --------\n >>> df.shape # doctest: +SKIP\n (Delayed('int-07f06075-5ecc-4d77-817e-63c69a9188a8'), 2)\n \"\"\"\n col_size = len(self.columns)\n if col_size == 0:\n return (self.index.shape[0], 0)\n row_size = delayed(int)(self.size / col_size)\n return (row_size, col_size)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.dtypes_DataFrame.select_dtypes.return.self_list_cs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.dtypes_DataFrame.select_dtypes.return.self_list_cs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4033, "end_line": 4049, "span_ids": ["DataFrame.get_ftype_counts", "DataFrame.get_dtype_counts", "DataFrame.select_dtypes", "DataFrame.dtypes"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @property\n def dtypes(self):\n \"\"\"Return data types\"\"\"\n return self._meta.dtypes\n\n @derived_from(pd.DataFrame)\n def get_dtype_counts(self):\n return self._meta.get_dtype_counts()\n\n @derived_from(pd.DataFrame)\n def get_ftype_counts(self):\n return self._meta.get_ftype_counts()\n\n @derived_from(pd.DataFrame)\n def select_dtypes(self, include=None, exclude=None):\n cs = self._meta.select_dtypes(include=include, exclude=exclude).columns\n return self[list(cs)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index_DataFrame.set_index._Set_the_DataFrame_inde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index_DataFrame.set_index._Set_the_DataFrame_inde", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4388, "end_line": 4498, "span_ids": ["DataFrame.set_index"], "tokens": 1376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def set_index(\n self,\n other,\n drop=True,\n sorted=False,\n npartitions=None,\n divisions=None,\n inplace=False,\n **kwargs,\n ):\n \"\"\"Set the DataFrame index (row labels) using an existing column.\n\n This realigns the dataset to be sorted by a new column. 
This can have a\n significant impact on performance, because joins, groupbys, lookups, etc.\n are all much faster on that column. However, this performance increase\n comes with a cost, sorting a parallel dataset requires expensive shuffles.\n Often we ``set_index`` once directly after data ingest and filtering and\n then perform many cheap computations off of the sorted dataset.\n\n This function operates exactly like ``pandas.set_index`` except with\n different performance costs (dask dataframe ``set_index`` is much more expensive).\n Under normal operation this function does an initial pass over the index column\n to compute approximate quantiles to serve as future divisions. It then passes\n over the data a second time, splitting up each input partition into several\n pieces and sharing those pieces to all of the output partitions now in\n sorted order.\n\n In some cases we can alleviate those costs, for example if your dataset is\n sorted already then we can avoid making many small pieces or if you know\n good values to split the new index column then we can avoid the initial\n pass over the data. For example if your new index is a datetime index and\n your data is already sorted by day then this entire operation can be done\n for free. You can control these options with the following parameters.\n\n Parameters\n ----------\n other: string or Dask Series\n drop: boolean, default True\n Delete column to be used as the new index.\n sorted: bool, optional\n If the index column is already sorted in increasing order.\n Defaults to False\n npartitions: int, None, or 'auto'\n The ideal number of output partitions. If None, use the same as\n the input. If 'auto' then decide by memory use.\n Only used when ``divisions`` is not given. If ``divisions`` is given,\n the number of output partitions will be ``len(divisions) - 1``.\n divisions: list, optional\n The \"dividing lines\" used to split the new index into partitions.\n For ``divisions=[0, 10, 50, 100]``, there would be three output partitions,\n where the new index contained [0, 10), [10, 50), and [50, 100), respectively.\n See https://docs.dask.org/en/latest/dataframe-design.html#partitions.\n If not given (default), good divisions are calculated by immediately computing\n the data and looking at the distribution of its values. For large datasets,\n this can be expensive.\n Note that if ``sorted=True``, specified divisions are assumed to match\n the existing partitions in the data; if this is untrue you should\n leave divisions empty and call ``repartition`` after ``set_index``.\n inplace: bool, optional\n Modifying the DataFrame in place is not supported by Dask.\n Defaults to False.\n shuffle: string, 'disk' or 'tasks', optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n compute: bool, default False\n Whether or not to trigger an immediate computation. 
Defaults to False.\n Note that even if you set ``compute=False``, an immediate computation\n will still be triggered if ``divisions`` is ``None``.\n partition_size: int, optional\n Desired size of each partition in bytes.\n Only used when ``npartitions='auto'``.\n\n Examples\n --------\n >>> import dask\n >>> ddf = dask.datasets.timeseries(start=\"2021-01-01\", end=\"2021-01-07\", freq=\"1H\").reset_index()\n >>> ddf2 = ddf.set_index(\"x\")\n >>> ddf2 = ddf.set_index(ddf.x)\n >>> ddf2 = ddf.set_index(ddf.timestamp, sorted=True)\n\n A common case is when we have a datetime column that we know to be\n sorted and is cleanly divided by day. We can set this index for free\n by specifying both that the column is pre-sorted and the particular\n divisions along which it is separated.\n\n >>> import pandas as pd\n >>> divisions = pd.date_range(start=\"2021-01-01\", end=\"2021-01-07\", freq='1D')\n >>> divisions\n DatetimeIndex(['2021-01-01', '2021-01-02', '2021-01-03', '2021-01-04',\n '2021-01-05', '2021-01-06', '2021-01-07'],\n dtype='datetime64[ns]', freq='D')\n\n Note that ``len(divisions)`` is equal to ``npartitions + 1``. This is because ``divisions``\n represents the upper and lower bounds of each partition. The first item is the\n lower bound of the first partition, the second item is the lower bound of the\n second partition and the upper bound of the first partition, and so on.\n The second-to-last item is the lower bound of the last partition, and the last\n (extra) item is the upper bound of the last partition.\n\n >>> ddf2 = ddf.set_index(\"timestamp\", sorted=True, divisions=divisions.tolist())\n\n If you'll be running `set_index` on the same (or similar) datasets repeatedly,\n you could save time by letting Dask calculate good divisions once, then copy-pasting\n them to reuse. This is especially helpful when running in a Jupyter notebook:\n\n >>> ddf2 = ddf.set_index(\"name\") # slow, calculates data distribution\n >>> ddf2.divisions # doctest: +SKIP\n [\"Alice\", \"Laura\", \"Ursula\", \"Zelda\"]\n >>> # ^ Now copy-paste this and edit the line above to:\n >>> # ddf2 = ddf.set_index(\"name\", divisions=[\"Alice\", \"Laura\", \"Ursula\", \"Zelda\"])\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index.if_inplace__DataFrame.set_index.if_pre_sorted_.else_.return.set_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.set_index.if_inplace__DataFrame.set_index.if_pre_sorted_.else_.return.set_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3746, "end_line": 3770, "span_ids": ["DataFrame.set_index"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def set_index(\n self,\n other,\n drop=True,\n sorted=False,\n npartitions=None,\n divisions=None,\n inplace=False,\n **kwargs,\n ):\n if inplace:\n raise NotImplementedError(\"The inplace= keyword is not supported\")\n pre_sorted = sorted\n del sorted\n\n if divisions is not None:\n check_divisions(divisions)\n\n if pre_sorted:\n from .shuffle import set_sorted_index\n\n return set_sorted_index(\n self, other, drop=drop, divisions=divisions, **kwargs\n )\n else:\n from .shuffle import set_index\n\n return set_index(\n self,\n other,\n drop=drop,\n npartitions=npartitions,\n divisions=divisions,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.pop_DataFrame.categorize.return.categorize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.pop_DataFrame.categorize.return.categorize_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3914, "end_line": 3968, "span_ids": ["DataFrame.nlargest", "DataFrame.pop", "DataFrame.categorize", "DataFrame.groupby", "DataFrame.nsmallest"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def pop(self, item):\n out = self[item]\n del self[item]\n return out\n\n @derived_from(pd.DataFrame)\n def nlargest(self, n=5, columns=None, split_every=None):\n token = \"dataframe-nlargest\"\n return aca(\n self,\n chunk=M.nlargest,\n aggregate=M.nlargest,\n meta=self._meta,\n token=token,\n split_every=split_every,\n n=n,\n columns=columns,\n )\n\n @derived_from(pd.DataFrame)\n def nsmallest(self, n=5, columns=None, 
split_every=None):\n token = \"dataframe-nsmallest\"\n return aca(\n self,\n chunk=M.nsmallest,\n aggregate=M.nsmallest,\n meta=self._meta,\n token=token,\n split_every=split_every,\n n=n,\n columns=columns,\n )\n\n @derived_from(pd.DataFrame)\n def groupby(\n self, by=None, group_keys=True, sort=None, observed=None, dropna=None, **kwargs\n ):\n from dask.dataframe.groupby import DataFrameGroupBy\n\n return DataFrameGroupBy(\n self,\n by=by,\n group_keys=group_keys,\n sort=sort,\n observed=observed,\n dropna=dropna,\n **kwargs,\n )\n\n @wraps(categorize)\n def categorize(self, columns=None, index=None, split_every=None, **kwargs):\n return categorize(\n self, columns=columns, index=index, split_every=split_every, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.rename_DataFrame.query.return.self_map_partitions_M_que": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.rename_DataFrame.query.return.self_map_partitions_M_que", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4620, "end_line": 4647, "span_ids": ["DataFrame.rename", "DataFrame.query"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame, ua_args=[\"index\"])\n def rename(self, index=None, columns=None):\n if index is not None:\n raise ValueError(\"Cannot rename index.\")\n\n # *args here is index, columns but columns arg is already used\n return self.map_partitions(M.rename, None, columns=columns)\n\n def query(self, expr, **kwargs):\n \"\"\"Filter dataframe with complex expression\n\n Blocked version of pd.DataFrame.query\n\n This is like the sequential version except that this will also happen\n in many threads. This may conflict with ``numexpr`` which will use\n multiple threads itself. We recommend that you set ``numexpr`` to use a\n single thread:\n\n .. 
code-block:: python\n\n import numexpr\n numexpr.set_num_threads(1)\n\n See also\n --------\n pandas.DataFrame.query\n \"\"\"\n return self.map_partitions(M.query, expr, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.merge_DataFrame.merge.return.merge_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.merge_DataFrame.merge.return.merge_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4378, "end_line": 4497, "span_ids": ["DataFrame.merge"], "tokens": 1149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def merge(\n self,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n suffixes=(\"_x\", \"_y\"),\n indicator=False,\n npartitions=None,\n shuffle=None,\n broadcast=None,\n ):\n \"\"\"Merge the DataFrame with another DataFrame\n\n This will merge the two datasets, either on the indices, a certain column\n in each dataset or the index in one dataset and the column in another.\n\n Parameters\n ----------\n right: dask.dataframe.DataFrame\n how : {'left', 'right', 'outer', 'inner'}, default: 'inner'\n How to handle the operation of the two objects:\n\n - left: use calling frame's index (or column if on is specified)\n - right: use other frame's index\n - outer: form union of calling frame's index (or column if on is\n specified) with other frame's index, and sort it\n lexicographically\n - inner: form intersection of calling frame's index (or column if\n on is specified) with other frame's index, preserving the order\n of the calling frame's keys\n\n on : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If on is None and not merging on indexes then this\n defaults to the intersection of the columns in both DataFrames.\n left_on : label or list, or array-like\n Column to join on in the left DataFrame. Other than in pandas,\n arrays and lists are only supported if their length is 1.\n right_on : label or list, or array-like\n Column to join on in the right DataFrame. Other than in pandas,\n arrays and lists are only supported if their length is 1.\n left_index : boolean, default False\n Use the index from the left DataFrame as the join key.\n right_index : boolean, default False\n Use the index from the right DataFrame as the join key.\n suffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and\n right side, respectively\n indicator : boolean or string, default False\n If True, adds a column to output DataFrame called \"_merge\" with\n information on the source of each row. If string, column with\n information on source of each row will be added to output DataFrame,\n and column will be named value of string. 
Information column is\n Categorical-type and takes on a value of \"left_only\" for observations\n whose merge key only appears in `left` DataFrame, \"right_only\" for\n observations whose merge key only appears in `right` DataFrame,\n and \"both\" if the observation\u2019s merge key is found in both.\n npartitions: int or None, optional\n The ideal number of output partitions. This is only utilised when\n performing a hash_join (merging on columns only). If ``None`` then\n ``npartitions = max(lhs.npartitions, rhs.npartitions)``.\n Default is ``None``.\n shuffle: {'disk', 'tasks'}, optional\n Either ``'disk'`` for single-node operation or ``'tasks'`` for\n distributed operation. Will be inferred by your current scheduler.\n broadcast: boolean or float, optional\n Whether to use a broadcast-based join in lieu of a shuffle-based\n join for supported cases. By default, a simple heuristic will be\n used to select the underlying algorithm. If a floating-point value\n is specified, that number will be used as the ``broadcast_bias``\n within the simple heuristic (a large number makes Dask more likely\n to choose the ``broadcast_join`` code path). See ``broadcast_join``\n for more information.\n\n Notes\n -----\n\n There are three ways to join dataframes:\n\n 1. Joining on indices. In this case the divisions are\n aligned using the function ``dask.dataframe.multi.align_partitions``.\n Afterwards, each partition is merged with the pandas merge function.\n\n 2. Joining one on index and one on column. In this case the divisions of\n dataframe merged by index (:math:`d_i`) are used to divide the column\n merged dataframe (:math:`d_c`) using\n ``dask.dataframe.multi.rearrange_by_divisions``. In this case the\n merged dataframe (:math:`d_m`) has the exact same divisions\n as (:math:`d_i`). This can lead to issues if you merge multiple rows from\n (:math:`d_c`) to one row in (:math:`d_i`).\n\n 3. Joining both on columns. 
In this case a hash join is performed using\n ``dask.dataframe.multi.hash_join``.\n\n \"\"\"\n\n if not is_dataframe_like(right):\n raise ValueError(\"right must be DataFrame\")\n\n from .multi import merge\n\n return merge(\n self,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n npartitions=npartitions,\n indicator=indicator,\n shuffle=shuffle,\n broadcast=broadcast,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.join_DataFrame.join.return.merge_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.join_DataFrame.join.return.merge_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5052, "end_line": 5112, "span_ids": ["DataFrame.join"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def join(\n self,\n other,\n on=None,\n how=\"left\",\n lsuffix=\"\",\n rsuffix=\"\",\n npartitions=None,\n shuffle=None,\n ):\n if is_series_like(other) and hasattr(other, \"name\"):\n other = other.to_frame()\n\n if not is_dataframe_like(other):\n if not isinstance(other, list) or not all(\n [is_dataframe_like(o) for o in other]\n ):\n raise ValueError(\"other must be DataFrame or list of DataFrames\")\n if how not in [\"outer\", \"left\"]:\n raise ValueError(\"merge_multi only supports left or outer joins\")\n\n from .multi import _recursive_pairwise_outer_join\n\n # If it's an outer join we can use the full recursive pairwise join.\n if how == \"outer\":\n full = [self] + other\n\n return _recursive_pairwise_outer_join(\n full,\n on=on,\n lsuffix=lsuffix,\n rsuffix=rsuffix,\n npartitions=npartitions,\n shuffle=shuffle,\n )\n else:\n # Do recursive pairwise join on everything _except_ the last join\n # where we need to do a left join.\n other = _recursive_pairwise_outer_join(\n other,\n on=on,\n lsuffix=lsuffix,\n rsuffix=rsuffix,\n npartitions=npartitions,\n shuffle=shuffle,\n )\n\n from .multi import merge\n\n return merge(\n self,\n other,\n how=how,\n left_index=on is None,\n right_index=True,\n left_on=on,\n suffixes=(lsuffix, rsuffix),\n npartitions=npartitions,\n shuffle=shuffle,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.append_DataFrame.items.for_col_idx_label_in_enu.yield_label_self_iloc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.append_DataFrame.items.for_col_idx_label_in_enu.yield_label_self_iloc_", 
"embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4943, "end_line": 4970, "span_ids": ["DataFrame.append", "DataFrame.items", "DataFrame.iterrows", "DataFrame.itertuples"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def append(self, other, interleave_partitions=False):\n if isinstance(other, Series):\n msg = (\n \"Unable to append dd.Series to dd.DataFrame. \"\n \"Use pd.Series to append as row.\"\n )\n raise ValueError(msg)\n elif is_series_like(other):\n other = other.to_frame().T\n return super().append(other, interleave_partitions=interleave_partitions)\n\n @derived_from(pd.DataFrame)\n def iterrows(self):\n for i in range(self.npartitions):\n df = self.get_partition(i).compute()\n yield from df.iterrows()\n\n @derived_from(pd.DataFrame)\n def itertuples(self, index=True, name=\"Pandas\"):\n for i in range(self.npartitions):\n df = self.get_partition(i).compute()\n yield from df.itertuples(index=index, name=name)\n\n @derived_from(pd.DataFrame)\n def items(self):\n for col_idx, label in enumerate(self.columns):\n yield label, self.iloc[:, col_idx]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._bind_operator_method_DataFrame._bind_comparison_method.setattr_cls_name_derive": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._bind_operator_method_DataFrame._bind_comparison_method.setattr_cls_name_derive", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4972, "end_line": 5032, "span_ids": ["DataFrame._bind_operator_method", "DataFrame._bind_comparison_method"], "tokens": 468}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @classmethod\n def _bind_operator_method(cls, name, op, original=pd.DataFrame):\n \"\"\"bind operator method like DataFrame.add to this class\"\"\"\n\n # name must be explicitly passed for div method whose name is truediv\n\n def meth(self, other, axis=\"columns\", level=None, fill_value=None):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n\n axis = self._validate_axis(axis)\n\n if axis in (1, \"columns\"):\n # When axis=1 and other is a series, `other` is transposed\n # and the operator is broadcast across rows. 
This\n # isn't supported with dd.Series.\n if isinstance(other, Series):\n msg = f\"Unable to {name} dd.Series with axis=1\"\n raise ValueError(msg)\n elif is_series_like(other):\n # Special case for pd.Series to avoid unwanted partitioning\n # of other. We pass it in as a kwarg to prevent this.\n meta = _emulate(\n op, self, other=other, axis=axis, fill_value=fill_value\n )\n return map_partitions(\n op,\n self,\n other=other,\n meta=meta,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)\n return map_partitions(\n op,\n self,\n other,\n meta=meta,\n axis=axis,\n fill_value=fill_value,\n enforce_metadata=False,\n )\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))\n\n @classmethod\n def _bind_comparison_method(cls, name, comparison, original=pd.DataFrame):\n \"\"\"bind comparison method like DataFrame.eq to this class\"\"\"\n\n def meth(self, other, axis=\"columns\", level=None):\n if level is not None:\n raise NotImplementedError(\"level must be None\")\n axis = self._validate_axis(axis)\n return elemwise(comparison, self, other, axis=axis)\n\n meth.__name__ = name\n setattr(cls, name, derived_from(original)(meth))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.apply_DataFrame.apply.return.map_partitions_M_apply_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.apply_DataFrame.apply.return.map_partitions_M_apply_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5034, "end_line": 5133, "span_ids": ["DataFrame.apply"], "tokens": 820}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @insert_meta_param_description(pad=12)\n def apply(\n self,\n func,\n axis=0,\n broadcast=None,\n raw=False,\n reduce=None,\n args=(),\n meta=no_default,\n result_type=None,\n **kwds,\n ):\n \"\"\"Parallel version of pandas.DataFrame.apply\n\n This mimics the pandas version except for the following:\n\n 1. Only ``axis=1`` is supported (and must be specified explicitly).\n 2. The user should provide output metadata via the `meta` keyword.\n\n Parameters\n ----------\n func : function\n Function to apply to each column/row\n axis : {0 or 'index', 1 or 'columns'}, default 0\n - 0 or 'index': apply function to each column (NOT SUPPORTED)\n - 1 or 'columns': apply function to each row\n $META\n args : tuple\n Positional arguments to pass to function in addition to the array/series\n\n Additional keyword arguments will be passed as keywords to the function\n\n Returns\n -------\n applied : Series or DataFrame\n\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],\n ... 
'y': [1., 2., 3., 4., 5.]})\n >>> ddf = dd.from_pandas(df, npartitions=2)\n\n Apply a function row-wise, passing in extra arguments in ``args`` and\n ``kwargs``:\n\n >>> def myadd(row, a, b=1):\n ... return row.sum() + a + b\n >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP\n\n By default, dask tries to infer the output metadata by running your\n provided function on some fake data. This works well in many cases, but\n can sometimes be expensive, or even fail. To avoid this, you can\n manually specify the output metadata with the ``meta`` keyword. This\n can be specified in many forms, for more information see\n ``dask.dataframe.utils.make_meta``.\n\n Here we specify the output is a Series with name ``'x'``, and dtype\n ``float64``:\n\n >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))\n\n In the case where the metadata doesn't change, you can also pass in\n the object itself directly:\n\n >>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)\n\n See Also\n --------\n dask.DataFrame.map_partitions\n \"\"\"\n\n if broadcast is not None:\n warnings.warn(\n \"The `broadcast` argument is no longer used/supported. \"\n \"It will be dropped in a future release.\",\n category=FutureWarning,\n )\n\n axis = self._validate_axis(axis)\n pandas_kwargs = {\"axis\": axis, \"raw\": raw, \"result_type\": result_type}\n\n kwds.update(pandas_kwargs)\n\n if axis == 0:\n msg = (\n \"dd.DataFrame.apply only supports axis=1\\n\"\n \" Try: df.apply(func, axis=1)\"\n )\n raise NotImplementedError(msg)\n\n if meta is no_default:\n meta = _emulate(\n M.apply, self._meta_nonempty, func, args=args, udf=True, **kwds\n )\n warnings.warn(meta_warning(meta))\n kwds.update({\"parent_meta\": self._meta})\n return map_partitions(M.apply, self, func, args=args, meta=meta, **kwds)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.info_DataFrame.info.put_lines_buf_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.info_DataFrame.info.put_lines_buf_lines_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5385, "end_line": 5472, "span_ids": ["DataFrame.info"], "tokens": 619}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def info(self, buf=None, verbose=False, memory_usage=False):\n \"\"\"\n Concise summary of a Dask DataFrame.\n \"\"\"\n\n if buf is None:\n import sys\n\n buf = sys.stdout\n\n lines = [str(type(self))]\n\n if len(self.columns) == 0:\n lines.append(\"Index: 0 entries\")\n lines.append(f\"Empty {type(self).__name__}\")\n if PANDAS_GT_150:\n # pandas dataframe started adding a newline when info is called.\n lines.append(\"\")\n put_lines(buf, lines)\n return\n\n # Group and execute the required computations\n computations = {}\n if verbose:\n memory_usage = True\n 
computations.update({\"index\": self.index, \"count\": self.count()})\n if memory_usage:\n computations.update(\n {\"memory_usage\": self.map_partitions(M.memory_usage, index=True)}\n )\n computations = dict(\n zip(computations.keys(), da.compute(*computations.values()))\n )\n\n if verbose:\n import textwrap\n\n index = computations[\"index\"]\n counts = computations[\"count\"]\n lines.append(index_summary(index))\n lines.append(f\"Data columns (total {len(self.columns)} columns):\")\n\n from pandas.io.formats.printing import pprint_thing\n\n space = max(len(pprint_thing(k)) for k in self.columns) + 1\n column_width = max(space, 7)\n\n header = (\n textwrap.dedent(\n \"\"\"\\\n # {{column:<{column_width}}} Non-Null Count Dtype\n --- {{underl:<{column_width}}} -------------- -----\"\"\"\n )\n .format(column_width=column_width)\n .format(column=\"Column\", underl=\"------\")\n )\n column_template = textwrap.dedent(\n \"\"\"\\\n {{i:^3}} {{name:<{column_width}}} {{count}} non-null {{dtype}}\"\"\".format(\n column_width=column_width\n )\n )\n column_info = [\n column_template.format(\n i=pprint_thing(i),\n name=pprint_thing(name),\n count=pprint_thing(count),\n dtype=pprint_thing(dtype),\n )\n for i, (name, count, dtype) in enumerate(\n zip(self.columns, counts, self.dtypes)\n )\n ]\n lines.extend(header.split(\"\\n\"))\n else:\n column_info = [index_summary(self.columns, name=\"Columns\")]\n\n lines.extend(column_info)\n dtype_counts = [\n \"%s(%d)\" % k for k in sorted(self.dtypes.value_counts().items(), key=str)\n ]\n lines.append(\"dtypes: {}\".format(\", \".join(dtype_counts)))\n\n if memory_usage:\n memory_int = computations[\"memory_usage\"].sum()\n lines.append(f\"memory usage: {memory_repr(memory_int)}\\n\")\n\n put_lines(buf, lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.memory_usage_DataFrame.pivot_table.return.pivot_table_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.memory_usage_DataFrame.pivot_table.return.pivot_table_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4467, "end_line": 4497, "span_ids": ["DataFrame.memory_usage", "DataFrame.pivot_table"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def memory_usage(self, index=True, deep=False):\n result = self.map_partitions(M.memory_usage, index=index, deep=deep)\n result = result.groupby(result.index).sum()\n return result\n\n def pivot_table(self, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. 
Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, ``values`` and ``aggfunc`` must all be scalar.\n\n Parameters\n ----------\n values : scalar\n column to aggregate\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n aggfunc : {'mean', 'sum', 'count'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n \"\"\"\n from .reshape import pivot_table\n\n return pivot_table(\n self, index=index, columns=columns, values=values, aggfunc=aggfunc\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.melt_DataFrame.melt.return.melt_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.melt_DataFrame.melt.return.melt_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4499, "end_line": 4551, "span_ids": ["DataFrame.melt"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n ):\n \"\"\"\n Unpivots a DataFrame from wide format to long format,\n optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where\n one or more columns are identifier variables (``id_vars``), while\n all other columns, considered measured variables (``value_vars``),\n are \"unpivoted\" to the row axis, leaving just two non-identifier\n columns, 'variable' and 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the 'variable' column. 
If None it uses\n ``frame.columns.name`` or 'variable'.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n \"\"\"\n from .reshape import melt\n\n return melt(\n self,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._select_columns_or_index_DataFrame._is_column_label_reference.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._select_columns_or_index_DataFrame._is_column_label_reference.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4598, "end_line": 4642, "span_ids": ["DataFrame._is_column_label_reference", "DataFrame._select_columns_or_index"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def _select_columns_or_index(self, columns_or_index):\n \"\"\"\n Parameters\n ----------\n columns_or_index\n Column or index name, or a list of these\n\n Returns\n -------\n dd.DataFrame\n Dask DataFrame with columns corresponding to each column or\n index level in columns_or_index. 
If included, the column\n corresponding to the index level is named _index\n \"\"\"\n\n # Ensure columns_or_index is a list\n columns_or_index = (\n columns_or_index\n if isinstance(columns_or_index, list)\n else [columns_or_index]\n )\n\n column_names = [\n n for n in columns_or_index if self._is_column_label_reference(n)\n ]\n\n selected_df = self[column_names]\n if self._contains_index_name(columns_or_index):\n # Index name was included\n selected_df = selected_df.assign(_index=self.index)\n\n return selected_df\n\n def _is_column_label_reference(self, key):\n \"\"\"\n Test whether a key is a column label reference\n\n To be considered a column label reference, `key` must match the name of at\n least one column.\n \"\"\"\n return (\n not is_dask_collection(key)\n and (np.isscalar(key) or isinstance(key, tuple))\n and key in self.columns\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__bind_operators_is_broadcastable.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__bind_operators_is_broadcastable.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5476, "end_line": 5547, "span_ids": ["is_broadcastable", "DataFrame._is_column_label_reference", "impl:5"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# bind operators\nfor op in [\n operator.abs,\n operator.add,\n operator.and_,\n operator.eq,\n operator.gt,\n operator.ge,\n operator.inv,\n operator.lt,\n operator.le,\n operator.mod,\n operator.mul,\n operator.ne,\n operator.neg,\n operator.or_,\n operator.pow,\n operator.sub,\n operator.truediv,\n operator.floordiv,\n operator.xor,\n]:\n _Frame._bind_operator(op)\n Scalar._bind_operator(op)\n\nfor name in [\n \"add\",\n \"sub\",\n \"mul\",\n \"div\",\n \"divide\",\n \"truediv\",\n \"floordiv\",\n \"mod\",\n \"pow\",\n \"radd\",\n \"rsub\",\n \"rmul\",\n \"rdiv\",\n \"rtruediv\",\n \"rfloordiv\",\n \"rmod\",\n \"rpow\",\n]:\n meth = getattr(pd.DataFrame, name)\n DataFrame._bind_operator_method(name, meth)\n\n meth = getattr(pd.Series, name)\n Series._bind_operator_method(name, meth)\n\nfor name in [\"lt\", \"gt\", \"le\", \"ge\", \"ne\", \"eq\"]:\n meth = getattr(pd.DataFrame, name)\n DataFrame._bind_comparison_method(name, meth)\n\n meth = getattr(pd.Series, name)\n Series._bind_comparison_method(name, meth)\n\n\ndef is_broadcastable(dfs, s):\n \"\"\"\n This Series is broadcastable against another dataframe in the sequence\n \"\"\"\n return (\n isinstance(s, Series)\n and s.npartitions == 1\n and s.known_divisions\n and any(\n s.divisions == (df.columns.min(), df.columns.max())\n for df in dfs\n if isinstance(df, DataFrame)\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise_elemwise.graph.HighLevelGraph_from_colle": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise_elemwise.graph.HighLevelGraph_from_colle", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5550, "end_line": 5630, "span_ids": ["elemwise"], "tokens": 736}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def elemwise(op, *args, meta=no_default, out=None, transform_divisions=True, **kwargs):\n \"\"\"Elementwise operation for Dask dataframes\n\n Parameters\n ----------\n op: callable\n Function to apply across input dataframes\n *args: DataFrames, Series, Scalars, Arrays,\n The arguments of the operation\n meta: pd.DataFrame, pd.Series (optional)\n Valid metadata for the operation. Will evaluate on a small piece of\n data if not provided.\n transform_divisions: boolean\n If the input is a ``dask.dataframe.Index`` we normally will also apply\n the function onto the divisions and apply those transformed divisions\n to the output. You can pass ``transform_divisions=False`` to override\n this behavior\n out : ``dask.array`` or ``None``\n If out is a dask.DataFrame, dask.Series or dask.Scalar then\n this overwrites the contents of it with the result\n **kwargs: scalars\n\n Examples\n --------\n >>> elemwise(operator.add, df.x, df.y) # doctest: +SKIP\n \"\"\"\n _name = funcname(op) + \"-\" + tokenize(op, *args, **kwargs)\n\n args = _maybe_from_pandas(args)\n\n from .multi import _maybe_align_partitions\n\n args = _maybe_align_partitions(args)\n dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar, Array))]\n dfs = [df for df in dasks if isinstance(df, _Frame)]\n\n # Clean up dask arrays if present\n deps = dasks.copy()\n for i, a in enumerate(dasks):\n if not isinstance(a, Array):\n continue\n # Ensure that they have similar-ish chunk structure\n if not all(not a.chunks or len(a.chunks[0]) == df.npartitions for df in dfs):\n msg = (\n \"When combining dask arrays with dataframes they must \"\n \"match chunking exactly. 
Operation: %s\" % funcname(op)\n )\n raise ValueError(msg)\n # Rechunk to have a single chunk along all other axes\n if a.ndim > 1:\n a = a.rechunk({i + 1: d for i, d in enumerate(a.shape[1:])})\n dasks[i] = a\n\n divisions = dfs[0].divisions\n if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:\n try:\n divisions = op(\n *[pd.Index(arg.divisions) if arg is dfs[0] else arg for arg in args],\n **kwargs,\n )\n if isinstance(divisions, pd.Index):\n divisions = methods.tolist(divisions)\n except Exception:\n pass\n else:\n if not valid_divisions(divisions):\n divisions = [None] * (dfs[0].npartitions + 1)\n\n _is_broadcastable = partial(is_broadcastable, dfs)\n dfs = list(remove(_is_broadcastable, dfs))\n\n other = [\n (i, arg)\n for i, arg in enumerate(args)\n if not isinstance(arg, (_Frame, Scalar, Array))\n ]\n\n # adjust the key length of Scalar\n dsk = partitionwise_graph(op, _name, *args, **kwargs)\n\n graph = HighLevelGraph.from_collections(_name, dsk, dependencies=deps)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise.if_meta_is_no_default__elemwise.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_elemwise.if_meta_is_no_default__elemwise.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5632, "end_line": 5650, "span_ids": ["elemwise"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def elemwise(op, *args, meta=no_default, out=None, transform_divisions=True, **kwargs):\n # ... 
other code\n\n if meta is no_default:\n if len(dfs) >= 2 and not all(hasattr(d, \"npartitions\") for d in dasks):\n # should not occur in current funcs\n msg = \"elemwise with 2 or more DataFrames and Scalar is not supported\"\n raise NotImplementedError(msg)\n # For broadcastable series, use no rows.\n parts = [\n d._meta\n if _is_broadcastable(d)\n else np.empty((), dtype=d.dtype)\n if isinstance(d, Array)\n else d._meta_nonempty\n for d in dasks\n ]\n with raise_on_meta_error(funcname(op)):\n meta = partial_by_order(*parts, function=op, other=other)\n\n result = new_dd_object(graph, _name, meta, divisions)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_handle_out_handle_out.if_isinstance_out_Serie.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_handle_out_handle_out.if_isinstance_out_Serie.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4975, "end_line": 5019, "span_ids": ["handle_out"], "tokens": 364}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def handle_out(out, result):\n \"\"\"Handle out parameters\n\n If out is a dask.DataFrame, dask.Series or dask.Scalar then\n this overwrites the contents of it with the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n\n # Notice, we use .__class__ as opposed to type() in order to support\n # object proxies see \n if out is not None and out.__class__ != result.__class__:\n raise TypeError(\n \"Mismatched types between result and out parameter. \"\n \"out=%s, result=%s\" % (str(type(out)), str(type(result)))\n )\n\n if isinstance(out, DataFrame):\n if len(out.columns) != len(result.columns):\n raise ValueError(\n \"Mismatched columns count between result and out parameter. 
\"\n \"out=%s, result=%s\" % (str(len(out.columns)), str(len(result.columns)))\n )\n\n if isinstance(out, (Series, DataFrame, Scalar)):\n out._meta = result._meta\n out._name = result._name\n out.dask = result.dask\n\n if not isinstance(out, Scalar):\n out.divisions = result.divisions\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected %s \"\n % (typename(type(out)), typename(type(result)))\n )\n raise NotImplementedError(msg)\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__maybe_from_pandas_split_out_on_cols.return.df_cols_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__maybe_from_pandas_split_out_on_cols.return.df_cols_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5322, "end_line": 5363, "span_ids": ["split_evenly", "split_out_on_index", "hash_shard", "split_out_on_cols", "_maybe_from_pandas"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_from_pandas(dfs):\n from .io import from_pandas\n\n dfs = [\n from_pandas(df, 1)\n if (is_series_like(df) or is_dataframe_like(df)) and not is_dask_collection(df)\n else df\n for df in dfs\n ]\n return dfs\n\n\ndef hash_shard(\n df, nparts, split_out_setup=None, split_out_setup_kwargs=None, ignore_index=False\n):\n if split_out_setup:\n h = split_out_setup(df, **(split_out_setup_kwargs or {}))\n else:\n h = df\n\n h = hash_object_dispatch(h, index=False)\n if is_series_like(h):\n h = h.values\n np.mod(h, nparts, out=h)\n return group_split_dispatch(df, h, nparts, ignore_index=ignore_index)\n\n\ndef split_evenly(df, k):\n \"\"\"Split dataframe into k roughly equal parts\"\"\"\n divisions = np.linspace(0, len(df), k + 1).astype(int)\n return {i: df.iloc[divisions[i] : divisions[i + 1]] for i in range(k)}\n\n\ndef split_out_on_index(df):\n h = df.index\n if isinstance(h, pd.MultiIndex):\n h = pd.DataFrame([], index=h).reset_index()\n return h\n\n\ndef split_out_on_cols(df, cols=None):\n return df[cols]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply_apply_concat_apply.npartitions_2.npartitions_pop_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply_apply_concat_apply.npartitions_2.npartitions_pop_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5744, "end_line": 5843, "span_ids": ["apply_concat_apply"], 
"tokens": 760}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef apply_concat_apply(\n args,\n chunk=None,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n split_every=None,\n split_out=None,\n split_out_setup=None,\n split_out_setup_kwargs=None,\n sort=None,\n ignore_index=False,\n **kwargs,\n):\n \"\"\"Apply a function to blocks, then concat, then apply again\n\n Parameters\n ----------\n args :\n Positional arguments for the `chunk` function. All `dask.dataframe`\n objects should be partitioned and indexed equivalently.\n chunk : function [block-per-arg] -> block\n Function to operate on each block of data\n aggregate : function concatenated-block -> block\n Function to operate on the concatenated result of chunk\n combine : function concatenated-block -> block, optional\n Function to operate on intermediate concatenated results of chunk\n in a tree-reduction. If not provided, defaults to aggregate.\n $META\n token : str, optional\n The name to use for the output keys.\n chunk_kwargs : dict, optional\n Keywords for the chunk function only.\n aggregate_kwargs : dict, optional\n Keywords for the aggregate function only.\n combine_kwargs : dict, optional\n Keywords for the combine function only.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. If set to False, no tree-reduction will be used,\n and all intermediates will be concatenated and passed to ``aggregate``.\n Default is 8.\n split_out : int, optional\n Number of output partitions. Split occurs after first chunk reduction.\n split_out_setup : callable, optional\n If provided, this function is called on each chunk before performing\n the hash-split. It should return a pandas object, where each row\n (excluding the index) is hashed. If not provided, the chunk is hashed\n as is.\n split_out_setup_kwargs : dict, optional\n Keywords for the `split_out_setup` function only.\n sort : bool, default None\n If allowed, sort the keys of the output aggregation.\n ignore_index : bool, default False\n If True, do not preserve index values throughout ACA operations.\n kwargs :\n All remaining keywords will be passed to ``chunk``, ``aggregate``, and\n ``combine``.\n\n Examples\n --------\n >>> def chunk(a_block, b_block):\n ... pass\n\n >>> def agg(df):\n ... 
pass\n\n >>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP\n \"\"\"\n if chunk_kwargs is None:\n chunk_kwargs = dict()\n if aggregate_kwargs is None:\n aggregate_kwargs = dict()\n chunk_kwargs.update(kwargs)\n aggregate_kwargs.update(kwargs)\n\n if combine is None:\n if combine_kwargs:\n raise ValueError(\"`combine_kwargs` provided with no `combine`\")\n combine = aggregate\n combine_kwargs = aggregate_kwargs\n else:\n if combine_kwargs is None:\n combine_kwargs = dict()\n combine_kwargs.update(kwargs)\n\n if not isinstance(args, (tuple, list)):\n args = [args]\n\n dfs = [arg for arg in args if isinstance(arg, _Frame)]\n\n npartitions = {arg.npartitions for arg in dfs}\n if len(npartitions) > 1:\n raise ValueError(\"All arguments must have same number of partitions\")\n npartitions = npartitions.pop()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_aca__emulate.with_raise_on_meta_error_.return.func__extract_meta_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_aca__emulate.with_raise_on_meta_error_.return.func__extract_meta_args_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5958, "end_line": 5990, "span_ids": ["impl:22", "_emulate", "_extract_meta"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "aca = apply_concat_apply\n\n\ndef _extract_meta(x, nonempty=False):\n \"\"\"\n Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series\n \"\"\"\n if isinstance(x, (Scalar, _Frame)):\n return x._meta_nonempty if nonempty else x._meta\n elif isinstance(x, list):\n return [_extract_meta(_x, nonempty) for _x in x]\n elif isinstance(x, tuple):\n return tuple(_extract_meta(_x, nonempty) for _x in x)\n elif isinstance(x, dict):\n res = {}\n for k in x:\n res[k] = _extract_meta(x[k], nonempty)\n return res\n elif isinstance(x, Delayed):\n raise ValueError(\n \"Cannot infer dataframe metadata with a `dask.delayed` argument\"\n )\n else:\n return x\n\n\ndef _emulate(func, *args, udf=False, **kwargs):\n \"\"\"\n Apply a function using args / kwargs. 
If arguments contain dd.DataFrame /\n dd.Series, use the internal cache (``_meta``) for the calculation\n \"\"\"\n with raise_on_meta_error(funcname(func), udf=udf):\n return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_and_enforce_apply_and_enforce.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_and_enforce_apply_and_enforce.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5283, "end_line": 5299, "span_ids": ["apply_and_enforce"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_and_enforce(*args, **kwargs):\n \"\"\"Apply a function, and enforce the output to match meta\n\n Ensures the output has the same columns, even if empty.\"\"\"\n func = kwargs.pop(\"_func\")\n meta = kwargs.pop(\"_meta\")\n df = func(*args, **kwargs)\n if is_dataframe_like(df) or is_series_like(df) or is_index_like(df):\n if not len(df):\n return meta\n if is_dataframe_like(df):\n check_matching_columns(meta, df)\n c = meta.columns\n else:\n c = meta.name\n return _rename(c, df)\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename__rename.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename__rename.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5302, "end_line": 5346, "span_ids": ["_rename"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _rename(columns, df):\n \"\"\"\n Rename columns of pd.DataFrame or name of pd.Series.\n Not for dd.DataFrame or dd.Series.\n\n Parameters\n ----------\n columns : tuple, string, pd.DataFrame or pd.Series\n Column names, Series name or pandas instance which has the\n target column names / name.\n df : pd.DataFrame or pd.Series\n target DataFrame / Series to be renamed\n \"\"\"\n assert not isinstance(df, _Frame)\n\n if columns is no_default:\n return df\n\n if isinstance(columns, Iterator):\n columns = list(columns)\n\n if is_dataframe_like(df):\n if is_dataframe_like(columns):\n columns = columns.columns\n if not isinstance(columns, 
pd.Index):\n columns = pd.Index(columns)\n if (\n len(columns) == len(df.columns)\n and type(columns) is type(df.columns)\n and columns.equals(df.columns)\n ):\n # if target is identical, rename is not necessary\n return df\n # deep=False doesn't copy any data/indices, so this is cheap\n df = df.copy(deep=False)\n df.columns = columns\n return df\n elif is_series_like(df) or is_index_like(df):\n if is_series_like(columns) or is_index_like(columns):\n columns = columns.name\n if df.name == columns:\n return df\n return df.rename(columns)\n # map_partition may pass other types\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename_dask__rename_dask.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__rename_dask__rename_dask.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6214, "end_line": 6236, "span_ids": ["_rename_dask"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _rename_dask(df, names):\n \"\"\"\n Destructively rename columns of dd.DataFrame or name of dd.Series.\n Not for pd.DataFrame or pd.Series.\n\n Internally used to overwrite dd.DataFrame.columns and dd.Series.name\n We can't use map_partition because it applies the function and then renames\n\n Parameters\n ----------\n df : dd.DataFrame or dd.Series\n target DataFrame / Series to be renamed\n names : tuple, string\n Column names/Series name\n \"\"\"\n\n assert isinstance(df, _Frame)\n metadata = _rename(names, df._meta)\n name = f\"rename-{tokenize(df, metadata)}\"\n\n dsk = partitionwise_graph(_rename, name, metadata, df)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n return new_dd_object(graph, name, metadata, df.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile_quantile.df.df_dropna_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile_quantile.df.df_dropna_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5374, "end_line": 5443, "span_ids": ["quantile"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def quantile(df, q, method=\"default\"):\n \"\"\"Approximate quantiles of Series.\n\n Parameters\n ----------\n q : list/array of floats\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use dask's internal custom\n algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for\n floats and ints and fall back to ``'dask'`` otherwise.\n \"\"\"\n # current implementation needs q to be sorted so\n # sort if array-like, otherwise leave it alone\n q_ndarray = np.array(q)\n if q_ndarray.ndim > 0:\n q_ndarray.sort(kind=\"mergesort\")\n q = q_ndarray\n\n assert isinstance(df, Series)\n\n allowed_methods = [\"default\", \"dask\", \"tdigest\"]\n if method not in allowed_methods:\n raise ValueError(\"method can only be 'default', 'dask' or 'tdigest'\")\n\n if method == \"default\":\n internal_method = \"dask\"\n else:\n internal_method = method\n\n # currently, only Series has quantile method\n if isinstance(df, Index):\n series_typ = df._meta.to_series()._constructor\n meta = df._meta_nonempty.to_series().quantile(q)\n else:\n if is_series_like(df._meta):\n series_typ = df._meta._constructor\n else:\n series_typ = df._meta._constructor_sliced\n meta = df._meta_nonempty.quantile(q)\n\n if is_series_like(meta):\n # Index.quantile(list-like) must be pd.Series, not pd.Index\n df_name = df.name\n finalize_tsk = lambda tsk: (series_typ, tsk, q, None, df_name)\n return_type = Series\n else:\n finalize_tsk = lambda tsk: (getitem, tsk, 0)\n return_type = Scalar\n q = [q]\n\n # pandas uses quantile in [0, 1]\n # numpy / everyone else uses [0, 100]\n qs = np.asarray(q) * 100\n token = tokenize(df, qs)\n\n if len(qs) == 0:\n name = \"quantiles-\" + token\n empty_index = pd.Index([], dtype=float)\n\n return Series(\n {(name, 0): series_typ([], name=df.name, index=empty_index, dtype=\"float\")},\n name,\n df._meta,\n [None, None],\n )\n else:\n new_divisions = [np.min(q), np.max(q)]\n\n df = df.dropna()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile.if_internal_method_td_quantile.return.return_type_graph_name2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_quantile.if_internal_method_td_quantile.return.return_type_graph_name2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6310, "end_line": 6362, "span_ids": ["quantile"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def quantile(df, q, method=\"default\"):\n # ... 
other code\n\n if internal_method == \"tdigest\" and (\n np.issubdtype(df.dtype, np.floating) or np.issubdtype(df.dtype, np.integer)\n ):\n\n from dask.utils import import_required\n\n import_required(\n \"crick\", \"crick is a required dependency for using the t-digest method.\"\n )\n\n from dask.array.percentile import _percentiles_from_tdigest, _tdigest_chunk\n\n name = \"quantiles_tdigest-1-\" + token\n val_dsk = {\n (name, i): (_tdigest_chunk, (getattr, key, \"values\"))\n for i, key in enumerate(df.__dask_keys__())\n }\n\n name2 = \"quantiles_tdigest-2-\" + token\n merge_dsk = {\n (name2, 0): finalize_tsk((_percentiles_from_tdigest, qs, sorted(val_dsk)))\n }\n else:\n\n from dask.array.dispatch import percentile_lookup as _percentile\n from dask.array.percentile import merge_percentiles\n\n # Add 0 and 100 during calculation for more robust behavior (hopefully)\n calc_qs = np.pad(qs, 1, mode=\"constant\")\n calc_qs[-1] = 100\n name = \"quantiles-1-\" + token\n val_dsk = {\n (name, i): (_percentile, key, calc_qs)\n for i, key in enumerate(df.__dask_keys__())\n }\n\n name2 = \"quantiles-2-\" + token\n merge_dsk = {\n (name2, 0): finalize_tsk(\n (\n merge_percentiles,\n qs,\n [calc_qs] * df.npartitions,\n sorted(val_dsk),\n \"lower\",\n None,\n False,\n )\n )\n }\n dsk = merge(val_dsk, merge_dsk)\n graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[df])\n return return_type(graph, name2, meta, new_divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_cov_corr.return.DataFrame_graph_name_me": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_cov_corr.return.DataFrame_graph_name_me", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6365, "end_line": 6439, "span_ids": ["cov_corr"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):\n \"\"\"DataFrame covariance and pearson correlation.\n\n Computes pairwise covariance or correlation of columns, excluding NA/null\n values.\n\n Parameters\n ----------\n df : DataFrame\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n corr : bool, optional\n If True, compute the Pearson correlation. If False [default], compute\n the covariance.\n scalar : bool, optional\n If True, compute covariance between two variables as a scalar. Only\n valid if `df` has 2 columns. If False [default], compute the entire\n covariance/correlation matrix.\n split_every : int, optional\n Group partitions into groups of this size while performing a\n tree-reduction. 
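# NOTE (editor's illustration, not dask source): the quantile helper above
# accepts pandas-style q in [0, 1] and rescales to the [0, 100] percentile
# convention, padding with 0/100 sentinels in the non-tdigest branch. A
# minimal numpy sketch of that bookkeeping:
import numpy as np

q = np.array([0.25, 0.5, 0.75])           # pandas-style quantiles
qs = np.asarray(q) * 100                  # mirrors `qs = np.asarray(q) * 100`
calc_qs = np.pad(qs, 1, mode="constant")  # prepends and appends a 0
calc_qs[-1] = 100                         # turn the trailing pad into 100
assert list(calc_qs) == [0.0, 25.0, 50.0, 75.0, 100.0]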
If set to False, no tree-reduction will be used.\n Default is False.\n \"\"\"\n if min_periods is None:\n min_periods = 2\n elif min_periods < 2:\n raise ValueError(\"min_periods must be >= 2\")\n\n if split_every is False:\n split_every = df.npartitions\n elif split_every < 2 or not isinstance(split_every, Integral):\n raise ValueError(\"split_every must be an integer >= 2\")\n\n df = df._get_numeric_data()\n\n if scalar and len(df.columns) != 2:\n raise ValueError(\"scalar only valid for 2 column dataframe\")\n\n token = tokenize(df, min_periods, scalar, split_every)\n\n funcname = \"corr\" if corr else \"cov\"\n a = f\"{funcname}-chunk-{df._name}\"\n dsk = {\n (a, i): (cov_corr_chunk, f, corr) for (i, f) in enumerate(df.__dask_keys__())\n }\n\n prefix = f\"{funcname}-combine-{df._name}-\"\n k = df.npartitions\n b = a\n depth = 0\n while k > split_every:\n b = prefix + str(depth)\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)\n k = part_i + 1\n a = b\n depth += 1\n\n name = f\"{funcname}-{token}\"\n dsk[(name, 0)] = (\n cov_corr_agg,\n [(a, i) for i in range(k)],\n df.columns,\n min_periods,\n corr,\n scalar,\n )\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n if scalar:\n return Scalar(graph, name, \"f8\")\n meta = make_meta(\n [(c, \"f8\") for c in df.columns], index=df.columns, parent_meta=df._meta\n )\n return DataFrame(graph, name, meta, (df.columns[0], df.columns[-1]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_chunk_cov_corr_chunk.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_chunk_cov_corr_chunk.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6442, "end_line": 6473, "span_ids": ["cov_corr_chunk"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr_chunk(df, corr=False):\n \"\"\"Chunk part of a covariance or correlation computation\"\"\"\n shape = (df.shape[1], df.shape[1])\n df = df.astype(\"float64\", copy=False)\n sums = np.zeros_like(df.values, shape=shape)\n counts = np.zeros_like(df.values, shape=shape)\n for idx, col in enumerate(df):\n mask = df.iloc[:, idx].notnull()\n sums[idx] = df[mask].sum().values\n counts[idx] = df[mask].count().values\n cov = df.cov().values\n dtype = [(\"sum\", sums.dtype), (\"count\", counts.dtype), (\"cov\", cov.dtype)]\n if corr:\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n mu = (sums / counts).T\n m = np.zeros_like(df.values, shape=shape)\n mask = df.isnull().values\n for idx, x in enumerate(df):\n # Avoid using ufunc.outer (not supported by cupy)\n mu_discrepancy = (\n np.subtract(df.iloc[:, idx].values[:, None], mu[idx][None, :]) ** 2\n )\n mu_discrepancy[mask] = np.nan\n m[idx] 
= np.nansum(mu_discrepancy, axis=0)\n m = m.T\n dtype.append((\"m\", m.dtype))\n\n out = {\"sum\": sums, \"count\": counts, \"cov\": cov * (counts - 1)}\n if corr:\n out[\"m\"] = m\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_combine_cov_corr_combine.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_combine_cov_corr_combine.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6476, "end_line": 6509, "span_ids": ["cov_corr_combine"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr_combine(data_in, corr=False):\n data = {\"sum\": None, \"count\": None, \"cov\": None}\n if corr:\n data[\"m\"] = None\n\n for k in data.keys():\n data[k] = [d[k] for d in data_in]\n data[k] = np.concatenate(data[k]).reshape((len(data[k]),) + data[k][0].shape)\n\n sums = np.nan_to_num(data[\"sum\"])\n counts = data[\"count\"]\n\n cum_sums = np.cumsum(sums, 0)\n cum_counts = np.cumsum(counts, 0)\n\n s1 = cum_sums[:-1]\n s2 = sums[1:]\n n1 = cum_counts[:-1]\n n2 = counts[1:]\n with np.errstate(invalid=\"ignore\"):\n d = (s2 / n2) - (s1 / n1)\n C = np.nansum(\n (n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0\n ) + np.nansum(data[\"cov\"], 0)\n\n out = {\"sum\": cum_sums[-1], \"count\": cum_counts[-1], \"cov\": C}\n\n if corr:\n nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)\n mu = cum_sums[-1] / nobs\n counts_na = np.where(counts, counts, np.nan)\n m = np.nansum(data[\"m\"] + counts * (sums / counts_na - mu) ** 2, axis=0)\n out[\"m\"] = m\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_agg_cov_corr_agg.return.pd_DataFrame_mat_columns": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_cov_corr_agg_cov_corr_agg.return.pd_DataFrame_mat_columns", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5637, "end_line": 5651, "span_ids": ["cov_corr_agg"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):\n out = cov_corr_combine(data, corr)\n counts = 
out[\"count\"]\n C = out[\"cov\"]\n C[counts < min_periods] = np.nan\n if corr:\n m2 = out[\"m\"]\n den = np.sqrt(m2 * m2.T)\n else:\n den = np.where(counts, counts, np.nan) - 1\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n mat = C / den\n if scalar:\n return float(mat[0, 1])\n return pd.DataFrame(mat, columns=cols, index=cols)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_pd_split_pd_split.return._df_iloc_index_i_for_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_pd_split_pd_split.return._df_iloc_index_i_for_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5654, "end_line": 5680, "span_ids": ["pd_split"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pd_split(df, p, random_state=None, shuffle=False):\n \"\"\"Split DataFrame into multiple pieces pseudorandomly\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [2, 3, 4, 5, 6, 7]})\n\n >>> a, b = pd_split(\n ... df, [0.5, 0.5], random_state=123, shuffle=True\n ... ) # roughly 50/50 split\n >>> a\n a b\n 3 4 5\n 0 1 2\n 5 6 7\n >>> b\n a b\n 1 2 3\n 4 5 6\n 2 3 4\n \"\"\"\n p = list(p)\n if shuffle:\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n df = df.sample(frac=1.0, random_state=random_state)\n index = pseudorandom(len(df), p, random_state)\n return [df.iloc[index == i] for i in range(len(p))]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions_repartition_divisions._left_part_of_new_divisi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions_repartition_divisions._left_part_of_new_divisi", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6610, "end_line": 6683, "span_ids": ["repartition_divisions"], "tokens": 680}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_divisions(a, b, name, out1, out2, force=False):\n \"\"\"dask graph to repartition dataframe by new divisions\n\n Parameters\n ----------\n a : tuple\n old divisions\n b : tuple, list\n new divisions\n name : str\n name of old dataframe\n out1 : 
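# NOTE (editor's illustration, not dask source): scalar version of the pairwise
# co-moment merge that `cov_corr_combine` applies per column pair,
#   C = C1 + C2 + n1*n2/(n1+n2) * (meanA2 - meanA1) * (meanB2 - meanB1),
# where C is the sum of products of deviations; `cov_corr_agg` divides by
# (n - 1) at the end to recover the sample covariance.
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
y = np.array([2.0, 1.0, 4.0, 3.0, 6.0, 5.0])

def comoment(a, b):  # sum of products of deviations within one chunk
    return ((a - a.mean()) * (b - b.mean())).sum()

c1, c2 = comoment(x[:3], y[:3]), comoment(x[3:], y[3:])
cross = 3 * 3 / 6 * (x[3:].mean() - x[:3].mean()) * (y[3:].mean() - y[:3].mean())
assert np.isclose((c1 + c2 + cross) / (6 - 1), np.cov(x, y)[0, 1])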
str\n name of temporary splits\n out2 : str\n name of new dataframe\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions' lower and upper bounds must be\n the same as the old divisions.\n\n Examples\n --------\n >>> from pprint import pprint\n >>> pprint(repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')) # doctest: +ELLIPSIS\n {('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),\n ('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),\n ('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),\n ('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, True),\n ('c', 0): (<function concat at ...>, [('b', 0), ('b', 1)]),\n ('c', 1): ('b', 2),\n ('c', 2): ('b', 3)}\n \"\"\"\n check_divisions(b)\n\n if len(b) < 2:\n # minimum division is 2 elements, like [0, 0]\n raise ValueError(\"New division must have at least 2 elements\")\n\n if force:\n if a[0] < b[0]:\n msg = (\n \"left side of the new division must be equal or smaller \"\n \"than old division\"\n )\n raise ValueError(msg)\n if a[-1] > b[-1]:\n msg = (\n \"right side of the new division must be equal or larger \"\n \"than old division\"\n )\n raise ValueError(msg)\n else:\n if a[0] != b[0]:\n msg = \"left side of old and new divisions are different\"\n raise ValueError(msg)\n if a[-1] != b[-1]:\n msg = \"right side of old and new divisions are different\"\n raise ValueError(msg)\n\n def _is_single_last_div(x):\n \"\"\"Whether last division only contains single label\"\"\"\n return len(x) >= 2 and x[-1] == x[-2]\n\n c = [a[0]]\n d = dict()\n low = a[0]\n\n i, j = 1, 1 # indices for old/new divisions\n k = 0 # index for temp divisions\n\n last_elem = _is_single_last_div(a)\n\n # process through old division\n # left part of new division can be processed in this loop\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions.while_i_len_a_and_j__repartition_divisions.return.d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_divisions.while_i_len_a_and_j__repartition_divisions.return.d", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5809, "end_line": 5882, "span_ids": ["repartition_divisions"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_divisions(a, b, name, out1, out2, force=False):\n # ... 
other code\n while i < len(a) and j < len(b):\n if a[i] < b[j]:\n # tuple is something like:\n # (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)\n low = a[i]\n i += 1\n elif a[i] > b[j]:\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)\n low = b[j]\n j += 1\n else:\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)\n low = b[j]\n if len(a) == i + 1 or a[i] < a[i + 1]:\n j += 1\n i += 1\n c.append(low)\n k += 1\n\n # right part of new division can remain\n if a[-1] < b[-1] or b[-1] == b[-2]:\n for _j in range(j, len(b)):\n # always use right-most of old division\n # because it may contain last element\n m = len(a) - 2\n d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)\n low = b[_j]\n c.append(low)\n k += 1\n else:\n # even if new division is processed through,\n # right-most element of old division can remain\n if last_elem and i < len(a):\n d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)\n k += 1\n c.append(a[-1])\n\n # replace last element of tuple with True\n d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)\n\n i, j = 0, 1\n\n last_elem = _is_single_last_div(c)\n\n while j < len(b):\n tmp = []\n while c[i] < b[j]:\n tmp.append((out1, i))\n i += 1\n while (\n last_elem\n and c[i] == b[-1]\n and (b[-1] != b[-2] or j == len(b) - 1)\n and i < k\n ):\n # append if last split is not included\n tmp.append((out1, i))\n i += 1\n if len(tmp) == 0:\n # dummy slice to return empty DataFrame or Series,\n # which retain original data attributes (columns / name)\n d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)\n elif len(tmp) == 1:\n d[(out2, j - 1)] = tmp[0]\n else:\n if not tmp:\n raise ValueError(\n \"check for duplicate partitions\\nold:\\n%s\\n\\n\"\n \"new:\\n%s\\n\\ncombined:\\n%s\" % (pformat(a), pformat(b), pformat(c))\n )\n d[(out2, j - 1)] = (methods.concat, tmp)\n j += 1\n return d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_freq_repartition_freq.return.df_repartition_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_freq_repartition_freq.return.df_repartition_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6373, "end_line": 6394, "span_ids": ["repartition_freq"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_freq(df, freq=None):\n \"\"\"Repartition a timeseries dataframe by a new frequency\"\"\"\n if not isinstance(df.divisions[0], pd.Timestamp):\n raise TypeError(\"Can only repartition on frequency for timeseries\")\n\n freq = _map_freq_to_period_start(freq)\n\n try:\n start = df.divisions[0].ceil(freq)\n except ValueError:\n start = df.divisions[0]\n divisions = 
methods.tolist(\n pd.date_range(start=start, end=df.divisions[-1], freq=freq)\n )\n if not len(divisions):\n divisions = [df.divisions[0], df.divisions[-1]]\n else:\n divisions.append(df.divisions[-1])\n if divisions[0] != df.divisions[0]:\n divisions = [df.divisions[0]] + divisions\n\n return df.repartition(divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_size_total_mem_usage.return.mem_usage": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_size_total_mem_usage.return.mem_usage", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6823, "end_line": 6856, "span_ids": ["total_mem_usage", "repartition_size"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_size(df, size):\n \"\"\"\n Repartition dataframe so that new partitions have approximately `size` memory usage each\n \"\"\"\n if isinstance(size, str):\n size = parse_bytes(size)\n size = int(size)\n\n mem_usages = df.map_partitions(total_mem_usage, deep=True).compute()\n\n # 1. split each partition that is larger than partition_size\n nsplits = 1 + mem_usages // size\n if np.any(nsplits > 1):\n split_name = f\"repartition-split-{size}-{tokenize(df)}\"\n df = _split_partitions(df, nsplits, split_name)\n # update mem_usages to account for the split partitions\n split_mem_usages = []\n for n, usage in zip(nsplits, mem_usages):\n split_mem_usages.extend([usage / n] * n)\n mem_usages = pd.Series(split_mem_usages)\n\n # 2. 
now that all partitions are less than size, concat them up to size\n assert np.all(mem_usages <= size)\n new_npartitions = list(map(len, iter_chunks(mem_usages, size)))\n new_partitions_boundaries = np.cumsum(new_npartitions)\n new_name = f\"repartition-{size}-{tokenize(df)}\"\n return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)\n\n\ndef total_mem_usage(df, index=True, deep=False):\n mem_usage = df.memory_usage(index=index, deep=deep)\n if is_series_like(mem_usage):\n mem_usage = mem_usage.sum()\n return mem_usage", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_npartitions_repartition_npartitions.if_df_npartitions_npar.else_.if_df_known_divisions_and.else_.return._split_partitions_df_nsp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_npartitions_repartition_npartitions.if_df_npartitions_npar.else_.if_df_known_divisions_and.else_.return._split_partitions_df_nsp", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6472, "end_line": 6521, "span_ids": ["repartition_npartitions"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition_npartitions(df, npartitions):\n \"\"\"Repartition dataframe to a smaller number of partitions\"\"\"\n new_name = \"repartition-%d-%s\" % (npartitions, tokenize(df))\n if df.npartitions == npartitions:\n return df\n elif df.npartitions > npartitions:\n npartitions_ratio = df.npartitions / npartitions\n new_partitions_boundaries = [\n int(new_partition_index * npartitions_ratio)\n for new_partition_index in range(npartitions + 1)\n ]\n return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)\n else:\n original_divisions = divisions = pd.Series(df.divisions)\n if df.known_divisions and (\n np.issubdtype(divisions.dtype, np.datetime64)\n or np.issubdtype(divisions.dtype, np.number)\n ):\n if np.issubdtype(divisions.dtype, np.datetime64):\n divisions = divisions.values.astype(\"float64\")\n\n if is_series_like(divisions):\n divisions = divisions.values\n\n n = len(divisions)\n divisions = np.interp(\n x=np.linspace(0, n, npartitions + 1),\n xp=np.linspace(0, n, n),\n fp=divisions,\n )\n if np.issubdtype(original_divisions.dtype, np.datetime64):\n divisions = methods.tolist(\n pd.Series(divisions).astype(original_divisions.dtype)\n )\n elif np.issubdtype(original_divisions.dtype, np.integer):\n divisions = divisions.astype(original_divisions.dtype)\n\n if isinstance(divisions, np.ndarray):\n divisions = divisions.tolist()\n\n divisions = list(divisions)\n divisions[0] = df.divisions[0]\n divisions[-1] = df.divisions[-1]\n\n return df.repartition(divisions=divisions)\n else:\n div, mod = divmod(npartitions, df.npartitions)\n nsplits = [div] * df.npartitions\n nsplits[-1] += mod\n return _split_partitions(df, nsplits, new_name)", "start_char_idx": 
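# NOTE (editor's illustration, not dask source): the sizing arithmetic behind
# `repartition_size`: first split partitions larger than `size`, then coalesce
# neighbours while they fit. `greedy_chunks` is a local stand-in for the
# `iter_chunks` helper used above.
import numpy as np
import pandas as pd

size = 100
mem_usages = pd.Series([250, 40, 30, 90])  # toy per-partition byte counts

nsplits = 1 + mem_usages // size  # the 250-byte partition -> 3 pieces
split = [u / n for n, u in zip(nsplits, mem_usages) for _ in range(n)]

def greedy_chunks(sizes, limit):  # group runs whose sum stays <= limit
    group, total = [], 0
    for s in sizes:
        if group and total + s > limit:
            yield group
            group, total = [], 0
        group.append(s)
        total += s
    if group:
        yield group

boundaries = np.cumsum([len(g) for g in greedy_chunks(split, size)])
assert list(boundaries) == [1, 2, 3, 5, 6]  # new partition boundaries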
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__repartition_from_boundaries__repartition_from_boundaries.return.new_dd_object_graph_new_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__repartition_from_boundaries__repartition_from_boundaries.return.new_dd_object_graph_new_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5995, "end_line": 6009, "span_ids": ["_repartition_from_boundaries"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _repartition_from_boundaries(df, new_partitions_boundaries, new_name):\n if not isinstance(new_partitions_boundaries, list):\n new_partitions_boundaries = list(new_partitions_boundaries)\n if new_partitions_boundaries[0] > 0:\n new_partitions_boundaries.insert(0, 0)\n if new_partitions_boundaries[-1] < df.npartitions:\n new_partitions_boundaries.append(df.npartitions)\n dsk = {}\n for i, (start, end) in enumerate(\n zip(new_partitions_boundaries, new_partitions_boundaries[1:])\n ):\n dsk[new_name, i] = (methods.concat, [(df._name, j) for j in range(start, end)])\n divisions = [df.divisions[i] for i in new_partitions_boundaries]\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])\n return new_dd_object(graph, new_name, df._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__split_partitions__split_partitions.return.new_dd_object_graph_new_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__split_partitions__split_partitions.return.new_dd_object_graph_new_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6928, "end_line": 6962, "span_ids": ["_split_partitions"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _split_partitions(df, nsplits, new_name):\n \"\"\"Split a Dask dataframe into new partitions\n\n Parameters\n ----------\n df: DataFrame or Series\n nsplits: List[int]\n Number of target dataframes for each partition\n The length of nsplits should be the same as df.npartitions\n new_name: str\n\n See Also\n --------\n repartition_npartitions\n repartition_size\n \"\"\"\n if len(nsplits) != df.npartitions:\n raise 
ValueError(f\"nsplits should have len={df.npartitions}\")\n\n dsk = {}\n split_name = f\"split-{tokenize(df, nsplits)}\"\n j = 0\n for i, k in enumerate(nsplits):\n if k == 1:\n dsk[new_name, j] = (df._name, i)\n j += 1\n else:\n dsk[split_name, i] = (split_evenly, (df._name, i), k)\n for jj in range(k):\n dsk[new_name, j] = (getitem, (split_name, i), jj)\n j += 1\n\n divisions = [None] * (1 + sum(nsplits))\n graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])\n return new_dd_object(graph, new_name, df._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_repartition.raise_ValueError_Data_mu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_repartition_repartition.raise_ValueError_Data_mu", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6965, "end_line": 7011, "span_ids": ["repartition"], "tokens": 435}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def repartition(df, divisions=None, force=False):\n \"\"\"Repartition dataframe along new divisions\n\n Dask.DataFrame objects are partitioned along their index. Often when\n multiple dataframes interact we need to align these partitionings. The\n ``repartition`` function constructs a new DataFrame object holding the same\n data but partitioned on different values. 
It does this by performing a\n sequence of ``loc`` and ``concat`` calls to split and merge the previous\n generation of partitions.\n\n Parameters\n ----------\n\n divisions : list\n List of partitions to be used\n force : bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions lower and upper bounds must be\n the same as the old divisions.\n\n Examples\n --------\n\n >>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP\n\n Also works on Pandas objects\n\n >>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP\n \"\"\"\n\n token = tokenize(df, divisions)\n if isinstance(df, _Frame):\n tmp = \"repartition-split-\" + token\n out = \"repartition-merge-\" + token\n dsk = repartition_divisions(\n df.divisions, divisions, df._name, tmp, out, force=force\n )\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[df])\n return new_dd_object(graph, out, df._meta, divisions)\n elif is_dataframe_like(df) or is_series_like(df):\n name = \"repartition-dataframe-\" + token\n from .utils import shard_df_on_index\n\n dfs = shard_df_on_index(df, divisions[1:-1])\n dsk = {(name, i): df for i, df in enumerate(dfs)}\n return new_dd_object(dsk, name, df, divisions)\n raise ValueError(\"Data must be DataFrame or Series\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__reduction_chunk__reduction_aggregate.return.aca_aggregate_x_kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__reduction_chunk__reduction_aggregate.return.aca_aggregate_x_kwargs", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6098, "end_line": 6115, "span_ids": ["_reduction_chunk", "_reduction_aggregate", "_reduction_combine"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _reduction_chunk(x, aca_chunk=None, **kwargs):\n o = aca_chunk(x, **kwargs)\n # Return a dataframe so that the concatenated version is also a dataframe\n return o.to_frame().T if is_series_like(o) else o\n\n\ndef _reduction_combine(x, aca_combine=None, **kwargs):\n if isinstance(x, list):\n x = pd.Series(x)\n o = aca_combine(x, **kwargs)\n # Return a dataframe so that the concatenated version is also a dataframe\n return o.to_frame().T if is_series_like(o) else o\n\n\ndef _reduction_aggregate(x, aca_aggregate=None, **kwargs):\n if isinstance(x, list):\n x = pd.Series(x)\n return aca_aggregate(x, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_chunk_idxmaxmin_chunk.return.pd_DataFrame_idx_idx": {"__data__": {"id_": 
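# NOTE (editor's illustration, not dask source): the apply-concat-apply shape
# behind `_reduction_chunk` / `_reduction_aggregate`, shown on plain pandas
# objects standing in for partitions.
import pandas as pd

partitions = [pd.Series([1, 2, 3]), pd.Series([4, 5]), pd.Series([6])]
chunked = [p.sum() for p in partitions]  # per-partition reduction: [6, 9, 6]
concatenated = pd.Series(chunked)        # "concat" the partial results
assert concatenated.sum() == 21          # final aggregate over the partials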
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_chunk_idxmaxmin_chunk.return.pd_DataFrame_idx_idx", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6118, "end_line": 6127, "span_ids": ["idxmaxmin_chunk"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def idxmaxmin_chunk(x, fn=None, skipna=True):\n minmax = \"max\" if fn == \"idxmax\" else \"min\"\n if len(x) > 0:\n idx = getattr(x, fn)(skipna=skipna)\n value = getattr(x, minmax)(skipna=skipna)\n else:\n idx = value = pd.Series([], dtype=\"i8\")\n if is_series_like(idx):\n return pd.DataFrame({\"idx\": idx, \"value\": value})\n return pd.DataFrame({\"idx\": [idx], \"value\": [value]})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_row_idxmaxmin_row.return.pd_DataFrame_idx_idx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_row_idxmaxmin_row.return.pd_DataFrame_idx_idx_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6130, "end_line": 6138, "span_ids": ["idxmaxmin_row"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def idxmaxmin_row(x, fn=None, skipna=True):\n minmax = \"max\" if fn == \"idxmax\" else \"min\"\n if len(x) > 0:\n x = x.set_index(\"idx\")\n idx = [getattr(x.value, fn)(skipna=skipna)]\n value = [getattr(x.value, minmax)(skipna=skipna)]\n else:\n idx = value = pd.Series([], dtype=\"i8\")\n return pd.DataFrame({\"idx\": idx, \"value\": value})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_combine_safe_head.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_idxmaxmin_combine_safe_head.return.r", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 7057, "end_line": 7100, "span_ids": ["idxmaxmin_combine", "_count_aggregate", "safe_head", "idxmaxmin_agg", "_mode_aggregate"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def idxmaxmin_combine(x, fn=None, skipna=True):\n if len(x) <= 1:\n return x\n return (\n x.groupby(level=0)\n .apply(idxmaxmin_row, fn=fn, skipna=skipna)\n .reset_index(level=1, drop=True)\n )\n\n\ndef idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):\n res = idxmaxmin_combine(x, fn, skipna=skipna)[\"idx\"]\n if len(res) == 0:\n raise ValueError(\"attempt to get argmax of an empty sequence\")\n if scalar:\n return res[0]\n res.name = None\n return res\n\n\ndef _mode_aggregate(df, dropna):\n value_count_series = df.sum()\n max_val = value_count_series.max(skipna=dropna)\n mode_series = (\n value_count_series[value_count_series == max_val]\n .index.to_series()\n .sort_values()\n .reset_index(drop=True)\n )\n return mode_series\n\n\ndef _count_aggregate(x):\n return x.sum().astype(\"int64\")\n\n\ndef safe_head(df, n):\n r = M.head(df, n)\n if len(r) != n:\n warnings.warn(\n f\"Insufficient elements for `head`. {n} elements requested, only {len(r)} \"\n \"elements available. Try passing larger `npartitions` to `head`.\"\n )\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_maybe_shift_divisions_maybe_shift_divisions.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_maybe_shift_divisions_maybe_shift_divisions.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 7103, "end_line": 7132, "span_ids": ["maybe_shift_divisions"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def maybe_shift_divisions(df, periods, freq):\n \"\"\"Maybe shift divisions by periods of size freq\n\n Used to shift the divisions for the `shift` method. If freq isn't a fixed\n size (not anchored or relative), then the divisions are shifted\n appropriately. Otherwise the divisions are cleared.\n\n Parameters\n ----------\n df : dd.DataFrame, dd.Series, or dd.Index\n periods : int\n The number of periods to shift.\n freq : DateOffset, timedelta, or time rule string\n The frequency to shift by.\n \"\"\"\n if isinstance(freq, str):\n freq = pd.tseries.frequencies.to_offset(freq)\n\n is_offset = isinstance(freq, pd.DateOffset)\n if is_offset:\n if freq.is_anchored() or not hasattr(freq, \"delta\"):\n # Can't infer divisions on relative or anchored offsets, as\n # divisions may now split identical index value.\n # (e.g. 
index_partitions = [[1, 2, 3], [3, 4, 5]])\n return df.clear_divisions()\n if df.known_divisions:\n divs = pd.Series(range(len(df.divisions)), index=df.divisions)\n divisions = divs.shift(periods, freq=freq).index\n return df.__class__(df.dask, df._name, df._meta, divisions)\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_new_dd_object_new_dd_object.if_has_parallel_type_meta.else_.return.get_parallel_type_meta_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_new_dd_object_new_dd_object.if_has_parallel_type_meta.else_.return.get_parallel_type_meta_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6798, "end_line": 6822, "span_ids": ["new_dd_object"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def new_dd_object(dsk, name, meta, divisions, parent_meta=None):\n \"\"\"Generic constructor for dask.dataframe objects.\n\n Decides the appropriate output class based on the type of `meta` provided.\n \"\"\"\n if has_parallel_type(meta):\n return get_parallel_type(meta)(dsk, name, meta, divisions)\n elif is_arraylike(meta) and meta.shape:\n import dask.array as da\n\n chunks = ((np.nan,) * (len(divisions) - 1),) + tuple(\n (d,) for d in meta.shape[1:]\n )\n if len(chunks) > 1:\n layer = dsk.layers[name]\n if isinstance(layer, Blockwise):\n layer.new_axes[\"j\"] = chunks[1][0]\n layer.output_indices = layer.output_indices + (\"j\",)\n else:\n suffix = (0,) * (len(chunks) - 1)\n for i in range(len(chunks[0])):\n layer[(name, i) + suffix] = layer.pop((name, i))\n return da.Array(dsk, name=name, chunks=chunks, dtype=meta.dtype)\n else:\n return get_parallel_type(meta)(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_partitionwise_graph_partitionwise_graph.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_partitionwise_graph_partitionwise_graph.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 7382, "end_line": 7450, "span_ids": ["partitionwise_graph"], "tokens": 593}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
partitionwise_graph(func, layer_name, *args, **kwargs):\n \"\"\"\n Apply a function partition-wise across arguments to create layer of a graph\n\n This applies a function, ``func``, in an embarrassingly parallel fashion\n across partitions/chunks in the provided arguments. It handles Dataframes,\n Arrays, and scalars smoothly, and relies on the ``blockwise`` machinery\n to provide a nicely symbolic graph.\n\n It is most commonly used in other graph-building functions to create the\n appropriate layer of the resulting dataframe.\n\n Parameters\n ----------\n func: callable\n layer_name: str\n Descriptive name for the operation. Used as the output name\n in the resulting ``Blockwise`` graph layer.\n *args:\n **kwargs:\n\n Returns\n -------\n out: Blockwise graph\n\n Examples\n --------\n >>> subgraph = partitionwise_graph(function, x, y, z=123) # doctest: +SKIP\n >>> layer = partitionwise_graph(function, df, x, z=123) # doctest: +SKIP\n >>> graph = HighLevelGraph.from_collections(name, layer, dependencies=[df, x]) # doctest: +SKIP\n >>> result = new_dd_object(graph, name, metadata, df.divisions) # doctest: +SKIP\n\n See Also\n --------\n map_partitions\n \"\"\"\n pairs = []\n numblocks = {}\n for arg in args:\n if isinstance(arg, _Frame):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (arg.npartitions,)\n elif isinstance(arg, Scalar):\n pairs.extend([arg._name, \"i\"])\n numblocks[arg._name] = (1,)\n elif isinstance(arg, Array):\n if arg.ndim == 1:\n pairs.extend([arg.name, \"i\"])\n elif arg.ndim == 0:\n pairs.extend([arg.name, \"\"])\n elif arg.ndim == 2:\n pairs.extend([arg.name, \"ij\"])\n else:\n raise ValueError(\"Can't add multi-dimensional array to dataframes\")\n numblocks[arg._name] = arg.numblocks\n elif isinstance(arg, BlockwiseDep):\n if len(arg.numblocks) == 1:\n pairs.extend([arg, \"i\"])\n elif len(arg.numblocks) == 2:\n pairs.extend([arg, \"ij\"])\n else:\n raise ValueError(\n f\"BlockwiseDep arg {arg!r} has {len(arg.numblocks)} dimensions; only 1 or 2 are supported.\"\n )\n else:\n pairs.extend([arg, None])\n return blockwise(\n func, layer_name, \"i\", *pairs, numblocks=numblocks, concatenate=True, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_meta_warning_meta_warning.return.msg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_meta_warning_meta_warning.return.msg", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6394, "end_line": 6418, "span_ids": ["meta_warning"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def meta_warning(df):\n \"\"\"\n Provide an informative message when the user is asked to provide metadata\n \"\"\"\n if is_dataframe_like(df):\n meta_str = {k: str(v) for k, v in df.dtypes.to_dict().items()}\n elif is_series_like(df):\n meta_str = (df.name, str(df.dtype))\n else:\n 
meta_str = None\n msg = (\n \"\\nYou did not provide metadata, so Dask is running your \"\n \"function on a small dataset to guess output types. \"\n \"It is possible that Dask will guess incorrectly.\\n\"\n \"To provide explicit output types or to silence this message, \"\n \"please provide the `meta=` keyword, as described in the map or \"\n \"apply function that you are using.\"\n )\n if meta_str:\n msg += (\n \"\\n\"\n \" Before: .apply(func)\\n\"\n \" After: .apply(func, meta=%s)\\n\" % str(meta_str)\n )\n return msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_prefix_reduction_prefix_reduction.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_prefix_reduction_prefix_reduction.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6421, "end_line": 6480, "span_ids": ["prefix_reduction"], "tokens": 608}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def prefix_reduction(f, ddf, identity, **kwargs):\n \"\"\"Computes the prefix sums of f on ddf\n\n If ddf has partitions [P1, P2, ..., Pn], then returns the DataFrame with\n partitions [f(identity, P1),\n f(f(identity, P1), P2),\n f(f(f(identity, P1), P2), P3),\n ...]\n\n Parameters\n ----------\n f : callable\n an associative function f\n ddf : dd.DataFrame\n identity : pd.DataFrame\n an identity element of f, that is f(identity, df) = f(df, identity) = df\n \"\"\"\n dsk = dict()\n name = \"prefix_reduction-\" + tokenize(f, ddf, identity, **kwargs)\n meta = ddf._meta\n n = len(ddf.divisions) - 1\n divisions = [None] * (n + 1)\n\n N = 1\n while N < n:\n N *= 2\n for i in range(n):\n dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, i), identity], kwargs)\n for i in range(n, N):\n dsk[(name, i, 1, 0)] = identity\n\n d = 1\n while d < N:\n for i in range(0, N, 2 * d):\n dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (\n apply,\n f,\n [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, d, 0)],\n kwargs,\n )\n d *= 2\n\n dsk[(name, N - 1, N, 1)] = identity\n\n while d > 1:\n d //= 2\n for i in range(0, N, 2 * d):\n dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)\n dsk[(name, i + 2 * d - 1, d, 1)] = (\n apply,\n f,\n [(name, i + 2 * d - 1, 2 * d, 1), (name, i + d - 1, d, 0)],\n kwargs,\n )\n\n for i in range(n):\n dsk[(name, i)] = (apply, f, [(name, i, 1, 1), identity], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
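# NOTE (editor's illustration, not dask source): a sequential reference for
# what `prefix_reduction(f, ddf, identity)` computes. The graph above reaches
# the same answers in O(log n) depth with pairwise up/down sweeps, which is
# why `f` must be associative.
def prefix_reduction_reference(f, parts, identity):
    out, acc = [], identity
    for p in parts:
        acc = f(acc, p)  # fold every partition up to and including p
        out.append(acc)
    return out

assert prefix_reduction_reference(lambda a, b: a + b, [1, 2, 3, 4], 0) == [1, 3, 6, 10]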
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_suffix_reduction_suffix_reduction.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_suffix_reduction_suffix_reduction.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6483, "end_line": 6544, "span_ids": ["suffix_reduction"], "tokens": 636}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def suffix_reduction(f, ddf, identity, **kwargs):\n \"\"\"Computes the suffix sums of f on df\n\n If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with\n partitions [f(P1, f(P2, ...f(Pn, identity)...)),\n f(P2, ...f(Pn, identity)...),\n ...f(Pn, identity)...,\n ...]\n\n Parameters\n ----------\n f : callable\n an associative function f\n ddf : dd.DataFrame\n identity : pd.DataFrame\n an identity element of f, that is f(identity, df) = f(df, identity) = df\n kwargs : ??\n keyword arguments of f ??\n \"\"\"\n dsk = dict()\n name = \"suffix_reduction-\" + tokenize(f, ddf, identity, **kwargs)\n meta = ddf._meta\n n = len(ddf.divisions) - 1\n divisions = [None] * (n + 1)\n\n N = 1\n while N < n:\n N *= 2\n for i in range(n):\n dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, n - 1 - i), identity], kwargs)\n for i in range(n, N):\n dsk[(name, i, 1, 0)] = identity\n\n d = 1\n while d < N:\n for i in range(0, N, 2 * d):\n dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (\n apply,\n f,\n [(name, i + 2 * d - 1, d, 0), (name, i + d - 1, d, 0)],\n kwargs,\n )\n d *= 2\n\n dsk[(name, N - 1, N, 1)] = identity\n\n while d > 1:\n d //= 2\n for i in range(0, N, 2 * d):\n dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)\n dsk[(name, i + 2 * d - 1, d, 1)] = (\n apply,\n f,\n [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, 2 * d, 1)],\n kwargs,\n )\n\n for i in range(n):\n dsk[(name, i)] = (apply, f, [(name, n - 1 - i, 1, 1), identity], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/extensions.py___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/extensions.py___", "embedding": null, "metadata": {"file_path": "dask/dataframe/extensions.py", "file_name": "extensions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["docstring"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "\"\"\"\nSupport for pandas ExtensionArray in dask.dataframe.\n\nSee :ref:`extensionarrays` for more.\n\"\"\"\nfrom ..utils import Dispatch\nfrom .accessor import (\n register_dataframe_accessor,\n register_index_accessor,\n register_series_accessor,\n)\n\nmake_array_nonempty = Dispatch(\"make_array_nonempty\")\nmake_scalar = Dispatch(\"make_scalar\")\n\n\n__all__ = [\n \"make_array_nonempty\",\n \"make_scalar\",\n \"register_dataframe_accessor\",\n \"register_index_accessor\",\n \"register_series_accessor\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__maybe_slice__is_aligned.if_is_series_like_by_or_.else_.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__maybe_slice__is_aligned.if_is_series_like_by_or_.else_.return.True", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 97, "end_line": 117, "span_ids": ["_is_aligned", "_maybe_slice"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_slice(grouped, columns):\n \"\"\"\n Slice columns if grouped is pd.DataFrameGroupBy\n \"\"\"\n # FIXME: update with better groupby object detection (i.e.: ngroups, get_group)\n if \"groupby\" in type(grouped).__name__.lower():\n if columns is not None:\n if isinstance(columns, (tuple, list, set, pd.Index)):\n columns = list(columns)\n return grouped[columns]\n return grouped\n\n\ndef _is_aligned(df, by):\n \"\"\"Check if ``df`` and ``by`` have aligned indices\"\"\"\n if is_series_like(by) or is_dataframe_like(by):\n return df.index.equals(by.index)\n elif isinstance(by, (list, tuple)):\n return all(_is_aligned(df, i) for i in by)\n else:\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_raise_unaligned__groupby_raise_unaligned.return.df_groupby_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_raise_unaligned__groupby_raise_unaligned.return.df_groupby_kwargs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 120, "end_line": 157, "span_ids": ["_groupby_raise_unaligned"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def _groupby_raise_unaligned(df, **kwargs):\n \"\"\"Groupby, but raise if df and `by` key are unaligned.\n\n Pandas supports grouping by a column that doesn't align with the input\n frame/series/index. However, the reindexing does not seem to be\n threadsafe, and can result in incorrect results. Since grouping by an\n unaligned key is generally a bad idea, we just error loudly in dask.\n\n For more information see pandas GH issue #15244 and Dask GH issue #1876.\"\"\"\n by = kwargs.get(\"by\", None)\n if by is not None and not _is_aligned(df, by):\n msg = (\n \"Grouping by an unaligned column is unsafe and unsupported.\\n\"\n \"This can be caused by filtering only one of the object or\\n\"\n \"grouping key. For example, the following works in pandas,\\n\"\n \"but not in dask:\\n\"\n \"\\n\"\n \"df[df.foo < 0].groupby(df.bar)\\n\"\n \"\\n\"\n \"This can be avoided by either filtering beforehand, or\\n\"\n \"passing in the name of the column instead:\\n\"\n \"\\n\"\n \"df2 = df[df.foo < 0]\\n\"\n \"df2.groupby(df2.bar)\\n\"\n \"# or\\n\"\n \"df[df.foo < 0].groupby('bar')\\n\"\n \"\\n\"\n \"For more information see dask GH issue #1876.\"\n )\n raise ValueError(msg)\n elif by is not None and len(by):\n # since we're coming through apply, `by` will be a tuple.\n # Pandas treats tuples as a single key, and lists as multiple keys\n # We want multiple keys\n if isinstance(by, str):\n by = [by]\n kwargs.update(by=list(by))\n return df.groupby(**kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_apply__groupby_slice_apply.return.g_apply_func_args_kw": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_apply__groupby_slice_apply.return.g_apply_func_args_kw", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 165, "end_line": 175, "span_ids": ["_groupby_slice_apply"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_slice_apply(\n df, grouper, key, func, *args, group_keys=True, dropna=None, observed=None, **kwargs\n):\n # No need to use raise if unaligned here - this is only called after\n # shuffling, which makes everything aligned already\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n g = df.groupby(grouper, group_keys=group_keys, **observed, **dropna)\n if key:\n g = g[key]\n return g.apply(func, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_transform__groupby_slice_transform.return.g_transform_func_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_transform__groupby_slice_transform.return.g_transform_func_args_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 178, "end_line": 193, "span_ids": ["_groupby_slice_transform"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_slice_transform(\n df, grouper, key, func, *args, group_keys=True, dropna=None, observed=None, **kwargs\n):\n # No need to use raise if unaligned here - this is only called after\n # shuffling, which makes everything aligned already\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n g = df.groupby(grouper, group_keys=group_keys, **observed, **dropna)\n if key:\n g = g[key]\n\n # Cannot call transform on an empty dataframe\n if len(df) == 0:\n return g.apply(func, *args, **kwargs)\n\n return g.transform(func, *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_get_group__groupby_get_group.if_get_key_in_grouped_gro.else_.return.df_iloc_0_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_get_group__groupby_get_group.if_get_key_in_grouped_gro.else_.return.df_iloc_0_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 192, "end_line": 207, "span_ids": ["_groupby_get_group"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_get_group(df, by_key, get_key, columns):\n # SeriesGroupBy may pass df which includes group key\n grouped = _groupby_raise_unaligned(df, by=by_key)\n\n if get_key in grouped.groups:\n if is_dataframe_like(df):\n grouped = grouped[columns]\n return grouped.get_group(get_key)\n\n else:\n # to create empty DataFrame/Series, which has the same\n # dtype as the original\n if is_dataframe_like(df):\n # may be SeriesGroupBy\n df = df[columns]\n return df.iloc[0:0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_31_Aggregation.__init__.self.__name__.name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_31_Aggregation.__init__.self.__name__.name", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 214, "end_line": 276, "span_ids": ["Aggregation", "_groupby_get_group", "Aggregation.__init__"], "tokens": 513}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Aggregation\n###############################################################\n\n\nclass Aggregation:\n \"\"\"User defined groupby-aggregation.\n\n This class allows users to define their own custom aggregation in terms of\n operations on Pandas dataframes in a map-reduce style. You need to specify\n what operation to do on each chunk of data, how to combine those chunks of\n data together, and then how to finalize the result.\n\n See :ref:`dataframe.groupby.aggregate` for more.\n\n Parameters\n ----------\n name : str\n the name of the aggregation. It should be unique, since intermediate\n result will be identified by this name.\n chunk : callable\n a function that will be called with the grouped column of each\n partition. It can either return a single series or a tuple of series.\n The index has to be equal to the groups.\n agg : callable\n a function that will be called to aggregate the results of each chunk.\n Again the argument(s) will be grouped series. If ``chunk`` returned a\n tuple, ``agg`` will be called with all of them as individual positional\n arguments.\n finalize : callable\n an optional finalizer that will be called with the results from the\n aggregation.\n\n Examples\n --------\n We could implement ``sum`` as follows:\n\n >>> custom_sum = dd.Aggregation(\n ... name='custom_sum',\n ... chunk=lambda s: s.sum(),\n ... agg=lambda s0: s0.sum()\n ... ) # doctest: +SKIP\n >>> df.groupby('g').agg(custom_sum) # doctest: +SKIP\n\n We can implement ``mean`` as follows:\n\n >>> custom_mean = dd.Aggregation(\n ... name='custom_mean',\n ... chunk=lambda s: (s.count(), s.sum()),\n ... agg=lambda count, sum: (count.sum(), sum.sum()),\n ... finalize=lambda count, sum: sum / count,\n ... 
) # doctest: +SKIP\n >>> df.groupby('g').agg(custom_mean) # doctest: +SKIP\n\n Though of course, both of these are built-in and so you don't need to\n implement them yourself.\n \"\"\"\n\n def __init__(self, name, chunk, agg, finalize=None):\n self.chunk = chunk\n self.agg = agg\n self.finalize = finalize\n self.__name__ = name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_chunk__var_combine.return.g_groupby_level_levels_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_chunk__var_combine.return.g_groupby_level_levels_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 312, "end_line": 333, "span_ids": ["_var_chunk", "_var_combine"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _var_chunk(df, *by):\n if is_series_like(df):\n df = df.to_frame()\n\n df = df.copy()\n\n g = _groupby_raise_unaligned(df, by=by)\n x = g.sum()\n\n n = g[x.columns].count().rename(columns=lambda c: (c, \"-count\"))\n\n cols = x.columns\n df[cols] = df[cols] ** 2\n\n g2 = _groupby_raise_unaligned(df, by=by)\n x2 = g2.sum().rename(columns=lambda c: (c, \"-x2\"))\n\n return concat([x, x2, n], axis=1)\n\n\ndef _var_combine(g, levels, sort=False):\n return g.groupby(level=levels, sort=sort).sum()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_agg__cov_combine.return.g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__var_agg__cov_combine.return.g", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 338, "end_line": 358, "span_ids": ["_cov_combine", "_var_agg"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _var_agg(g, levels, ddof, sort=False):\n g = g.groupby(level=levels, sort=sort).sum()\n nc = len(g.columns)\n x = g[g.columns[: nc // 3]]\n # chunks columns are tuples (value, name), so we just keep the value part\n x2 = g[g.columns[nc // 3 : 2 * nc // 3]].rename(columns=lambda c: c[0])\n n = g[g.columns[-nc // 3 :]].rename(columns=lambda c: c[0])\n\n # TODO: replace with _finalize_var?\n result = x2 - x**2 / n\n div = n - ddof\n div[div < 0] = 0\n result /= div\n 
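To see why the custom_mean example in the Aggregation docstring is correct, the chunk/agg/finalize protocol can be simulated serially in plain pandas (two invented "partitions"):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "a", "b", "b"])
chunks = [s.iloc[:2], s.iloc[2:]]  # fake partitions

# chunk: per-partition grouped intermediates
counts = pd.concat([c.groupby(level=0).count() for c in chunks])
sums = pd.concat([c.groupby(level=0).sum() for c in chunks])

# agg: re-group the concatenated intermediates and combine them
count = counts.groupby(level=0).sum()
total = sums.groupby(level=0).sum()

# finalize: sum / count equals the exact grouped mean
assert (total / count).equals(s.groupby(level=0).mean())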
result[(n - ddof) == 0] = np.nan\n assert is_dataframe_like(result)\n result[result < 0] = 0 # avoid rounding errors that take us to zero\n return result\n\n\ndef _cov_combine(g, levels):\n return g", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_finalizer__cov_finalizer.return.pd_Series_vals_index_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_finalizer__cov_finalizer.return.pd_Series_vals_index_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 359, "end_line": 391, "span_ids": ["_cov_finalizer"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cov_finalizer(df, cols, std=False):\n vals = []\n num_elements = len(list(it.product(cols, repeat=2)))\n num_cols = len(cols)\n vals = list(range(num_elements))\n col_idx_mapping = dict(zip(cols, range(num_cols)))\n for i, j in it.combinations_with_replacement(df[cols].columns, 2):\n x = col_idx_mapping[i]\n y = col_idx_mapping[j]\n idx = x + num_cols * y\n mul_col = f\"{i}{j}\"\n ni = df[\"%s-count\" % i]\n nj = df[\"%s-count\" % j]\n\n n = np.sqrt(ni * nj)\n div = n - 1\n div[div < 0] = 0\n val = (df[mul_col] - df[i] * df[j] / n).values[0] / div.values[0]\n if std:\n ii = f\"{i}{i}\"\n jj = f\"{j}{j}\"\n std_val_i = (df[ii] - (df[i] ** 2) / ni).values[0] / div.values[0]\n std_val_j = (df[jj] - (df[j] ** 2) / nj).values[0] / div.values[0]\n val = val / np.sqrt(std_val_i * std_val_j)\n\n vals[idx] = val\n if i != j:\n idx = num_cols * x + y\n vals[idx] = val\n\n level_1 = cols\n index = pd.MultiIndex.from_product([level_1, level_1])\n return pd.Series(vals, index=index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_agg__cov_agg.return.s_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_agg__cov_agg.return.s_result", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 495, "span_ids": ["_cov_agg"], "tokens": 462}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cov_agg(_t, levels, ddof, std=False, sort=False):\n sums = []\n muls = []\n counts = []\n\n # sometime we get a series back 
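_var_chunk and _var_agg above implement the one-pass identity var = (sum(x**2) - sum(x)**2 / n) / (n - ddof), with the clipping lines guarding against small negative values from floating-point round-off. A quick numpy check of the identity (data invented):

import numpy as np

x = np.array([1.0, 2.0, 4.0, 7.0])
n, ddof = len(x), 1
one_pass = ((x ** 2).sum() - x.sum() ** 2 / n) / (n - ddof)
assert np.isclose(one_pass, x.var(ddof=1))  # both give 7.0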
from concat combiner\n t = list(_t)\n\n cols = t[0][0].columns\n for x, mul, n, col_mapping in t:\n sums.append(x)\n muls.append(mul)\n counts.append(n)\n col_mapping = col_mapping\n\n total_sums = concat(sums).groupby(level=levels, sort=sort).sum()\n total_muls = concat(muls).groupby(level=levels, sort=sort).sum()\n total_counts = concat(counts).groupby(level=levels).sum()\n result = (\n concat([total_sums, total_muls, total_counts], axis=1)\n .groupby(level=levels)\n .apply(_cov_finalizer, cols=cols, std=std)\n )\n\n inv_col_mapping = {v: k for k, v in col_mapping.items()}\n idx_vals = result.index.names\n idx_mapping = list()\n\n # when index is None we probably have selected a particular column\n # df.groupby('a')[['b']].cov()\n if len(idx_vals) == 1 and all(n is None for n in idx_vals):\n idx_vals = list(inv_col_mapping.keys() - set(total_sums.columns))\n\n for idx, val in enumerate(idx_vals):\n idx_name = inv_col_mapping.get(val, val)\n idx_mapping.append(idx_name)\n\n if len(result.columns.levels[0]) < len(col_mapping):\n # removing index from col_mapping (produces incorrect multiindexes)\n try:\n col_mapping.pop(idx_name)\n except KeyError:\n # when slicing the col_map will not have the index\n pass\n\n keys = list(col_mapping.keys())\n for level in range(len(result.columns.levels)):\n result.columns = result.columns.set_levels(keys, level=level)\n\n result.index.set_names(idx_mapping, inplace=True)\n\n # stacking can lead to a sorted index\n s_result = result.stack(dropna=False)\n assert is_dataframe_like(s_result)\n return s_result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_34__nunique_df_chunk.return.grouped": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_None_34__nunique_df_chunk.return.grouped", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 510, "end_line": 536, "span_ids": ["_drop_duplicates_reindex", "_cov_agg", "_nunique_df_chunk"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# nunique\n###############################################################\ndef _drop_duplicates_reindex(df):\n # Fix index in a groupby().apply() context\n # https://github.com/dask/dask/issues/8137\n # https://github.com/pandas-dev/pandas/issues/43568\n result = df.drop_duplicates()\n result.index = [0] * len(result)\n return result\n\n\ndef _nunique_df_chunk(df, *by, **kwargs):\n name = kwargs.pop(\"name\")\n\n g = _groupby_raise_unaligned(df, by=by)\n if len(df) > 0:\n grouped = (\n g[[name]].apply(_drop_duplicates_reindex).reset_index(level=-1, drop=True)\n )\n else:\n # Manually create empty version, since groupby-apply for empty frame\n # results in df with no columns\n grouped = g[[name]].nunique()\n grouped = 
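_cov_finalizer above applies, per group and per column pair, the standard estimate cov(x, y) = (sum(x*y) - sum(x)*sum(y)/n) / (n - 1), and when std=True divides by the two standard deviations to obtain a correlation. A numpy sanity check of the covariance formula (arrays invented):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 5.0])
y = np.array([2.0, 1.0, 4.0, 6.0])
n = len(x)
cov = ((x * y).sum() - x.sum() * y.sum() / n) / (n - 1)
assert np.isclose(cov, np.cov(x, y, ddof=1)[0, 1])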
grouped.astype(df.dtypes[grouped.columns].to_dict())\n\n return grouped", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_spec__normalize_spec.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_spec__normalize_spec.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 578, "end_line": 661, "span_ids": ["_normalize_spec"], "tokens": 820}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_spec(spec, non_group_columns):\n \"\"\"\n Return a list of ``(result_column, func, input_column)`` tuples.\n\n Spec can be\n\n - a function\n - a list of functions\n - a dictionary that maps input-columns to functions\n - a dictionary that maps input-columns to a lists of functions\n - a dictionary that maps input-columns to a dictionaries that map\n output-columns to functions.\n\n The non-group columns are a list of all column names that are not used in\n the groupby operation.\n\n Usually, the result columns are mutli-level names, returned as tuples.\n If only a single function is supplied or dictionary mapping columns\n to single functions, simple names are returned as strings (see the first\n two examples below).\n\n Examples\n --------\n >>> _normalize_spec('mean', ['a', 'b', 'c'])\n [('a', 'mean', 'a'), ('b', 'mean', 'b'), ('c', 'mean', 'c')]\n\n >>> spec = collections.OrderedDict([('a', 'mean'), ('b', 'count')])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n [('a', 'mean', 'a'), ('b', 'count', 'b')]\n\n >>> _normalize_spec(['var', 'mean'], ['a', 'b', 'c'])\n ... # doctest: +NORMALIZE_WHITESPACE\n [(('a', 'var'), 'var', 'a'), (('a', 'mean'), 'mean', 'a'), \\\n (('b', 'var'), 'var', 'b'), (('b', 'mean'), 'mean', 'b'), \\\n (('c', 'var'), 'var', 'c'), (('c', 'mean'), 'mean', 'c')]\n\n >>> spec = collections.OrderedDict([('a', 'mean'), ('b', ['sum', 'count'])])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n ... # doctest: +NORMALIZE_WHITESPACE\n [(('a', 'mean'), 'mean', 'a'), (('b', 'sum'), 'sum', 'b'), \\\n (('b', 'count'), 'count', 'b')]\n\n >>> spec = collections.OrderedDict()\n >>> spec['a'] = ['mean', 'size']\n >>> spec['b'] = collections.OrderedDict([('e', 'count'), ('f', 'var')])\n >>> _normalize_spec(spec, ['a', 'b', 'c'])\n ... 
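The nunique chunk above deliberately keeps deduplicated values rather than counts: only after duplicates have been dropped again across partitions can the final count be exact. A serial sketch of that two-stage dedup (partitions invented):

import pandas as pd

parts = [pd.DataFrame({"g": ["a", "a"], "v": [1, 2]}),
         pd.DataFrame({"g": ["a"], "v": [2]})]

# chunk: dedup within each partition; combine: dedup again, then count
dedup = pd.concat([p.drop_duplicates() for p in parts]).drop_duplicates()
assert dedup.groupby("g")["v"].count().loc["a"] == 2  # not 3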
# doctest: +NORMALIZE_WHITESPACE\n [(('a', 'mean'), 'mean', 'a'), (('a', 'size'), 'size', 'a'), \\\n (('b', 'e'), 'count', 'b'), (('b', 'f'), 'var', 'b')]\n \"\"\"\n if not isinstance(spec, dict):\n spec = collections.OrderedDict(zip(non_group_columns, it.repeat(spec)))\n\n res = []\n\n if isinstance(spec, dict):\n for input_column, subspec in spec.items():\n if isinstance(subspec, dict):\n res.extend(\n ((input_column, result_column), func, input_column)\n for result_column, func in subspec.items()\n )\n\n else:\n if not isinstance(subspec, list):\n subspec = [subspec]\n\n res.extend(\n ((input_column, funcname(func)), func, input_column)\n for func in subspec\n )\n\n else:\n raise ValueError(f\"unsupported agg spec of type {type(spec)}\")\n\n compounds = (list, tuple, dict)\n use_flat_columns = not any(\n isinstance(subspec, compounds) for subspec in spec.values()\n )\n\n if use_flat_columns:\n res = [(input_col, func, input_col) for (_, func, input_col) in res]\n\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args__build_agg_args.return.chunks_aggs_finalizers": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args__build_agg_args.return.chunks_aggs_finalizers", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 664, "end_line": 720, "span_ids": ["_build_agg_args"], "tokens": 459}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args(spec):\n \"\"\"\n Create transformation functions for a normalized aggregate spec.\n\n Parameters\n ----------\n spec: a list of (result-column, aggregation-function, input-column) triples.\n To work with all argument forms understood by pandas use\n ``_normalize_spec`` to normalize the argment before passing it on to\n ``_build_agg_args``.\n\n Returns\n -------\n chunk_funcs: a list of (intermediate-column, function, keyword) triples\n that are applied on grouped chunks of the initial dataframe.\n\n agg_funcs: a list of (intermediate-column, functions, keyword) triples that\n are applied on the grouped concatination of the preprocessed chunks.\n\n finalizers: a list of (result-column, function, keyword) triples that are\n applied after the ``agg_funcs``. 
They are used to create final results\n from intermediate representations.\n \"\"\"\n known_np_funcs = {np.min: \"min\", np.max: \"max\"}\n\n # check that there are no name conflicts for a single input column\n by_name = {}\n for _, func, input_column in spec:\n key = funcname(known_np_funcs.get(func, func)), input_column\n by_name.setdefault(key, []).append((func, input_column))\n\n for funcs in by_name.values():\n if len(funcs) != 1:\n raise ValueError(f\"conflicting aggregation functions: {funcs}\")\n\n chunks = {}\n aggs = {}\n finalizers = []\n\n for (result_column, func, input_column) in spec:\n if not isinstance(func, Aggregation):\n func = funcname(known_np_funcs.get(func, func))\n\n impls = _build_agg_args_single(result_column, func, input_column)\n\n # overwrite existing result-columns, generate intermediates only once\n for spec in impls[\"chunk_funcs\"]:\n chunks[spec[0]] = spec\n for spec in impls[\"aggregate_funcs\"]:\n aggs[spec[0]] = spec\n\n finalizers.append(impls[\"finalizer\"])\n\n chunks = sorted(chunks.values())\n aggs = sorted(aggs.values())\n\n return chunks, aggs, finalizers", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_simple__build_agg_args_simple.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_simple__build_agg_args_simple.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 747, "end_line": 767, "span_ids": ["_build_agg_args_simple"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_simple(result_column, func, input_column, impl_pair):\n intermediate = _make_agg_id(func, input_column)\n chunk_impl, agg_impl = impl_pair\n\n return dict(\n chunk_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=input_column, func=chunk_impl),\n )\n ],\n aggregate_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=intermediate, func=agg_impl),\n )\n ],\n finalizer=(result_column, itemgetter(intermediate), dict()),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_var__build_agg_args_var.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_var__build_agg_args_var.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 770, "end_line": 790, "span_ids": ["_build_agg_args_var"], "tokens": 202}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_var(result_column, func, input_column):\n int_sum = _make_agg_id(\"sum\", input_column)\n int_sum2 = _make_agg_id(\"sum2\", input_column)\n int_count = _make_agg_id(\"count\", input_column)\n\n return dict(\n chunk_funcs=[\n (int_sum, _apply_func_to_column, dict(column=input_column, func=M.sum)),\n (int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),\n (int_sum2, _compute_sum_of_squares, dict(column=input_column)),\n ],\n aggregate_funcs=[\n (col, _apply_func_to_column, dict(column=col, func=M.sum))\n for col in (int_sum, int_count, int_sum2)\n ],\n finalizer=(\n result_column,\n _finalize_var,\n dict(sum_column=int_sum, count_column=int_count, sum2_column=int_sum2),\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_std__build_agg_args_mean.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_std__build_agg_args_mean.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 793, "end_line": 820, "span_ids": ["_build_agg_args_std", "_build_agg_args_mean"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_std(result_column, func, input_column):\n impls = _build_agg_args_var(result_column, func, input_column)\n\n result_column, _, kwargs = impls[\"finalizer\"]\n impls[\"finalizer\"] = (result_column, _finalize_std, kwargs)\n\n return impls\n\n\ndef _build_agg_args_mean(result_column, func, input_column):\n int_sum = _make_agg_id(\"sum\", input_column)\n int_count = _make_agg_id(\"count\", input_column)\n\n return dict(\n chunk_funcs=[\n (int_sum, _apply_func_to_column, dict(column=input_column, func=M.sum)),\n (int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),\n ],\n aggregate_funcs=[\n (col, _apply_func_to_column, dict(column=col, func=M.sum))\n for col in (int_sum, int_count)\n ],\n finalizer=(\n result_column,\n _finalize_mean,\n dict(sum_column=int_sum, count_column=int_count),\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_custom__build_agg_args_custom.return.dict_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_custom__build_agg_args_custom.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 823, "end_line": 844, "span_ids": ["_build_agg_args_custom"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_custom(result_column, func, input_column):\n col = _make_agg_id(funcname(func), input_column)\n\n if func.finalize is None:\n finalizer = (result_column, operator.itemgetter(col), dict())\n\n else:\n finalizer = (\n result_column,\n _apply_func_to_columns,\n dict(func=func.finalize, prefix=col),\n )\n\n return dict(\n chunk_funcs=[\n (col, _apply_func_to_column, dict(func=func.chunk, column=input_column))\n ],\n aggregate_funcs=[\n (col, _apply_func_to_columns, dict(func=func.agg, prefix=col))\n ],\n finalizer=finalizer,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__compute_sum_of_squares__compute_sum_of_squares.return.df_groupby_keys_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__compute_sum_of_squares__compute_sum_of_squares.return.df_groupby_keys_sum_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 897, "end_line": 906, "span_ids": ["_compute_sum_of_squares"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compute_sum_of_squares(grouped, column):\n # Note: CuDF cannot use `groupby.apply`.\n # Need to unpack groupby to compute sum of squares\n if hasattr(grouped, \"grouper\"):\n keys = grouped.grouper\n else:\n # Handle CuDF groupby object (different from pandas)\n keys = grouped.grouping.keys\n df = grouped.obj[column].pow(2) if column else grouped.obj.pow(2)\n return df.groupby(keys).sum()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__agg_finalize__cumcount_aggregate.return.a_add_b_fill_value_fill_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__agg_finalize__cumcount_aggregate.return.a_add_b_fill_value_fill_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": 
"groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 950, "end_line": 1023, "span_ids": ["_agg_finalize", "_cum_agg_filled", "_finalize_var", "_finalize_std", "_apply_func_to_column", "_cumcount_aggregate", "_apply_func_to_columns", "_finalize_mean", "_cum_agg_aligned"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _agg_finalize(df, aggregate_funcs, finalize_funcs, level, sort=False, **kwargs):\n # finish the final aggregation level\n df = _groupby_apply_funcs(\n df, funcs=aggregate_funcs, level=level, sort=sort, **kwargs\n )\n\n # and finalize the result\n result = collections.OrderedDict()\n for result_column, func, finalize_kwargs in finalize_funcs:\n result[result_column] = func(df, **finalize_kwargs)\n\n return df.__class__(result)\n\n\ndef _apply_func_to_column(df_like, column, func):\n if column is None:\n return func(df_like)\n\n return func(df_like[column])\n\n\ndef _apply_func_to_columns(df_like, prefix, func):\n if is_dataframe_like(df_like):\n columns = df_like.columns\n else:\n # handle GroupBy objects\n columns = df_like.obj.columns\n\n columns = sorted(col for col in columns if col.startswith(prefix))\n\n columns = [df_like[col] for col in columns]\n return func(*columns)\n\n\ndef _finalize_mean(df, sum_column, count_column):\n return df[sum_column] / df[count_column]\n\n\ndef _finalize_var(df, count_column, sum_column, sum2_column, ddof=1):\n n = df[count_column]\n x = df[sum_column]\n x2 = df[sum2_column]\n\n result = x2 - x**2 / n\n div = n - ddof\n div[div < 0] = 0\n result /= div\n result[(n - ddof) == 0] = np.nan\n\n return result\n\n\ndef _finalize_std(df, count_column, sum_column, sum2_column, ddof=1):\n result = _finalize_var(df, count_column, sum_column, sum2_column, ddof)\n return np.sqrt(result)\n\n\ndef _cum_agg_aligned(part, cum_last, index, columns, func, initial):\n align = cum_last.reindex(part.set_index(index).index, fill_value=initial)\n align.index = part.index\n return func(part[columns], align)\n\n\ndef _cum_agg_filled(a, b, func, initial):\n union = a.index.union(b.index)\n return func(\n a.reindex(union, fill_value=initial),\n b.reindex(union, fill_value=initial),\n fill_value=initial,\n )\n\n\ndef _cumcount_aggregate(a, b, fill_value=None):\n return a.add(b, fill_value=fill_value) + 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._meta_nonempty__GroupBy._meta_nonempty.return._maybe_slice_grouped_sel": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._meta_nonempty__GroupBy._meta_nonempty.return._maybe_slice_grouped_sel", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1127, "end_line": 1152, "span_ids": ["_GroupBy._meta_nonempty"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @property\n def _meta_nonempty(self):\n \"\"\"\n Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.\n \"\"\"\n sample = self.obj._meta_nonempty\n\n if isinstance(self.by, list):\n by_meta = [\n item._meta_nonempty if isinstance(item, Series) else item\n for item in self.by\n ]\n\n elif isinstance(self.by, Series):\n by_meta = self.by._meta_nonempty\n\n else:\n by_meta = self.by\n\n grouped = sample.groupby(\n by_meta,\n group_keys=self.group_keys,\n **self.observed,\n **self.dropna,\n )\n return _maybe_slice(grouped, self._slice)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._aca_agg__GroupBy._aca_agg.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._aca_agg__GroupBy._aca_agg.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1154, "end_line": 1202, "span_ids": ["_GroupBy._aca_agg"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n def _aca_agg(\n self,\n token,\n func,\n aggfunc=None,\n meta=None,\n split_every=None,\n split_out=1,\n chunk_kwargs={},\n aggregate_kwargs={},\n ):\n if aggfunc is None:\n aggfunc = func\n\n if meta is None:\n meta = func(self._meta_nonempty)\n\n columns = meta.name if is_series_like(meta) else meta.columns\n\n token = self._token_prefix + token\n levels = _determine_levels(self.by)\n\n return aca(\n [self.obj, self.by]\n if not isinstance(self.by, list)\n else [self.obj] + self.by,\n chunk=_apply_chunk,\n chunk_kwargs=dict(\n chunk=func,\n columns=columns,\n **self.observed,\n **chunk_kwargs,\n **self.dropna,\n ),\n aggregate=_groupby_aggregate,\n meta=meta,\n token=token,\n split_every=split_every,\n aggregate_kwargs=dict(\n aggfunc=aggfunc,\n levels=levels,\n **self.observed,\n **aggregate_kwargs,\n **self.dropna,\n ),\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._cum_agg__GroupBy._cum_agg.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy._cum_agg__GroupBy._cum_agg.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1214, "end_line": 1301, "span_ids": ["_GroupBy._cum_agg"], "tokens": 695}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n def _cum_agg(self, token, chunk, aggregate, initial):\n \"\"\"Wrapper for cumulative groupby operation\"\"\"\n meta = chunk(self._meta)\n columns = meta.name if is_series_like(meta) else meta.columns\n by = self.by if isinstance(self.by, list) else [self.by]\n\n name = self._token_prefix + token\n name_part = name + \"-map\"\n name_last = name + \"-take-last\"\n name_cum = name + \"-cum-last\"\n\n # cumulate each partitions\n cumpart_raw = map_partitions(\n _apply_chunk,\n self.obj,\n *by,\n chunk=chunk,\n columns=columns,\n token=name_part,\n meta=meta,\n **self.dropna,\n )\n\n cumpart_raw_frame = (\n cumpart_raw.to_frame() if is_series_like(meta) else cumpart_raw\n )\n\n cumpart_ext = cumpart_raw_frame.assign(\n **{\n i: self.obj[i]\n if np.isscalar(i) and i in getattr(self.obj, \"columns\", [])\n else self.obj.index\n for i in by\n }\n )\n\n # Use pd.Grouper objects to specify that we are grouping by columns.\n # Otherwise, pandas will throw an ambiguity warning if the\n # DataFrame's index (self.obj.index) was included in the grouping\n # specification (self.by). See pandas #14432\n by_groupers = [pd.Grouper(key=ind) for ind in by]\n cumlast = map_partitions(\n _apply_chunk,\n cumpart_ext,\n *by_groupers,\n columns=0 if columns is None else columns,\n chunk=M.last,\n meta=meta,\n token=name_last,\n **self.dropna,\n )\n\n # aggregate cumulated partitions and its previous last element\n _hash = tokenize(self, token, chunk, aggregate, initial)\n name += \"-\" + _hash\n name_cum += \"-\" + _hash\n dask = {}\n dask[(name, 0)] = (cumpart_raw._name, 0)\n\n for i in range(1, self.obj.npartitions):\n # store each cumulative step to graph to reduce computation\n if i == 1:\n dask[(name_cum, i)] = (cumlast._name, i - 1)\n else:\n # aggregate with previous cumulation results\n dask[(name_cum, i)] = (\n _cum_agg_filled,\n (name_cum, i - 1),\n (cumlast._name, i - 1),\n aggregate,\n initial,\n )\n dask[(name, i)] = (\n _cum_agg_aligned,\n (cumpart_ext._name, i),\n (name_cum, i),\n by,\n 0 if columns is None else columns,\n aggregate,\n initial,\n )\n\n dependencies = [cumpart_raw]\n if self.obj.npartitions > 1:\n dependencies += [cumpart_ext, cumlast]\n\n graph = HighLevelGraph.from_collections(name, dask, dependencies=dependencies)\n return new_dd_object(graph, name, chunk(self._meta), self.obj.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cumsum__GroupBy.mean.return.s_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cumsum__GroupBy.mean.return.s_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 1319, "end_line": 1409, "span_ids": ["_GroupBy.cumprod", "_GroupBy.idxmin", "_GroupBy.mean", "_GroupBy.sum", "_GroupBy.max", "_GroupBy.count", "_GroupBy.idxmax", "_GroupBy.cumcount", "_GroupBy.cumsum", "_GroupBy.prod", "_GroupBy.min"], "tokens": 754}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumsum(self, axis=0):\n if axis:\n return self.obj.cumsum(axis=axis)\n else:\n return self._cum_agg(\"cumsum\", chunk=M.cumsum, aggregate=M.add, initial=0)\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumprod(self, axis=0):\n if axis:\n return self.obj.cumprod(axis=axis)\n else:\n return self._cum_agg(\"cumprod\", chunk=M.cumprod, aggregate=M.mul, initial=1)\n\n @derived_from(pd.core.groupby.GroupBy)\n def cumcount(self, axis=None):\n return self._cum_agg(\n \"cumcount\", chunk=M.cumcount, aggregate=_cumcount_aggregate, initial=-1\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def sum(self, split_every=None, split_out=1, min_count=None):\n result = self._aca_agg(\n token=\"sum\", func=M.sum, split_every=split_every, split_out=split_out\n )\n if min_count:\n return result.where(self.count() >= min_count, other=np.NaN)\n else:\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def prod(self, split_every=None, split_out=1, min_count=None):\n result = self._aca_agg(\n token=\"prod\", func=M.prod, split_every=split_every, split_out=split_out\n )\n if min_count:\n return result.where(self.count() >= min_count, other=np.NaN)\n else:\n return result\n\n @derived_from(pd.core.groupby.GroupBy)\n def min(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"min\", func=M.min, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def max(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"max\", func=M.max, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.DataFrame)\n def idxmin(self, split_every=None, split_out=1, axis=None, skipna=True):\n return self._aca_agg(\n token=\"idxmin\",\n func=M.idxmin,\n aggfunc=M.first,\n split_every=split_every,\n split_out=split_out,\n chunk_kwargs=dict(skipna=skipna),\n )\n\n @derived_from(pd.DataFrame)\n def idxmax(self, split_every=None, split_out=1, axis=None, skipna=True):\n return self._aca_agg(\n token=\"idxmax\",\n func=M.idxmax,\n aggfunc=M.first,\n split_every=split_every,\n split_out=split_out,\n chunk_kwargs=dict(skipna=skipna),\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def count(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"count\",\n func=M.count,\n aggfunc=M.sum,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def mean(self, split_every=None, split_out=1):\n s = self.sum(split_every=split_every, split_out=split_out)\n c = self.count(split_every=split_every, split_out=split_out)\n if is_dataframe_like(s):\n c = c[s.columns]\n return s / c", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.size__GroupBy.var.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.size__GroupBy.var.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1428, "end_line": 1462, "span_ids": ["_GroupBy.var", "_GroupBy.size"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @derived_from(pd.core.groupby.GroupBy)\n def size(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"size\",\n func=M.size,\n aggfunc=M.sum,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def var(self, ddof=1, split_every=None, split_out=1):\n levels = _determine_levels(self.by)\n result = aca(\n [self.obj, self.by]\n if not isinstance(self.by, list)\n else [self.obj] + self.by,\n chunk=_var_chunk,\n aggregate=_var_agg,\n combine=_var_combine,\n token=self._token_prefix + \"var\",\n aggregate_kwargs={\"ddof\": ddof, \"levels\": levels},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n if isinstance(self.obj, Series):\n result = result[result.columns[0]]\n if self._slice:\n result = result[self._slice]\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.std__GroupBy.corr.return.self_cov_split_every_spli": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.std__GroupBy.corr.return.self_cov_split_every_spli", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1447, "end_line": 1458, "span_ids": ["_GroupBy.std", "_GroupBy.corr"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @derived_from(pd.core.groupby.GroupBy)\n def std(self, ddof=1, split_every=None, split_out=1):\n v = self.var(ddof, split_every=split_every, split_out=split_out)\n result = map_partitions(np.sqrt, v, meta=v)\n return result\n\n @derived_from(pd.DataFrame)\n def corr(self, ddof=1, split_every=None, split_out=1):\n \"\"\"Groupby correlation:\n corr(X, Y) = cov(X, Y) / (std_x * std_y)\n \"\"\"\n return self.cov(split_every=split_every, split_out=split_out, std=True)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cov__GroupBy.cov.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.cov__GroupBy.cov.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1477, "end_line": 1520, "span_ids": ["_GroupBy.cov"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @derived_from(pd.DataFrame)\n def cov(self, ddof=1, split_every=None, split_out=1, std=False):\n \"\"\"Groupby covariance is accomplished by\n\n 1. Computing intermediate values for sum, count, and the product of\n all columns: a b c -> a*a, a*b, b*b, b*c, c*c.\n\n 2. The values are then aggregated and the final covariance value is calculated:\n cov(X, Y) = X*Y - Xbar * Ybar\n\n When `std` is True calculate Correlation\n \"\"\"\n\n levels = _determine_levels(self.by)\n\n is_mask = any(is_series_like(s) for s in self.by)\n if self._slice:\n if is_mask:\n self.obj = self.obj[self._slice]\n else:\n sliced_plus = list(self._slice) + list(self.by)\n self.obj = self.obj[sliced_plus]\n\n result = aca(\n [self.obj, self.by]\n if not isinstance(self.by, list)\n else [self.obj] + self.by,\n chunk=_cov_chunk,\n aggregate=_cov_agg,\n combine=_cov_combine,\n token=self._token_prefix + \"cov\",\n aggregate_kwargs={\"ddof\": ddof, \"levels\": levels, \"std\": std},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )\n\n if isinstance(self.obj, Series):\n result = result[result.columns[0]]\n if self._slice:\n result = result[self._slice]\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.first__GroupBy.last.return.self__aca_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.first__GroupBy.last.return.self__aca_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1505, "end_line": 1515, "span_ids": ["_GroupBy.last", "_GroupBy.first"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @derived_from(pd.core.groupby.GroupBy)\n def first(self, split_every=None, 
split_out=1):\n return self._aca_agg(\n token=\"first\", func=M.first, split_every=split_every, split_out=split_out\n )\n\n @derived_from(pd.core.groupby.GroupBy)\n def last(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"last\", func=M.last, split_every=split_every, split_out=split_out\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.get_group__GroupBy.get_group.return.map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.get_group__GroupBy.get_group.return.map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1534, "end_line": 1551, "span_ids": ["_GroupBy.get_group"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @derived_from(pd.core.groupby.GroupBy)\n def get_group(self, key):\n token = self._token_prefix + \"get_group\"\n\n meta = self._meta.obj\n if is_dataframe_like(meta) and self._slice is not None:\n meta = meta[self._slice]\n columns = meta.columns if is_dataframe_like(meta) else meta.name\n\n return map_partitions(\n _groupby_get_group,\n self.obj,\n self.by,\n key,\n columns,\n meta=meta,\n token=token,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.aggregate__GroupBy.aggregate.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.aggregate__GroupBy.aggregate.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1553, "end_line": 1642, "span_ids": ["_GroupBy.aggregate"], "tokens": 685}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n def aggregate(self, arg, split_every, split_out=1):\n if isinstance(self.obj, DataFrame):\n if isinstance(self.by, tuple) or np.isscalar(self.by):\n group_columns = {self.by}\n\n elif isinstance(self.by, list):\n group_columns = {\n i for i in self.by if isinstance(i, tuple) or np.isscalar(i)\n }\n\n else:\n group_columns = set()\n\n if self._slice:\n # pandas doesn't exclude the grouping column in a SeriesGroupBy\n # like df.groupby('a')['a'].agg(...)\n non_group_columns = self._slice\n if not 
isinstance(non_group_columns, list):\n                    non_group_columns = [non_group_columns]\n            else:\n                # NOTE: this step relies on the by normalization to replace\n                # series with their name.\n                non_group_columns = [\n                    col for col in self.obj.columns if col not in group_columns\n                ]\n\n            spec = _normalize_spec(arg, non_group_columns)\n\n        elif isinstance(self.obj, Series):\n            if isinstance(arg, (list, tuple, dict)):\n                # implementation detail: if self.obj is a series, a pseudo column\n                # None is used to denote the series itself. This pseudo column is\n                # removed from the result columns before passing the spec along.\n                spec = _normalize_spec({None: arg}, [])\n                spec = [\n                    (result_column, func, input_column)\n                    for ((_, result_column), func, input_column) in spec\n                ]\n\n            else:\n                spec = _normalize_spec({None: arg}, [])\n                spec = [\n                    (self.obj.name, func, input_column)\n                    for (_, func, input_column) in spec\n                ]\n\n        else:\n            raise ValueError(f\"aggregate on unknown object {self.obj}\")\n\n        chunk_funcs, aggregate_funcs, finalizers = _build_agg_args(spec)\n\n        if isinstance(self.by, (tuple, list)) and len(self.by) > 1:\n            levels = list(range(len(self.by)))\n        else:\n            levels = 0\n\n        if not isinstance(self.by, list):\n            chunk_args = [self.obj, self.by]\n\n        else:\n            chunk_args = [self.obj] + self.by\n\n        if not PANDAS_GT_110 and self.dropna:\n            raise NotImplementedError(\n                \"dropna is not a valid argument for dask.groupby.agg \"\n                f\"if pandas < 1.1.0. Pandas version is {pd.__version__}\"\n            )\n\n        return aca(\n            chunk_args,\n            chunk=_groupby_apply_funcs,\n            chunk_kwargs=dict(funcs=chunk_funcs, **self.observed, **self.dropna),\n            combine=_groupby_apply_funcs,\n            combine_kwargs=dict(\n                funcs=aggregate_funcs, level=levels, **self.observed, **self.dropna\n            ),\n            aggregate=_agg_finalize,\n            aggregate_kwargs=dict(\n                aggregate_funcs=aggregate_funcs,\n                finalize_funcs=finalizers,\n                level=levels,\n                **self.observed,\n                **self.dropna,\n            ),\n            token=\"aggregate\",\n            split_every=split_every,\n            split_out=split_out,\n            split_out_setup=split_out_on_index,\n            sort=self.sort,\n        )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.apply__GroupBy.apply.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.apply__GroupBy.apply.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1644, "end_line": 1728, "span_ids": ["_GroupBy.apply"], "tokens": 695}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n    @insert_meta_param_description(pad=12)\n    def apply(self, func, *args, **kwargs):\n        \"\"\"Parallel version of pandas GroupBy.apply\n\n        This mimics the pandas version except for the following:\n\n        1. If the grouper does not align with the index then this causes a full\n        shuffle. The order of rows within each group may not be preserved.\n        2. 
Dask's GroupBy.apply is not appropriate for aggregations. For custom\n        aggregations, use :class:`dask.dataframe.groupby.Aggregation`.\n\n        .. warning::\n\n           Pandas' groupby-apply can be used to apply arbitrary functions,\n           including aggregations that result in one row per group. Dask's\n           groupby-apply will apply ``func`` once on each group, doing a shuffle\n           if needed, such that each group is contained in one partition.\n           When ``func`` is a reduction, you'll end up with one row\n           per group. To apply a custom aggregation with Dask,\n           use :class:`dask.dataframe.groupby.Aggregation`.\n\n        Parameters\n        ----------\n        func: function\n            Function to apply\n        args, kwargs : Scalar, Delayed or object\n            Arguments and keywords to pass to the function.\n        $META\n\n        Returns\n        -------\n        applied : Series or DataFrame depending on columns keyword\n        \"\"\"\n        meta = kwargs.get(\"meta\", no_default)\n\n        if meta is no_default:\n            with raise_on_meta_error(f\"groupby.apply({funcname(func)})\", udf=True):\n                meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)\n                meta = self._meta_nonempty.apply(func, *meta_args, **meta_kwargs)\n\n            msg = (\n                \"`meta` is not specified, inferred from partial data. \"\n                \"Please provide `meta` if the result is unexpected.\\n\"\n                \"  Before: .apply(func)\\n\"\n                \"  After:  .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n                \"  or:     .apply(func, meta=('x', 'f8')) for series result\"\n            )\n            warnings.warn(msg, stacklevel=2)\n\n        meta = make_meta(meta, parent_meta=self._meta.obj)\n\n        # Validate self.by\n        if isinstance(self.by, list) and any(\n            isinstance(item, Series) for item in self.by\n        ):\n            raise NotImplementedError(\n                \"groupby-apply with multiple Series is currently not supported\"\n            )\n\n        df = self.obj\n        should_shuffle = not (df.known_divisions and df._contains_index_name(self.by))\n\n        if should_shuffle:\n            df2, by = self._shuffle(meta)\n        else:\n            df2 = df\n            by = self.by\n\n        # Perform embarrassingly parallel groupby-apply\n        kwargs[\"meta\"] = meta\n        df3 = map_partitions(\n            _groupby_slice_apply,\n            df2,\n            by,\n            self._slice,\n            func,\n            token=funcname(func),\n            *args,\n            group_keys=self.group_keys,\n            **self.observed,\n            **self.dropna,\n            **kwargs,\n        )\n\n        return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.transform__GroupBy.transform.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.transform__GroupBy.transform.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1730, "end_line": 1814, "span_ids": ["_GroupBy.transform"], "tokens": 691}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n    @insert_meta_param_description(pad=12)\n    def transform(self, func, *args, **kwargs):\n        \"\"\"Parallel version of pandas GroupBy.transform\n\n        This mimics the pandas version 
except for the following:\n\n        1. If the grouper does not align with the index then this causes a full\n        shuffle. The order of rows within each group may not be preserved.\n        2. Dask's GroupBy.transform is not appropriate for aggregations. For custom\n        aggregations, use :class:`dask.dataframe.groupby.Aggregation`.\n\n        .. warning::\n\n           Pandas' groupby-transform can be used to apply arbitrary functions,\n           including aggregations that result in one row per group. Dask's\n           groupby-transform will apply ``func`` once on each group, doing a shuffle\n           if needed, such that each group is contained in one partition.\n           When ``func`` is a reduction, you'll end up with one row\n           per group. To apply a custom aggregation with Dask,\n           use :class:`dask.dataframe.groupby.Aggregation`.\n\n        Parameters\n        ----------\n        func: function\n            Function to apply\n        args, kwargs : Scalar, Delayed or object\n            Arguments and keywords to pass to the function.\n        $META\n\n        Returns\n        -------\n        applied : Series or DataFrame depending on columns keyword\n        \"\"\"\n        meta = kwargs.get(\"meta\", no_default)\n\n        if meta is no_default:\n            with raise_on_meta_error(f\"groupby.transform({funcname(func)})\", udf=True):\n                meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)\n                meta = self._meta_nonempty.transform(func, *meta_args, **meta_kwargs)\n\n            msg = (\n                \"`meta` is not specified, inferred from partial data. \"\n                \"Please provide `meta` if the result is unexpected.\\n\"\n                \"  Before: .transform(func)\\n\"\n                \"  After:  .transform(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n                \"  or:     .transform(func, meta=('x', 'f8')) for series result\"\n            )\n            warnings.warn(msg, stacklevel=2)\n\n        meta = make_meta(meta, parent_meta=self._meta.obj)\n\n        # Validate self.by\n        if isinstance(self.by, list) and any(\n            isinstance(item, Series) for item in self.by\n        ):\n            raise NotImplementedError(\n                \"groupby-transform with multiple Series is currently not supported\"\n            )\n\n        df = self.obj\n        should_shuffle = not (df.known_divisions and df._contains_index_name(self.by))\n\n        if should_shuffle:\n            df2, by = self._shuffle(meta)\n        else:\n            df2 = df\n            by = self.by\n\n        # Perform embarrassingly parallel groupby-transform\n        kwargs[\"meta\"] = meta\n        df3 = map_partitions(\n            _groupby_slice_transform,\n            df2,\n            by,\n            self._slice,\n            func,\n            token=funcname(func),\n            *args,\n            group_keys=self.group_keys,\n            **self.observed,\n            **self.dropna,\n            **kwargs,\n        )\n\n        return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_DataFrameGroupBy_DataFrameGroupBy.agg.return.self_aggregate_arg_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_DataFrameGroupBy_DataFrameGroupBy.agg.return.self_aggregate_arg_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1967, "end_line": 2008, "span_ids": ["DataFrameGroupBy.__getitem__", "DataFrameGroupBy.__dir__", "DataFrameGroupBy.__getattr__", "DataFrameGroupBy.aggregate", "DataFrameGroupBy.agg", "DataFrameGroupBy"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameGroupBy(_GroupBy):\n _token_prefix = \"dataframe-groupby-\"\n\n def __getitem__(self, key):\n if isinstance(key, list):\n g = DataFrameGroupBy(\n self.obj, by=self.by, slice=key, sort=self.sort, **self.dropna\n )\n else:\n g = SeriesGroupBy(\n self.obj, by=self.by, slice=key, sort=self.sort, **self.dropna\n )\n\n # error is raised from pandas\n g._meta = g._meta[key]\n return g\n\n def __dir__(self):\n return sorted(\n set(\n dir(type(self))\n + list(self.__dict__)\n + list(filter(M.isidentifier, self.obj.columns))\n )\n )\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError as e:\n raise AttributeError(e) from e\n\n @derived_from(pd.core.groupby.DataFrameGroupBy)\n def aggregate(self, arg, split_every=None, split_out=1):\n if arg == \"size\":\n return self.size()\n\n return super().aggregate(arg, split_every=split_every, split_out=split_out)\n\n @derived_from(pd.core.groupby.DataFrameGroupBy)\n def agg(self, arg, split_every=None, split_out=1):\n return self.aggregate(arg, split_every=split_every, split_out=split_out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.nunique_SeriesGroupBy.nunique.return.aca_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.nunique_SeriesGroupBy.nunique.return.aca_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2036, "end_line": 2072, "span_ids": ["SeriesGroupBy.nunique"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def nunique(self, split_every=None, split_out=1):\n \"\"\"\n Examples\n --------\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> d = {'col1': [1, 2, 3, 4], 'col2': [5, 6, 7, 8]}\n >>> df = pd.DataFrame(data=d)\n >>> ddf = dd.from_pandas(df, 2)\n >>> ddf.groupby(['col1']).col2.nunique().compute()\n \"\"\"\n name = self._meta.obj.name\n levels = _determine_levels(self.by)\n\n if isinstance(self.obj, DataFrame):\n chunk = _nunique_df_chunk\n\n else:\n chunk = _nunique_series_chunk\n\n return aca(\n [self.obj, self.by]\n if not isinstance(self.by, list)\n else [self.obj] + self.by,\n chunk=chunk,\n aggregate=_nunique_df_aggregate,\n combine=_nunique_df_combine,\n token=\"series-groupby-nunique\",\n chunk_kwargs={\"levels\": levels, \"name\": name},\n aggregate_kwargs={\"levels\": levels, \"name\": name},\n combine_kwargs={\"levels\": levels},\n split_every=split_every,\n split_out=split_out,\n split_out_setup=split_out_on_index,\n sort=self.sort,\n )", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_compute_hll_array_compute_hll_array.return.series_reindex_np_arange_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_compute_hll_array_compute_hll_array.return.series_reindex_np_arange_", "embedding": null, "metadata": {"file_path": "dask/dataframe/hyperloglog.py", "file_name": "hyperloglog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 25, "end_line": 49, "span_ids": ["compute_hll_array"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_hll_array(obj, b):\n # b is the number of bits\n\n if not 8 <= b <= 16:\n raise ValueError(\"b should be between 8 and 16\")\n num_bits_discarded = 32 - b\n m = 1 << b\n\n # Get an array of the hashes\n hashes = hash_pandas_object(obj, index=False)\n if isinstance(hashes, pd.Series):\n hashes = hashes._values\n hashes = hashes.astype(np.uint32)\n\n # Of the first b bits, which is the first nonzero?\n j = hashes >> num_bits_discarded\n first_bit = compute_first_bit(hashes)\n\n # Pandas can do the max aggregation\n df = pd.DataFrame({\"j\": j, \"first_bit\": first_bit})\n series = df.groupby(\"j\").max()[\"first_bit\"]\n\n # Return a dense array so we can concat them and get a result\n # that is easy to deal with\n return series.reindex(np.arange(m), fill_value=0).values.astype(np.uint8)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_reduce_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py_reduce_state_", "embedding": null, "metadata": {"file_path": "dask/dataframe/hyperloglog.py", "file_name": "hyperloglog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 52, "end_line": 81, "span_ids": ["reduce_state", "estimate_count"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduce_state(Ms, b):\n m = 1 << b\n\n # We concatenated all of the states, now we need to get the max\n # value for each j in both\n Ms = Ms.reshape((len(Ms) // m), m)\n return Ms.max(axis=0)\n\n\ndef estimate_count(Ms, b):\n m = 1 << b\n\n # Combine one last time\n M = reduce_state(Ms, b)\n\n # Estimate cardinality, no adjustments\n alpha = 0.7213 / (1 + 1.079 / m)\n E = alpha * m / (2.0 ** -(M.astype(\"f8\"))).sum() * m\n # ^^^^ starts as unsigned, need a signed type for\n # negation operator to do something useful\n\n # 
Apply adjustments for small / big cardinalities, if applicable\n if E < 2.5 * m:\n V = (M == 0).sum()\n if V:\n return m * np.log(m / V)\n if E > 2**32 / 30.0:\n return -(2**32) * np.log1p(-E / 2**32)\n return E", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__iLocIndexer__iLocIndexer._iloc.return.self_obj_map_partitions_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__iLocIndexer__iLocIndexer._iloc.return.self_obj_map_partitions_m", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 73, "span_ids": ["_iLocIndexer._iloc", "_iLocIndexer", "_iLocIndexer.__getitem__", "_iLocIndexer._meta_indexer"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _iLocIndexer(_IndexerBase):\n @property\n def _meta_indexer(self):\n return self.obj._meta.iloc\n\n def __getitem__(self, key):\n\n # dataframe\n msg = (\n \"'DataFrame.iloc' only supports selecting columns. \"\n \"It must be used like 'df.iloc[:, column_indexer]'.\"\n )\n if not isinstance(key, tuple):\n raise NotImplementedError(msg)\n\n if len(key) > 2:\n raise ValueError(\"Too many indexers\")\n\n iindexer, cindexer = key\n\n if iindexer != slice(None):\n raise NotImplementedError(msg)\n\n if not self.obj.columns.is_unique:\n # if there are any duplicate column names, do an iloc\n return self._iloc(iindexer, cindexer)\n else:\n # otherwise dispatch to dask.dataframe.core.DataFrame.__getitem__\n col_names = self.obj.columns[cindexer]\n return self.obj.__getitem__(col_names)\n\n def _iloc(self, iindexer, cindexer):\n assert iindexer == slice(None)\n meta = self._make_meta(iindexer, cindexer)\n\n return self.obj.map_partitions(methods.iloc, cindexer, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer__LocIndexer.__getitem__.return.self__loc_iindexer_cinde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer__LocIndexer.__getitem__.return.self__loc_iindexer_cinde", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 77, "end_line": 99, "span_ids": ["_LocIndexer.__getitem__", "_LocIndexer._meta_indexer", "_LocIndexer"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n \"\"\"Helper class for the .loc accessor\"\"\"\n\n @property\n def _meta_indexer(self):\n return self.obj._meta.loc\n\n def __getitem__(self, key):\n\n if isinstance(key, tuple):\n # multi-dimensional selection\n if len(key) > self.obj.ndim:\n # raise from pandas\n msg = \"Too many indexers\"\n raise pd.core.indexing.IndexingError(msg)\n\n iindexer = key[0]\n cindexer = key[1]\n else:\n # if self.obj is Series, cindexer is always None\n iindexer = key\n cindexer = None\n return self._loc(iindexer, cindexer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc__LocIndexer._loc.if_self_obj_known_divisio.else_.return.self_obj_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc__LocIndexer._loc.if_self_obj_known_divisio.else_.return.self_obj_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 102, "end_line": 140, "span_ids": ["_LocIndexer._loc"], "tokens": 388}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc(self, iindexer, cindexer):\n \"\"\"Helper function for the .loc accessor\"\"\"\n if isinstance(iindexer, Series):\n return self._loc_series(iindexer, cindexer)\n elif isinstance(iindexer, Array):\n return self._loc_array(iindexer, cindexer)\n elif callable(iindexer):\n return self._loc(iindexer(self.obj), cindexer)\n\n if self.obj.known_divisions:\n iindexer = self._maybe_partial_time_string(iindexer)\n\n if isinstance(iindexer, slice):\n return self._loc_slice(iindexer, cindexer)\n elif isinstance(iindexer, (list, np.ndarray)):\n return self._loc_list(iindexer, cindexer)\n elif is_series_like(iindexer) and not is_bool_dtype(iindexer.dtype):\n return self._loc_list(iindexer.values, cindexer)\n else:\n # element should raise KeyError\n return self._loc_element(iindexer, cindexer)\n else:\n if isinstance(iindexer, (list, np.ndarray)) or (\n is_series_like(iindexer) and not is_bool_dtype(iindexer.dtype)\n ):\n # applying map_partitions to each partition\n # results in duplicated NaN rows\n msg = (\n \"Cannot index with list against unknown division. 
\"\n \"Try setting divisions using ``ddf.set_index``\"\n )\n raise KeyError(msg)\n elif not isinstance(iindexer, slice):\n iindexer = slice(iindexer, iindexer)\n\n meta = self._make_meta(iindexer, cindexer)\n return self.obj.map_partitions(\n methods.try_loc, iindexer, cindexer, meta=meta\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._maybe_partial_time_string__LocIndexer._loc_array.return.self__loc_series_iindexer": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._maybe_partial_time_string__LocIndexer._loc_array.return.self__loc_series_iindexer", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 142, "end_line": 164, "span_ids": ["_LocIndexer._loc_array", "_LocIndexer._maybe_partial_time_string", "_LocIndexer._loc_series"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _maybe_partial_time_string(self, iindexer):\n \"\"\"\n Convert index-indexer for partial time string slicing\n if obj.index is DatetimeIndex / PeriodIndex\n \"\"\"\n idx = meta_nonempty(self.obj._meta.index)\n iindexer = _maybe_partial_time_string(idx, iindexer)\n return iindexer\n\n def _loc_series(self, iindexer, cindexer):\n if not is_bool_dtype(iindexer.dtype):\n raise KeyError(\n \"Cannot index with non-boolean dask Series. Try passing computed \"\n \"values instead (e.g. 
``ddf.loc[iindexer.compute()]``)\"\n )\n meta = self._make_meta(iindexer, cindexer)\n return self.obj.map_partitions(\n methods.loc, iindexer, cindexer, token=\"loc-series\", meta=meta\n )\n\n def _loc_array(self, iindexer, cindexer):\n iindexer_series = iindexer.to_dask_dataframe(\"_\", self.obj.index)\n return self._loc_series(iindexer_series, cindexer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_list__LocIndexer._loc_list.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_list__LocIndexer._loc_list.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 172, "span_ids": ["_LocIndexer._loc_list"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc_list(self, iindexer, cindexer):\n name = \"loc-%s\" % tokenize(iindexer, self.obj)\n parts = self._get_partitions(iindexer)\n meta = self._make_meta(iindexer, cindexer)\n\n if len(iindexer):\n dsk = {}\n divisions = []\n items = sorted(parts.items())\n for i, (div, indexer) in enumerate(items):\n dsk[name, i] = (methods.loc, (self._name, div), indexer, cindexer)\n # append minimum value as division\n divisions.append(sorted(indexer)[0])\n # append maximum value of the last division\n divisions.append(sorted(items[-1][1])[-1])\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])\n else:\n divisions = [None, None]\n dsk = {(name, 0): meta.head(0)}\n graph = HighLevelGraph.from_collections(name, dsk)\n return new_dd_object(graph, name, meta=meta, divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_element__LocIndexer._coerce_loc_index.return._coerce_loc_index_self_ob": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_element__LocIndexer._coerce_loc_index.return._coerce_loc_index_self_ob", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 174, "end_line": 202, "span_ids": ["_LocIndexer._loc_element", "_LocIndexer._coerce_loc_index", "_LocIndexer._get_partitions"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc_element(self, iindexer, cindexer):\n name = \"loc-%s\" % tokenize(iindexer, self.obj)\n part = self._get_partitions(iindexer)\n\n if iindexer < self.obj.divisions[0] or iindexer > self.obj.divisions[-1]:\n raise KeyError(\"the label [%s] is not in the index\" % str(iindexer))\n\n dsk = {\n (name, 0): (\n methods.loc,\n (self._name, part),\n slice(iindexer, iindexer),\n cindexer,\n )\n }\n\n meta = self._make_meta(iindexer, cindexer)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])\n return new_dd_object(graph, name, meta=meta, divisions=[iindexer, iindexer])\n\n def _get_partitions(self, keys):\n if isinstance(keys, (list, np.ndarray)):\n return _partitions_of_index_values(self.obj.divisions, keys)\n else:\n # element\n return _partition_of_index_value(self.obj.divisions, keys)\n\n def _coerce_loc_index(self, key):\n return _coerce_loc_index(self.obj.divisions, key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_slice__LocIndexer._loc_slice.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__LocIndexer._loc_slice__LocIndexer._loc_slice.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 204, "end_line": 283, "span_ids": ["_LocIndexer._loc_slice"], "tokens": 625}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LocIndexer(_IndexerBase):\n\n def _loc_slice(self, iindexer, cindexer):\n name = \"loc-%s\" % tokenize(iindexer, cindexer, self)\n\n assert isinstance(iindexer, slice)\n assert iindexer.step in (None, 1)\n\n if iindexer.start is not None:\n start = self._get_partitions(iindexer.start)\n else:\n start = 0\n if iindexer.stop is not None:\n stop = self._get_partitions(iindexer.stop)\n else:\n stop = self.obj.npartitions - 1\n\n if iindexer.start is None and self.obj.known_divisions:\n istart = self.obj.divisions[0]\n else:\n istart = self._coerce_loc_index(iindexer.start)\n if iindexer.stop is None and self.obj.known_divisions:\n istop = self.obj.divisions[-1]\n else:\n istop = self._coerce_loc_index(iindexer.stop)\n\n if stop == start:\n dsk = {\n (name, 0): (\n methods.loc,\n (self._name, start),\n slice(iindexer.start, iindexer.stop),\n cindexer,\n )\n }\n divisions = [istart, istop]\n else:\n dsk = {\n (name, 0): (\n methods.loc,\n (self._name, start),\n slice(iindexer.start, None),\n cindexer,\n )\n }\n for i in range(1, stop - start):\n if cindexer is None:\n dsk[name, i] = (self._name, start + i)\n else:\n dsk[name, i] = (\n methods.loc,\n (self._name, start + i),\n slice(None, None),\n cindexer,\n )\n\n dsk[name, stop - start] = (\n methods.loc,\n 
(self._name, stop),\n slice(None, iindexer.stop),\n cindexer,\n )\n\n if iindexer.start is None:\n div_start = self.obj.divisions[0]\n else:\n div_start = max(istart, self.obj.divisions[start])\n\n if iindexer.stop is None:\n div_stop = self.obj.divisions[-1]\n else:\n div_stop = min(istop, self.obj.divisions[stop + 1])\n\n divisions = (\n (div_start,) + self.obj.divisions[start + 1 : stop + 1] + (div_stop,)\n )\n\n assert len(divisions) == len(dsk) + 1\n\n meta = self._make_meta(iindexer, cindexer)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])\n return new_dd_object(graph, name, meta=meta, divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partition_of_index_value__partition_of_index_value.return.min_len_divisions_2_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partition_of_index_value__partition_of_index_value.return.min_len_divisions_2_m", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 286, "end_line": 303, "span_ids": ["_partition_of_index_value"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _partition_of_index_value(divisions, val):\n \"\"\"In which partition does this value lie?\n\n >>> _partition_of_index_value([0, 5, 10], 3)\n 0\n >>> _partition_of_index_value([0, 5, 10], 8)\n 1\n >>> _partition_of_index_value([0, 5, 10], 100)\n 1\n >>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions\n 1\n \"\"\"\n if divisions[0] is None:\n msg = \"Can not use loc on DataFrame without known divisions\"\n raise ValueError(msg)\n val = _coerce_loc_index(divisions, val)\n i = bisect.bisect_right(divisions, val)\n return min(len(divisions) - 2, max(0, i - 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_pandas_read_text_pandas_read_text.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_pandas_read_text_pandas_read_text.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 144, "end_line": 193, "span_ids": ["pandas_read_text"], "tokens": 324}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
pandas_read_text(\n reader,\n b,\n header,\n kwargs,\n dtypes=None,\n columns=None,\n write_header=True,\n enforce=False,\n path=None,\n):\n \"\"\"Convert a block of bytes to a Pandas DataFrame\n\n Parameters\n ----------\n reader : callable\n ``pd.read_csv`` or ``pd.read_table``.\n b : bytestring\n The content to be parsed with ``reader``\n header : bytestring\n An optional header to prepend to ``b``\n kwargs : dict\n A dictionary of keyword arguments to be passed to ``reader``\n dtypes : dict\n dtypes to assign to columns\n path : tuple\n A tuple containing path column name, path to file, and an ordered list of paths.\n\n See Also\n --------\n dask.dataframe.csv.read_pandas_from_bytes\n \"\"\"\n bio = BytesIO()\n if write_header and not b.startswith(header.rstrip()):\n bio.write(header)\n bio.write(b)\n bio.seek(0)\n df = reader(bio, **kwargs)\n if dtypes:\n coerce_dtypes(df, dtypes)\n\n if enforce and columns and (list(df.columns) != list(columns)):\n raise ValueError(\"Columns do not match\", df.columns, columns)\n if path:\n colname, path, paths = path\n code = paths.index(path)\n df = df.assign(\n **{colname: pd.Categorical.from_codes(np.full(len(df), code), paths)}\n )\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_coerce_dtypes_coerce_dtypes.if_bad_dtypes_or_bad_date.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_coerce_dtypes_coerce_dtypes.if_bad_dtypes_or_bad_date.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 196, "end_line": 284, "span_ids": ["coerce_dtypes"], "tokens": 804}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def coerce_dtypes(df, dtypes):\n \"\"\"Coerce dataframe to dtypes safely\n\n Operates in place\n\n Parameters\n ----------\n df: Pandas DataFrame\n dtypes: dict like {'x': float}\n \"\"\"\n bad_dtypes = []\n bad_dates = []\n errors = []\n for c in df.columns:\n if c in dtypes and df.dtypes[c] != dtypes[c]:\n actual = df.dtypes[c]\n desired = dtypes[c]\n if is_float_dtype(actual) and is_integer_dtype(desired):\n bad_dtypes.append((c, actual, desired))\n elif is_object_dtype(actual) and is_datetime64_any_dtype(desired):\n # This can only occur when parse_dates is specified, but an\n # invalid date is encountered. Pandas then silently falls back\n # to object dtype. 
Since `object_array.astype(datetime)` will\n # silently overflow, error here and report.\n bad_dates.append(c)\n else:\n try:\n df[c] = df[c].astype(dtypes[c])\n except Exception as e:\n bad_dtypes.append((c, actual, desired))\n errors.append((c, e))\n\n if bad_dtypes:\n if errors:\n ex = \"\\n\".join(\n f\"- {c}\\n {e!r}\" for c, e in sorted(errors, key=lambda x: str(x[0]))\n )\n exceptions = (\n \"The following columns also raised exceptions on \"\n \"conversion:\\n\\n%s\\n\\n\"\n ) % ex\n extra = \"\"\n else:\n exceptions = \"\"\n # All mismatches are int->float, also suggest `assume_missing=True`\n extra = (\n \"\\n\\nAlternatively, provide `assume_missing=True` \"\n \"to interpret\\n\"\n \"all unspecified integer columns as floats.\"\n )\n\n bad_dtypes = sorted(bad_dtypes, key=lambda x: str(x[0]))\n table = asciitable([\"Column\", \"Found\", \"Expected\"], bad_dtypes)\n dtype_kw = \"dtype={%s}\" % \",\\n \".join(\n f\"{k!r}: '{v}'\" for (k, v, _) in bad_dtypes\n )\n\n dtype_msg = (\n \"{table}\\n\\n\"\n \"{exceptions}\"\n \"Usually this is due to dask's dtype inference failing, and\\n\"\n \"*may* be fixed by specifying dtypes manually by adding:\\n\\n\"\n \"{dtype_kw}\\n\\n\"\n \"to the call to `read_csv`/`read_table`.\"\n \"{extra}\"\n ).format(table=table, exceptions=exceptions, dtype_kw=dtype_kw, extra=extra)\n else:\n dtype_msg = None\n\n if bad_dates:\n also = \" also \" if bad_dtypes else \" \"\n cols = \"\\n\".join(\"- %s\" % c for c in bad_dates)\n date_msg = (\n \"The following columns{also}failed to properly parse as dates:\\n\\n\"\n \"{cols}\\n\\n\"\n \"This is usually due to an invalid value in that column. To\\n\"\n \"diagnose and fix it's recommended to drop these columns from the\\n\"\n \"`parse_dates` keyword, and manually convert them to dates later\\n\"\n \"using `dd.to_datetime`.\"\n ).format(also=also, cols=cols)\n else:\n date_msg = None\n\n if bad_dtypes or bad_dates:\n rule = \"\\n\\n%s\\n\\n\" % (\"-\" * 61)\n msg = \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\\n%s\" % (\n rule.join(filter(None, [dtype_msg, date_msg]))\n )\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_READ_DOC_TEMPLATE_READ_DOC_TEMPLATE._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_READ_DOC_TEMPLATE_READ_DOC_TEMPLATE._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 653, "end_line": 725, "span_ids": ["impl:7"], "tokens": 833}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "READ_DOC_TEMPLATE = \"\"\"\nRead {file_type} files into a Dask.DataFrame\n\nThis parallelizes the :func:`pandas.{reader}` function in the following ways:\n\n- It supports loading many files at once using globstrings:\n\n >>> df = dd.{reader}('myfiles.*.csv') # doctest: +SKIP\n\n- In some cases it can break up large files:\n\n >>> df = 
dd.{reader}('largefile.csv', blocksize=25e6) # 25MB chunks # doctest: +SKIP\n\n- It can read CSV files from external resources (e.g. S3, HDFS) by\n providing a URL:\n\n >>> df = dd.{reader}('s3://bucket/myfiles.*.csv') # doctest: +SKIP\n >>> df = dd.{reader}('hdfs:///myfiles.*.csv') # doctest: +SKIP\n >>> df = dd.{reader}('hdfs://namenode.example.com/myfiles.*.csv') # doctest: +SKIP\n\nInternally ``dd.{reader}`` uses :func:`pandas.{reader}` and supports many of the\nsame keyword arguments with the same performance guarantees. See the docstring\nfor :func:`pandas.{reader}` for more information on available keyword arguments.\n\nParameters\n----------\nurlpath : string or list\n Absolute or relative filepath(s). Prefix with a protocol like ``s3://``\n to read from alternative filesystems. To read from multiple files you\n can pass a globstring or a list of paths, with the caveat that they\n must all have the same protocol.\nblocksize : str, int or None, optional\n Number of bytes by which to cut up larger files. Default value is computed\n based on available physical memory and the number of cores, up to a maximum\n of 64MB. Can be a number like ``64000000`` or a string like ``\"64MB\"``. If\n ``None``, a single block is used for each file.\nsample : int, optional\n Number of bytes to use when determining dtypes\nassume_missing : bool, optional\n If True, all integer columns that aren't specified in ``dtype`` are assumed\n to contain missing values, and are converted to floats. Default is False.\nstorage_options : dict, optional\n Extra options that make sense for a particular storage connection, e.g.\n host, port, username, password, etc.\ninclude_path_column : bool or str, optional\n Whether or not to include the path to each particular file. If True a new\n column is added to the dataframe called ``path``. If str, sets new column\n name. Default is False.\n**kwargs\n Extra keyword arguments to forward to :func:`pandas.{reader}`.\n\nNotes\n-----\nDask dataframe tries to infer the ``dtype`` of each column by reading a sample\nfrom the start of the file (or of the first file if it's a glob). Usually this\nworks fine, but if the ``dtype`` is different later in the file (or in other\nfiles) this can cause issues. For example, if all the rows in the sample had\ninteger dtypes, but later on there was a ``NaN``, then this would error at\ncompute time. To fix this, you have a few options:\n\n- Provide explicit dtypes for the offending columns using the ``dtype``\n keyword. This is the recommended solution.\n\n- Use the ``assume_missing`` keyword to assume that all columns inferred as\n integers contain missing values, and convert them to floats.\n\n- Increase the size of the sample using the ``sample`` keyword.\n\nIt should also be noted that this function may fail if a {file_type} file\nincludes quoted strings that contain the line terminator. 
To get around this\nyou can specify ``blocksize=None`` to not split files into multiple partitions,\nat the cost of reduced parallelism.\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv_to_csv._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv_to_csv._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 675, "end_line": 787, "span_ids": ["to_csv"], "tokens": 955}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_csv(\n df,\n filename,\n single_file=False,\n encoding=\"utf-8\",\n mode=\"wt\",\n name_function=None,\n compression=None,\n compute=True,\n scheduler=None,\n storage_options=None,\n header_first_partition_only=None,\n compute_kwargs=None,\n **kwargs,\n):\n \"\"\"\n Store Dask DataFrame to CSV files\n\n One filename per partition will be created. You can specify the\n filenames in a variety of ways.\n\n Use a globstring::\n\n >>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP\n\n The * will be replaced by the increasing sequence 0, 1, 2, ...\n\n ::\n\n /path/to/data/export-0.csv\n /path/to/data/export-1.csv\n\n Use a globstring and a ``name_function=`` keyword argument. The\n name_function function should expect an integer and produce a string.\n Strings produced by name_function must preserve the order of their\n respective partition indices.\n\n >>> from datetime import date, timedelta\n >>> def name(i):\n ... return str(date(2015, 1, 1) + i * timedelta(days=1))\n\n >>> name(0)\n '2015-01-01'\n >>> name(15)\n '2015-01-16'\n\n >>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP\n\n ::\n\n /path/to/data/export-2015-01-01.csv\n /path/to/data/export-2015-01-02.csv\n ...\n\n You can also provide an explicit list of paths::\n\n >>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP\n >>> df.to_csv(paths) # doctest: +SKIP\n\n Parameters\n ----------\n df : dask.DataFrame\n Data to save\n filename : string\n Path glob indicating the naming scheme for the output files\n single_file : bool, default False\n Whether to save everything into a single CSV file. Under the\n single file mode, each partition is appended at the end of the\n specified CSV file. Note that not all filesystems support the\n append mode and thus the single file mode, especially on cloud\n storage systems such as S3 or GCS. 
A warning will be issued when\n        writing to a file that is not backed by a local filesystem.\n    encoding : string, optional\n        A string representing the encoding to use in the output file,\n        defaults to 'utf-8'.\n    mode : str\n        Python write mode, default 'wt'\n    name_function : callable, default None\n        Function accepting an integer (partition index) and producing a\n        string to replace the asterisk in the given filename globstring.\n        Should preserve the lexicographic order of partitions. Not\n        supported when `single_file` is `True`.\n    compression : string, optional\n        A string representing the compression to use in the output file,\n        allowed values are 'gzip', 'bz2', 'xz',\n        only used when the first argument is a filename\n    compute : bool\n        If True, immediately executes. If False, returns a set of delayed\n        objects, which can be computed at a later time.\n    storage_options : dict\n        Parameters passed on to the backend filesystem class.\n    header_first_partition_only : boolean, default None\n        If set to `True`, only write the header row in the first output\n        file. By default, headers are written to all partitions under\n        the multiple file mode (`single_file` is `False`) and written\n        only once under the single file mode (`single_file` is `True`).\n        It must not be `False` under the single file mode.\n    compute_kwargs : dict, optional\n        Options to be passed in to the compute method\n    kwargs : dict, optional\n        Additional parameters to pass to `pd.DataFrame.to_csv()`\n\n    Returns\n    -------\n    The names of the files written if they were computed right away.\n    If not, the delayed tasks associated with writing the files.\n\n    Raises\n    ------\n    ValueError\n        If `header_first_partition_only` is set to `False` or\n        `name_function` is specified when `single_file` is `True`.\n    \"\"\"\n    # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv.if_single_file_and_name_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_to_csv.if_single_file_and_name_f_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 886, "end_line": 965, "span_ids": ["impl:15", "to_csv"], "tokens": 691}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_csv(\n df,\n filename,\n single_file=False,\n encoding=\"utf-8\",\n mode=\"wt\",\n name_function=None,\n compression=None,\n compute=True,\n scheduler=None,\n storage_options=None,\n header_first_partition_only=None,\n compute_kwargs=None,\n **kwargs,\n):\n if single_file and name_function is not None:\n raise ValueError(\"name_function is not supported under the single file mode\")\n if header_first_partition_only is None:\n header_first_partition_only = single_file\n elif not header_first_partition_only and single_file:\n raise ValueError(\n \"header_first_partition_only cannot be False in the single file mode.\"\n )\n file_options = dict(\n compression=compression,\n encoding=encoding,\n newline=\"\",\n **(storage_options or {}),\n )\n to_csv_chunk = delayed(_write_csv, pure=False)\n dfs = df.to_delayed()\n if single_file:\n first_file = open_file(filename, mode=mode, **file_options)\n if not isinstance(first_file.fs, fsspec.implementations.local.LocalFileSystem):\n warn(\"Appending data to a network storage system may not work.\")\n value = to_csv_chunk(dfs[0], first_file, **kwargs)\n append_mode = mode.replace(\"w\", \"\") + \"a\"\n append_file = open_file(filename, mode=append_mode, **file_options)\n kwargs[\"header\"] = False\n for d in dfs[1:]:\n value = to_csv_chunk(d, append_file, depend_on=value, **kwargs)\n values = [value]\n files = [first_file]\n else:\n files = open_files(\n filename,\n mode=mode,\n name_function=name_function,\n num=df.npartitions,\n **file_options,\n )\n values = [to_csv_chunk(dfs[0], files[0], **kwargs)]\n if header_first_partition_only:\n kwargs[\"header\"] = False\n values.extend(\n [to_csv_chunk(d, f, **kwargs) for d, f in zip(dfs[1:], files[1:])]\n )\n if compute:\n if compute_kwargs is None:\n compute_kwargs = dict()\n\n if scheduler is not None:\n warn(\n \"The 'scheduler' keyword argument for `to_csv()` is deprecated and\"\n \"will be removed in a future version. \"\n \"Please use the `compute_kwargs` argument instead. 
\"\n f\"For example, df.to_csv(..., compute_kwargs={{scheduler: {scheduler}}})\",\n FutureWarning,\n )\n\n if (\n scheduler is not None\n and compute_kwargs.get(\"scheduler\") is not None\n and compute_kwargs.get(\"scheduler\") != scheduler\n ):\n raise ValueError(\n f\"Differing values for 'scheduler' have been passed in.\\n\"\n f\"scheduler argument: {scheduler}\\n\"\n f\"via compute_kwargs: {compute_kwargs.get('scheduler')}\"\n )\n\n if scheduler is not None and compute_kwargs.get(\"scheduler\") is None:\n compute_kwargs[\"scheduler\"] = scheduler\n\n import dask\n\n return list(dask.compute(*values, **compute_kwargs))\n else:\n return values\n\n\nfrom ..core import _Frame\n\n_Frame.to_csv.__doc__ = to_csv.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_part_make_timeseries_part.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_part_make_timeseries_part.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 67, "end_line": 81, "span_ids": ["make_timeseries_part"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_timeseries_part(start, end, dtypes, freq, state_data, kwargs):\n index = pd.date_range(start=start, end=end, freq=freq, name=\"timestamp\")\n state = np.random.RandomState(state_data)\n columns = {}\n for k, dt in dtypes.items():\n kws = {\n kk.rsplit(\"_\", 1)[1]: v\n for kk, v in kwargs.items()\n if kk.rsplit(\"_\", 1)[0] == k\n }\n columns[k] = make[dt](len(index), state, **kws)\n df = pd.DataFrame(columns, index=index, columns=sorted(columns))\n if df.index[-1] == end:\n df = df.iloc[:-1]\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf_to_hdf._Store_Dask_Dataframe_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf_to_hdf._Store_Dask_Dataframe_t", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 35, "end_line": 133, "span_ids": ["to_hdf"], "tokens": 862}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_hdf(\n df,\n path,\n key,\n mode=\"a\",\n append=False,\n scheduler=None,\n 
name_function=None,\n    compute=True,\n    lock=None,\n    dask_kwargs={},\n    **kwargs,\n):\n    \"\"\"Store Dask DataFrame to Hierarchical Data Format (HDF) files\n\n    This is a parallel version of the Pandas function of the same name. Please\n    see the Pandas docstring for more detailed information about shared keyword\n    arguments.\n\n    This function differs from the Pandas version by saving the many partitions\n    of a Dask DataFrame in parallel, either to many files, or to many datasets\n    within the same file. You may specify this parallelism with an asterisk\n    ``*`` within the filename or datapath, and an optional ``name_function``.\n    The asterisk will be replaced with an increasing sequence of integers\n    starting from ``0`` or with the result of calling ``name_function`` on each\n    of those integers.\n\n    This function only supports the Pandas ``'table'`` format, not the more\n    specialized ``'fixed'`` format.\n\n    Parameters\n    ----------\n    path : string, pathlib.Path\n        Path to a target filename. Supports strings, ``pathlib.Path``, or any\n        object implementing the ``__fspath__`` protocol. May contain a ``*`` to\n        denote many filenames.\n    key : string\n        Datapath within the files. May contain a ``*`` to denote many locations.\n    name_function : function\n        A function to convert the ``*`` in the above options to a string.\n        Should take in a number from 0 to the number of partitions and return a\n        string (see examples below).\n    compute : bool\n        Whether or not to execute immediately. If False, then this returns a\n        ``dask.Delayed`` value.\n    lock : Lock, optional\n        Lock to use to prevent concurrency issues. By default a\n        ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``\n        will be used depending on your scheduler if a lock is required. See\n        dask.utils.get_scheduler_lock for more information about lock\n        selection.\n    scheduler : string\n        The scheduler to use, like \"threads\" or \"processes\".\n    **kwargs :\n        See ``pandas.DataFrame.to_hdf`` for more information.\n\n    Examples\n    --------\n    Save data to a single file:\n\n    >>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP\n\n    Save data to multiple datapaths within the same file:\n\n    >>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP\n\n    Save data to multiple files:\n\n    >>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP\n\n    Save data to multiple files, using the multiprocessing scheduler:\n\n    >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP\n\n    Specify a custom naming scheme. This writes files as\n    '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.\n\n    >>> from datetime import date, timedelta\n    >>> base = date(year=2000, month=1, day=1)\n    >>> def name_function(i):\n    ...     '''Convert an integer 0 to n to a string'''\n    ...     return str(base + timedelta(days=i))\n\n    >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP\n\n    Returns\n    -------\n    filenames : list\n        Returned if ``compute`` is True. List of file names that each partition\n        is saved to.\n    delayed : dask.Delayed\n        Returned if ``compute`` is False. Delayed object to execute ``to_hdf``\n        when computed.\n\n    See Also\n    --------\n    read_hdf:\n    to_parquet:\n    \"\"\"\n    # ... 
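# [Editor's illustration] Sketch of the default naming mentioned above: with
# no name_function, to_hdf falls back to build_name_function(npartitions - 1),
# which zero-pads indices so that names sort correctly. This assumes the
# helper is importable from dask.utils, where dask defines it.
from dask.utils import build_name_function

namer = build_name_function(99)  # e.g. npartitions - 1 == 99
assert [namer(0), namer(7), namer(99)] == ["00", "07", "99"]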
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.name_to_hdf.for_i_in_range_0_df_npar.filenames_append_fmt_obj_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.name_to_hdf.for_i_in_range_0_df_npar.filenames_append_fmt_obj_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 134, "end_line": 228, "span_ids": ["to_hdf"], "tokens": 773}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_hdf(\n df,\n path,\n key,\n mode=\"a\",\n append=False,\n scheduler=None,\n name_function=None,\n compute=True,\n lock=None,\n dask_kwargs={},\n **kwargs,\n):\n name = \"to-hdf-\" + uuid.uuid1().hex\n\n pd_to_hdf = getattr(df._partition_type, \"to_hdf\")\n\n single_file = True\n single_node = True\n\n path = stringify_path(path)\n\n # if path is string, format using i_name\n if isinstance(path, str):\n if path.count(\"*\") + key.count(\"*\") > 1:\n raise ValueError(\n \"A maximum of one asterisk is accepted in file path and dataset key\"\n )\n\n fmt_obj = lambda path, i_name: path.replace(\"*\", i_name)\n\n if \"*\" in path:\n single_file = False\n else:\n if key.count(\"*\") > 1:\n raise ValueError(\"A maximum of one asterisk is accepted in dataset key\")\n\n fmt_obj = lambda path, _: path\n\n if \"*\" in key:\n single_node = False\n\n if \"format\" in kwargs and kwargs[\"format\"] not in [\"t\", \"table\"]:\n raise ValueError(\"Dask only support 'table' format in hdf files.\")\n\n if mode not in (\"a\", \"w\", \"r+\"):\n raise ValueError(\"Mode must be one of 'a', 'w' or 'r+'\")\n\n if name_function is None:\n name_function = build_name_function(df.npartitions - 1)\n\n # we guarantee partition order is preserved when its saved and read\n # so we enforce name_function to maintain the order of its input.\n if not (single_file and single_node):\n formatted_names = [name_function(i) for i in range(df.npartitions)]\n if formatted_names != sorted(formatted_names):\n warn(\n \"To preserve order between partitions name_function \"\n \"must preserve the order of its input\"\n )\n\n # If user did not specify scheduler and write is sequential default to the\n # sequential scheduler. 
otherwise let the _get method choose the scheduler\n if (\n scheduler is None\n and not config.get(\"scheduler\", None)\n and single_node\n and single_file\n ):\n scheduler = \"single-threaded\"\n\n # handle lock default based on whether we're writing to a single entity\n _actual_get = get_scheduler(collections=[df], scheduler=scheduler)\n if lock is None:\n if not single_node:\n lock = True\n elif not single_file and _actual_get is not multiprocessing.get:\n # if we're writing to multiple files with the multiprocessing\n # scheduler we don't need to lock\n lock = True\n else:\n lock = False\n if lock:\n lock = get_scheduler_lock(df, scheduler=scheduler)\n\n kwargs.update({\"format\": \"table\", \"mode\": mode, \"append\": append})\n\n dsk = dict()\n\n i_name = name_function(0)\n dsk[(name, 0)] = (\n _pd_to_hdf,\n pd_to_hdf,\n lock,\n [(df._name, 0), fmt_obj(path, i_name), key.replace(\"*\", i_name)],\n kwargs,\n )\n\n kwargs2 = kwargs.copy()\n if single_file:\n kwargs2[\"mode\"] = \"a\"\n if single_node:\n kwargs2[\"append\"] = True\n\n filenames = []\n for i in range(0, df.npartitions):\n i_name = name_function(i)\n filenames.append(fmt_obj(path, i_name))\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.for_i_in_range_1_df_npar_to_hdf.if_compute_.else_.return.delayed_Delayed_k_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_to_hdf.for_i_in_range_1_df_npar_to_hdf.if_compute_.else_.return.delayed_Delayed_k_dsk_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 230, "end_line": 256, "span_ids": ["to_hdf"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_hdf(\n df,\n path,\n key,\n mode=\"a\",\n append=False,\n scheduler=None,\n name_function=None,\n compute=True,\n lock=None,\n dask_kwargs={},\n **kwargs,\n):\n # ... 
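# [Editor's illustration] The "*" substitution rule used by fmt_obj above, as
# a tiny runnable check (paths hypothetical): one asterisk total, either in
# the file path (many files) or in the dataset key (many nodes in one file).
fmt_obj = lambda path, i_name: path.replace("*", i_name)

assert fmt_obj("out-*.hdf", "003") == "out-003.hdf"
assert "/data-*".replace("*", "003") == "/data-003"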
other code\n\n    for i in range(1, df.npartitions):\n        i_name = name_function(i)\n        task = (\n            _pd_to_hdf,\n            pd_to_hdf,\n            lock,\n            [(df._name, i), fmt_obj(path, i_name), key.replace(\"*\", i_name)],\n            kwargs2,\n        )\n        if single_file:\n            link_dep = i - 1 if single_node else 0\n            task = (_link, (name, link_dep), task)\n        dsk[(name, i)] = task\n\n    dsk = merge(df.dask, dsk)\n    if single_file and single_node:\n        keys = [(name, df.npartitions - 1)]\n    else:\n        keys = [(name, i) for i in range(df.npartitions)]\n\n    if compute:\n        compute_as_if_collection(\n            DataFrame, dsk, keys, scheduler=scheduler, **dask_kwargs\n        )\n        return filenames\n    else:\n        return delayed([Delayed(k, dsk) for k in keys])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_dont_use_fixed_error_message_read_hdf_error_msg._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_dont_use_fixed_error_message_read_hdf_error_msg._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 272, "span_ids": ["impl"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "dont_use_fixed_error_message = \"\"\"\nThis HDFStore is not partitionable and can only be used monolithically with\npandas. 
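# [Editor's illustration] Miniature of the graph shape built by to_hdf above
# for a single-file, single-node write of three partitions: a linear chain,
# so the final key transitively depends on every earlier write. `_link` here
# is a stand-in that, like dask's helper, returns its second argument and
# exists only to force the dependency.
import dask

def _link(dep, result):
    return result

def write(i):
    return f"wrote partition {i}"

name = "to-hdf-demo"
dsk = {
    (name, 0): (write, 0),
    (name, 1): (_link, (name, 0), (write, 1)),
    (name, 2): (_link, (name, 1), (write, 2)),
}
assert dask.get(dsk, (name, 2)) == "wrote partition 2"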
In the future when creating HDFStores use the ``format='table'``\noption to ensure that your dataset can be parallelized\"\"\"\n\nread_hdf_error_msg = \"\"\"\nThe start and stop keywords are not supported when reading from more than\none file/dataset.\n\nThe combination is ambiguous because it could be interpreted as the starting\nand stopping index per file, or starting and stopping index of the global\ndataset.\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf_read_hdf.if_chunksize_0_.raise_ValueError_Chunksi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf_read_hdf.if_chunksize_0_.raise_ValueError_Chunksi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 310, "end_line": 402, "span_ids": ["read_hdf"], "tokens": 762}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_hdf(\n pattern,\n key,\n start=0,\n stop=None,\n columns=None,\n chunksize=1000000,\n sorted_index=False,\n lock=True,\n mode=\"r\",\n):\n \"\"\"\n Read HDF files into a Dask DataFrame\n\n Read hdf files into a dask dataframe. This function is like\n ``pandas.read_hdf``, except it can read from a single large file, or from\n multiple files, or from multiple keys from the same file.\n\n Parameters\n ----------\n pattern : string, pathlib.Path, list\n File pattern (string), pathlib.Path, buffer to read from, or list of\n file paths. Can contain wildcards.\n key : group identifier in the store. Can contain wildcards\n start : optional, integer (defaults to 0), row number to start at\n stop : optional, integer (defaults to None, the last row), row number to\n stop at\n columns : list of columns, optional\n A list of columns that if not None, will limit the return\n columns (default is None)\n chunksize : positive integer, optional\n Maximal number of rows per partition (default is 1000000).\n sorted_index : boolean, optional\n Option to specify whether or not the input hdf files have a sorted\n index (default is False).\n lock : boolean, optional\n Option to use a lock to prevent concurrency issues (default is True).\n mode : {'a', 'r', 'r+'}, default 'r'. 
Mode to use when opening file(s).\n 'r'\n Read-only; no data can be modified.\n 'a'\n Append; an existing file is opened for reading and writing,\n and if the file does not exist it is created.\n 'r+'\n It is similar to 'a', but the file must already exist.\n\n Returns\n -------\n dask.DataFrame\n\n Examples\n --------\n Load single file\n\n >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP\n\n Load multiple files\n\n >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP\n\n >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP\n\n Load multiple datasets\n\n >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP\n \"\"\"\n if lock is True:\n lock = get_scheduler_lock()\n\n key = key if key.startswith(\"/\") else \"/\" + key\n # Convert path-like objects to a string\n pattern = stringify_path(pattern)\n\n if isinstance(pattern, str):\n paths = sorted(glob(pattern))\n else:\n paths = pattern\n\n if not isinstance(pattern, str) and len(paths) == 0:\n raise ValueError(\"No files provided\")\n if not paths or len(paths) == 0:\n raise OSError(f\"File(s) not found: {pattern}\")\n for path in paths:\n try:\n exists = os.path.exists(path)\n except (ValueError, TypeError):\n exists = False\n if not exists:\n raise OSError(f\"File not found or insufficient permissions: {path}\")\n if (start != 0 or stop is not None) and len(paths) > 1:\n raise NotImplementedError(read_hdf_error_msg)\n if chunksize <= 0:\n raise ValueError(\"Chunksize must be a positive integer\")\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_array_from_array.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_array_from_array.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 80, "end_line": 130, "span_ids": ["from_array"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_array(x, chunksize=50000, columns=None, meta=None):\n \"\"\"Read any sliceable array into a Dask Dataframe\n\n Uses getitem syntax to pull slices out of the array. The array need not be\n a NumPy array but must support slicing syntax\n\n x[50000:100000]\n\n and have 2 dimensions:\n\n x.ndim == 2\n\n or have a record dtype:\n\n x.dtype == [('name', 'O'), ('balance', 'i8')]\n\n Parameters\n ----------\n x : array_like\n chunksize : int, optional\n The number of rows per partition to use.\n columns : list or string, optional\n list of column names if DataFrame, single string if Series\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. 
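# [Editor's illustration] Sketch of the read_hdf normalization steps above:
# keys gain a leading slash, and string patterns are globbed and sorted
# (pattern hypothetical; globbing a non-existent pattern yields []).
from glob import glob

key = "x"
key = key if key.startswith("/") else "/" + key
assert key == "/x"

pattern = "myfile.*.hdf5"
paths = sorted(glob(pattern)) if isinstance(pattern, str) else pattern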
By default, pandas DataFrame is used.\n\n Returns\n -------\n dask.DataFrame or dask.Series\n A dask DataFrame/Series\n \"\"\"\n if isinstance(x, da.Array):\n return from_dask_array(x, columns=columns, meta=meta)\n\n meta = _meta_from_array(x, columns, meta=meta)\n\n divisions = tuple(range(0, len(x), chunksize))\n divisions = divisions + (len(x) - 1,)\n token = tokenize(x, chunksize, columns)\n name = \"from_array-\" + token\n\n dsk = {}\n for i in range(0, int(ceil(len(x) / chunksize))):\n data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))\n if is_series_like(meta):\n dsk[name, i] = (type(meta), data, None, meta.dtype, meta.name)\n else:\n dsk[name, i] = (type(meta), data, None, meta.columns)\n return new_dd_object(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_bcolz_from_bcolz.if_index_.else_.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_bcolz_from_bcolz.if_index_.else_.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 235, "end_line": 321, "span_ids": ["from_bcolz"], "tokens": 691}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@_deprecated(after_version=\"2022.02.1\")\ndef from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock, **kwargs):\n \"\"\"Read BColz CTable into a Dask Dataframe\n\n BColz is a fast on-disk compressed column store with careful attention\n given to compression. 
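# [Editor's illustration] The division bookkeeping used by from_array above,
# checked on hypothetical sizes: partition start offsets plus the final
# valid index.
from math import ceil

chunksize, n = 3, 11  # n stands in for len(x)
divisions = tuple(range(0, n, chunksize)) + (n - 1,)
assert divisions == (0, 3, 6, 9, 10)
assert len(divisions) - 1 == ceil(n / chunksize)  # one slot per partition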
https://bcolz.readthedocs.io/en/latest/\n\n    Parameters\n    ----------\n    x : bcolz.ctable\n    chunksize : int, optional\n        The size (in rows) of blocks to pull out of the ctable.\n    categorize : bool, defaults to True\n        Automatically categorize all string dtypes.\n    index : string, optional\n        Column to make the index.\n    lock: bool or Lock\n        Lock to use when reading, or False for no lock (not thread-safe).\n\n    See Also\n    --------\n    from_array: more generic function not optimized for bcolz\n    \"\"\"\n    if lock is True:\n        lock = Lock()\n\n    import bcolz\n\n    import dask.array as da\n\n    if isinstance(x, str):\n        x = bcolz.ctable(rootdir=x)\n    bc_chunklen = max(x[name].chunklen for name in x.names)\n    if chunksize is None and bc_chunklen > 10000:\n        chunksize = bc_chunklen\n\n    categories = dict()\n    if categorize:\n        for name in x.names:\n            if (\n                np.issubdtype(x.dtype[name], np.string_)\n                or np.issubdtype(x.dtype[name], np.unicode_)\n                or np.issubdtype(x.dtype[name], np.object_)\n            ):\n                a = da.from_array(x[name], chunks=(chunksize * len(x.names),))\n                categories[name] = da.unique(a).compute()\n\n    columns = tuple(x.dtype.names)\n    divisions = tuple(range(0, len(x), chunksize))\n    divisions = divisions + (len(x) - 1,)\n    if x.rootdir:\n        token = tokenize(\n            (x.rootdir, os.path.getmtime(x.rootdir)),\n            chunksize,\n            categorize,\n            index,\n            kwargs,\n        )\n    else:\n        token = tokenize(\n            (id(x), x.shape, x.dtype), chunksize, categorize, index, kwargs\n        )\n    new_name = \"from_bcolz-\" + token\n\n    dsk = {\n        (new_name, i): (\n            dataframe_from_ctable,\n            x,\n            (slice(i * chunksize, (i + 1) * chunksize),),\n            columns,\n            categories,\n            lock,\n        )\n        for i in range(0, int(ceil(len(x) / chunksize)))\n    }\n\n    meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)\n    result = DataFrame(dsk, new_name, meta, divisions)\n\n    if index:\n        assert index in x.names\n        a = da.from_array(x[index], chunks=(chunksize * len(x.names),))\n        q = np.linspace(0, 100, len(x) // chunksize + 2)\n        divisions = tuple(da.percentile(a, q).compute())\n        return set_partition(result, index, divisions, **kwargs)\n    else:\n        return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_dataframe_from_ctable_dataframe_from_ctable.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_dataframe_from_ctable_dataframe_from_ctable.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 320, "end_line": 393, "span_ids": ["dataframe_from_ctable"], "tokens": 555}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):\n    \"\"\"Get DataFrame from bcolz.ctable\n\n    Parameters\n    ----------\n    x: bcolz.ctable\n    slc: slice\n    columns: list of column names or None\n\n    >>> import bcolz\n    >>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])\n    >>> 
dataframe_from_ctable(x, slice(1, 3))\n a b\n 1 2 20\n 2 3 30\n\n >>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])\n b\n 1 20\n 2 30\n\n >>> dataframe_from_ctable(x, slice(1, 3), columns='b')\n 1 20\n 2 30\n Name: b, dtype: int...\n\n \"\"\"\n import bcolz\n\n if columns is None:\n columns = x.dtype.names\n if isinstance(columns, tuple):\n columns = list(columns)\n\n x = x[columns]\n if type(slc) is slice:\n start = slc.start\n stop = slc.stop if slc.stop < len(x) else len(x)\n else:\n start = slc[0].start\n stop = slc[0].stop if slc[0].stop < len(x) else len(x)\n idx = pd.Index(range(start, stop))\n\n if lock:\n lock.acquire()\n try:\n if isinstance(x, bcolz.ctable):\n chunks = [x[name][slc] for name in columns]\n if categories is not None:\n chunks = [\n pd.Categorical.from_codes(\n np.searchsorted(categories[name], chunk), categories[name], True\n )\n if name in categories\n else chunk\n for name, chunk in zip(columns, chunks)\n ]\n result = pd.DataFrame(\n dict(zip(columns, chunks)), columns=columns, index=idx\n )\n\n elif isinstance(x, bcolz.carray):\n chunk = x[slc]\n if categories is not None and columns and columns in categories:\n chunk = pd.Categorical.from_codes(\n np.searchsorted(categories[columns], chunk),\n categories[columns],\n True,\n )\n result = pd.Series(chunk, name=columns, index=idx)\n finally:\n if lock:\n lock.release()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array_from_dask_array.dsk._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array_from_dask_array.dsk._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 396, "end_line": 474, "span_ids": ["from_dask_array"], "tokens": 737}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_dask_array(x, columns=None, index=None, meta=None):\n \"\"\"Create a Dask DataFrame from a Dask Array.\n\n Converts a 2d array into a DataFrame and a 1d array into a Series.\n\n Parameters\n ----------\n x : da.Array\n columns : list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether `x` has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). 
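# [Editor's illustration] The categorical reconstruction used by
# dataframe_from_ctable above: np.searchsorted maps raw values to codes
# against the sorted category array (da.unique returns sorted values).
# Data is hypothetical.
import numpy as np
import pandas as pd

categories = np.array(["a", "b", "c"])
chunk = np.array(["b", "a", "c", "b"])
codes = np.searchsorted(categories, chunk)
cat = pd.Categorical.from_codes(codes, categories, ordered=True)
assert list(cat) == ["b", "a", "c", "b"]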
If all the chunks are known,\n a default index with known divisions is created.\n\n Specifying `index` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to be returned.\n By default, pandas DataFrame is used.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import dask.dataframe as dd\n >>> x = da.ones((4, 2), chunks=(2, 2))\n >>> df = dd.io.from_dask_array(x, columns=['a', 'b'])\n >>> df.compute()\n a b\n 0 1.0 1.0\n 1 1.0 1.0\n 2 1.0 1.0\n 3 1.0 1.0\n\n See Also\n --------\n dask.bag.to_dataframe: from dask.bag\n dask.dataframe._Frame.values: Reverse conversion\n dask.dataframe._Frame.to_records: Reverse conversion\n \"\"\"\n meta = _meta_from_array(x, columns, index, meta=meta)\n\n if x.ndim == 2 and len(x.chunks[1]) > 1:\n x = x.rechunk({1: x.shape[1]})\n\n name = \"from-dask-array\" + tokenize(x, columns)\n to_merge = []\n\n if index is not None:\n if not isinstance(index, Index):\n raise ValueError(\"'index' must be an instance of dask.dataframe.Index\")\n if index.npartitions != x.numblocks[0]:\n msg = (\n \"The index and array have different numbers of blocks. \"\n \"({} != {})\".format(index.npartitions, x.numblocks[0])\n )\n raise ValueError(msg)\n divisions = index.divisions\n to_merge.append(ensure_dict(index.dask))\n index = index.__dask_keys__()\n\n elif np.isnan(sum(x.shape)):\n divisions = [None] * (len(x.chunks[0]) + 1)\n index = [None] * len(x.chunks[0])\n else:\n divisions = [0]\n for c in x.chunks[0]:\n divisions.append(divisions[-1] + c)\n index = [\n (np.arange, a, b, 1, \"i8\") for a, b in zip(divisions[:-1], divisions[1:])\n ]\n divisions[-1] -= 1\n\n dsk = {}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_bag_to_bag.return.Bag_dsk_name_df_npartit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_bag_to_bag.return.Bag_dsk_name_df_npartit", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 518, "end_line": 543, "span_ids": ["to_bag"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_bag(df, index=False, format=\"tuple\"):\n \"\"\"Create Dask Bag from a Dask DataFrame\n\n Parameters\n ----------\n index : bool, optional\n If True, the elements are tuples of ``(index, value)``, otherwise\n they're just the ``value``. 
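# [Editor's illustration] The divisions computed by from_dask_array above for
# known chunks (2, 2) along axis 0: cumulative offsets, with the last
# division shifted to the final valid index.
chunks0 = (2, 2)
divisions = [0]
for c in chunks0:
    divisions.append(divisions[-1] + c)
divisions[-1] -= 1
assert divisions == [0, 2, 3]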
Default is False.\n    format : {\"tuple\", \"dict\"}, optional\n        Whether to return a bag of tuples or dictionaries.\n\n    Examples\n    --------\n    >>> bag = df.to_bag()  # doctest: +SKIP\n    \"\"\"\n    from ...bag.core import Bag\n\n    if not isinstance(df, (DataFrame, Series)):\n        raise TypeError(\"df must be either DataFrame or Series\")\n    name = \"to_bag-\" + tokenize(df, index, format)\n    dsk = {\n        (name, i): (_df_to_bag, block, index, format)\n        for (i, block) in enumerate(df.__dask_keys__())\n    }\n    dsk.update(df.__dask_optimize__(df.__dask_graph__(), df.__dask_keys__()))\n    return Bag(dsk, name, df.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_records_to_records.return.df_map_partitions_M_to_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_to_records_to_records.return.df_map_partitions_M_to_re", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 546, "end_line": 562, "span_ids": ["to_records"], "tokens": 106}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_records(df):\n    \"\"\"Create Dask Array from a Dask DataFrame\n\n    Warning: This creates a dask.array without precise shape information.\n    Operations that depend on shape information, like slicing or reshaping,\n    will not work.\n\n    Examples\n    --------\n    >>> df.to_records()  # doctest: +SKIP\n\n    See Also\n    --------\n    dask.dataframe._Frame.values\n    dask.dataframe.from_dask_array\n    \"\"\"\n    return df.map_partitions(M.to_records)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_delayed_from_delayed.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_delayed_from_delayed.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 565, "end_line": 636, "span_ids": ["from_delayed"], "tokens": 592}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef from_delayed(\n    dfs, meta=None, divisions=None, prefix=\"from-delayed\", verify_meta=True\n):\n    \"\"\"Create Dask DataFrame from many Dask Delayed objects\n\n    Parameters\n    ----------\n    dfs : list of Delayed or Future\n        An iterable of ``dask.delayed.Delayed`` objects, such as 
those produced by\n        ``dask.delayed``, or an iterable of ``distributed.Future`` objects,\n        such as those returned by the ``client.submit`` interface. These\n        comprise the individual partitions of the resulting dataframe.\n    $META\n    divisions : tuple, str, optional\n        Partition boundaries along the index.\n        For a tuple, see https://docs.dask.org/en/latest/dataframe-design.html#partitions\n        For the string 'sorted', the delayed values will be computed to find the\n        index values; this assumes that the indexes are mutually sorted.\n        If None, no index information is used.\n    prefix : str, optional\n        Prefix to prepend to the keys.\n    verify_meta : bool, optional\n        If True, check that the partitions have consistent metadata. Defaults to True.\n    \"\"\"\n    from dask.delayed import Delayed\n\n    if isinstance(dfs, Delayed):\n        dfs = [dfs]\n    dfs = [\n        delayed(df) if not isinstance(df, Delayed) and hasattr(df, \"key\") else df\n        for df in dfs\n    ]\n\n    for df in dfs:\n        if not isinstance(df, Delayed):\n            raise TypeError(\"Expected Delayed object, got %s\" % type(df).__name__)\n\n    if meta is None:\n        meta = delayed(make_meta)(dfs[0]).compute()\n    else:\n        meta = make_meta(meta)\n\n    if not dfs:\n        dfs = [delayed(make_meta)(meta)]\n\n    name = prefix + \"-\" + tokenize(*dfs)\n    dsk = {}\n    if verify_meta:\n        for (i, df) in enumerate(dfs):\n            dsk[(name, i)] = (check_meta, df.key, meta, \"from_delayed\")\n    else:\n        for (i, df) in enumerate(dfs):\n            dsk[(name, i)] = df.key\n\n    if divisions is None or divisions == \"sorted\":\n        divs = [None] * (len(dfs) + 1)\n    else:\n        divs = tuple(divisions)\n        if len(divs) != len(dfs) + 1:\n            raise ValueError(\"divisions should be a tuple of len(dfs) + 1\")\n\n    df = new_dd_object(\n        HighLevelGraph.from_collections(name, dsk, dfs), name, meta, divs\n    )\n\n    if divisions == \"sorted\":\n        from ..shuffle import compute_and_set_divisions\n\n        df = compute_and_set_divisions(df)\n\n    return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_sorted_division_locations_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_sorted_division_locations_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 616, "end_line": 666, "span_ids": ["sorted_division_locations", "impl:4"], "tokens": 450}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sorted_division_locations(seq, npartitions=None, chunksize=None):\n    \"\"\"Find division locations and values in a sorted list\n\n    Examples\n    --------\n\n    >>> L = ['A', 'B', 'C', 'D', 'E', 'F']\n    >>> sorted_division_locations(L, chunksize=2)\n    (['A', 'C', 'E', 'F'], [0, 2, 4, 6])\n\n    >>> sorted_division_locations(L, chunksize=3)\n    (['A', 'D', 'F'], [0, 3, 6])\n\n    >>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C']\n    >>> sorted_division_locations(L, chunksize=3)\n    (['A', 'B', 'C'], [0, 4, 8])\n\n    >>> sorted_division_locations(L, chunksize=2)\n    (['A', 'B', 'C'], [0, 4, 8])\n\n    >>> 
sorted_division_locations(['A'], chunksize=2)\n (['A', 'A'], [0, 1])\n \"\"\"\n if (npartitions is None) == (chunksize is None):\n raise ValueError(\"Exactly one of npartitions and chunksize must be specified.\")\n\n if npartitions:\n chunksize = ceil(len(seq) / npartitions)\n\n positions = [0]\n values = [seq[0]]\n for pos in range(0, len(seq), chunksize):\n if pos <= positions[-1]:\n continue\n while pos + 1 < len(seq) and seq[pos - 1] == seq[pos]:\n pos += 1\n values.append(seq[pos])\n if pos == len(seq) - 1:\n pos += 1\n positions.append(pos)\n\n if positions[-1] != len(seq):\n positions.append(len(seq))\n values.append(seq[-1])\n\n return values, positions\n\n\nDataFrame.to_records.__doc__ = to_records.__doc__\nDataFrame.to_bag.__doc__ = to_bag.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["imports"], "tokens": 20}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .core import create_metadata_file, read_parquet, read_parquet_part, to_parquet", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet_to_parquet._Store_Dask_dataframe_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet_to_parquet._Store_Dask_dataframe_t", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 450, "end_line": 573, "span_ids": ["to_parquet"], "tokens": 1309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n overwrite=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n custom_metadata=None,\n write_metadata_file=True,\n compute=True,\n compute_kwargs=None,\n schema=None,\n name_function=None,\n **kwargs,\n):\n \"\"\"Store Dask.dataframe to Parquet files\n\n Notes\n -----\n Each partition will be written to a separate file.\n\n Parameters\n ----------\n df : 
dask.dataframe.DataFrame\n    path : string or pathlib.Path\n        Destination directory for data. Prepend with protocol like ``s3://``\n        or ``hdfs://`` for remote data.\n    engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'\n        Parquet library to use. If only one library is installed, it will use\n        that one; if both, it will use 'fastparquet'.\n    compression : string or dict, default 'default'\n        Either a string like ``\"snappy\"`` or a dictionary mapping column names\n        to compressors like ``{\"name\": \"gzip\", \"values\": \"snappy\"}``. The\n        default is ``\"default\"``, which uses the default compression for\n        whichever engine is selected.\n    write_index : boolean, default True\n        Whether or not to write the index. Defaults to True.\n    append : bool, default False\n        If False (default), construct data-set from scratch. If True, add new\n        row-group(s) to an existing data-set. In the latter case, the data-set\n        must exist, and the schema must match the input data.\n    overwrite : bool, default False\n        Whether or not to remove the contents of `path` before writing the dataset.\n        The default is False. If True, the specified path must correspond to\n        a directory (but not the current working directory). This option cannot\n        be set to True if `append=True`.\n        NOTE: `overwrite=True` will remove the original data even if the current\n        write operation fails. Use at your own risk.\n    ignore_divisions : bool, default False\n        If False (default), raises an error when previous divisions overlap with\n        the new appended divisions. Ignored if append=False.\n    partition_on : list, default None\n        Construct directory-based partitioning by splitting on these fields'\n        values. Each dask partition will result in one or more datafiles;\n        there will be no global groupby.\n    storage_options : dict, default None\n        Key/value pairs to be passed on to the file-system backend, if any.\n    custom_metadata : dict, default None\n        Custom key/value metadata to include in all footer metadata (and\n        in the global \"_metadata\" file, if applicable). Note that the custom\n        metadata may not contain the reserved b\"pandas\" key.\n    write_metadata_file : bool, default True\n        Whether to write the special \"_metadata\" file.\n    compute : bool, default True\n        If :obj:`True` (default) then the result is computed immediately. If :obj:`False`\n        then a ``dask.dataframe.Scalar`` object is returned for future computation.\n    compute_kwargs : dict, default None\n        Options to be passed in to the compute method.\n    schema : Schema object, dict, or {\"infer\", None}, default None\n        Global schema to use for the output dataset. Alternatively, a `dict`\n        of pyarrow types can be specified (e.g. `schema={\"id\": pa.string()}`).\n        For this case, fields excluded from the dictionary will be inferred\n        from `_meta_nonempty`. If \"infer\", the first non-empty and non-null\n        partition will be used to infer the type for \"object\" columns. If\n        None (default), we let the backend infer the schema for each distinct\n        output partition. If the partitions produce inconsistent schemas,\n        pyarrow will throw an error when writing the shared _metadata file.\n        Note that this argument is ignored by the \"fastparquet\" engine.\n    name_function : callable, default None\n        Function to generate the filename for each output partition.\n        The function should accept an integer (partition index) as input and\n        return a string which will be used as the filename for the corresponding\n        partition. 
Should preserve the lexicographic order of partitions.\n        If not specified, files will be created using the convention\n        ``part.0.parquet``, ``part.1.parquet``, ``part.2.parquet``, ...\n        and so on for each partition in the DataFrame.\n    **kwargs :\n        Extra options to be passed on to the specific backend.\n\n    Examples\n    --------\n    >>> df = dd.read_csv(...)  # doctest: +SKIP\n    >>> df.to_parquet('/path/to/output/', ...)  # doctest: +SKIP\n\n    By default, files will be created in the specified output directory using the\n    convention ``part.0.parquet``, ``part.1.parquet``, ``part.2.parquet``, ... and so on for\n    each partition in the DataFrame. To customize the names of each file, you can use the\n    ``name_function=`` keyword argument. The function passed to ``name_function`` will be\n    used to generate the filename for each partition and should expect a partition's index\n    integer as input and return a string which will be used as the filename for the corresponding\n    partition. Strings produced by ``name_function`` must preserve the order of their respective\n    partition indices.\n\n    For example:\n\n    >>> name_function = lambda x: f\"data-{x}.parquet\"\n    >>> df.to_parquet('/path/to/output/', name_function=name_function)  # doctest: +SKIP\n\n    will result in the following files being created::\n\n        /path/to/output/\n        \u251c\u2500\u2500 data-0.parquet\n        \u251c\u2500\u2500 data-1.parquet\n        \u251c\u2500\u2500 data-2.parquet\n        \u2514\u2500\u2500 ...\n\n    See Also\n    --------\n    read_parquet: Read parquet data to dask.dataframe\n    \"\"\"\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters_apply_filters._Apply_filters_onto_par": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters_apply_filters._Apply_filters_onto_par", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 922, "end_line": 951, "span_ids": ["apply_filters"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_filters(parts, statistics, filters):\n    \"\"\"Apply filters onto parts/statistics pairs\n\n    Parameters\n    ----------\n    parts: list\n        Tokens corresponding to row groups to read in the future\n    statistics: List[dict]\n        List of statistics for each part, including min and max values\n    filters: Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]\n        List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This\n        implements partition-level (hive) filtering only, i.e., to prevent the\n        loading of some row-groups and/or files.\n\n    Predicates can be expressed in disjunctive normal form (DNF). This means\n    that the innermost tuple describes a single column predicate. These\n    inner predicates are combined with an AND conjunction into a larger\n    predicate. 
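# [Editor's illustration] The DNF semantics described above, evaluated by
# hand on hypothetical per-part statistics: with
# filters=[[("x", ">", 4)], [("y", "=", 1)]], a part survives if
# (x.max > 4) OR (y.min <= 1 <= y.max).
stats = {"columns": [{"name": "x", "min": 0, "max": 3},
                     {"name": "y", "min": 1, "max": 2}]}
x = next(c for c in stats["columns"] if c["name"] == "x")
y = next(c for c in stats["columns"] if c["name"] == "y")
assert not (x["max"] > 4)          # the first conjunction rejects the part
assert y["min"] <= 1 <= y["max"]   # the second conjunction keeps it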
The outermost list then combines all of the inner predicates\n    with an OR disjunction.\n\n    Predicates can also be expressed as a List[Tuple]. These are evaluated\n    as an AND conjunction. To express OR in predicates, one must use the\n    (preferred) List[List[Tuple]] notation.\n\n    Note that the \"fastparquet\" engine does not currently support DNF for\n    the filtering of partitioned columns (List[Tuple] is required).\n\n    Returns\n    -------\n    parts, statistics: the same as the input, but possibly a subset\n    \"\"\"\n    # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters.apply_conjunction_apply_filters.return.out_parts_out_statistics": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_apply_filters.apply_conjunction_apply_filters.return.out_parts_out_statistics", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1079, "end_line": 1124, "span_ids": ["apply_filters"], "tokens": 331}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_filters(parts, statistics, filters):\n\n    def apply_conjunction(parts, statistics, conjunction):\n        for column, operator, value in conjunction:\n            out_parts = []\n            out_statistics = []\n            for part, stats in zip(parts, statistics):\n                if \"filter\" in stats and stats[\"filter\"]:\n                    continue  # Filtered by engine\n                try:\n                    c = toolz.groupby(\"name\", stats[\"columns\"])[column][0]\n                    min = c[\"min\"]\n                    max = c[\"max\"]\n                except KeyError:\n                    out_parts.append(part)\n                    out_statistics.append(stats)\n                else:\n                    if (\n                        operator in (\"==\", \"=\")\n                        and min <= value <= max\n                        or operator == \"<\"\n                        and min < value\n                        or operator == \"<=\"\n                        and min <= value\n                        or operator == \">\"\n                        and max > value\n                        or operator == \">=\"\n                        and max >= value\n                        or operator == \"in\"\n                        and any(min <= item <= max for item in value)\n                    ):\n                        out_parts.append(part)\n                        out_statistics.append(stats)\n\n            parts, statistics = out_parts, out_statistics\n\n        return parts, statistics\n\n    conjunction, *disjunction = filters if isinstance(filters[0], list) else [filters]\n\n    out_parts, out_statistics = apply_conjunction(parts, statistics, conjunction)\n    for conjunction in disjunction:\n        for part, stats in zip(*apply_conjunction(parts, statistics, conjunction)):\n            if part not in out_parts:\n                out_parts.append(part)\n                out_statistics.append(stats)\n\n    return out_parts, out_statistics", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_process_statistics_process_statistics.return.parts_divisions_index_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_process_statistics_process_statistics.return.parts_divisions_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1127, "end_line": 1217, "span_ids": ["process_statistics"], "tokens": 760}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def process_statistics(\n parts,\n statistics,\n filters,\n index,\n chunksize,\n split_row_groups,\n fs,\n aggregation_depth,\n):\n \"\"\"Process row-group column statistics in metadata\n Used in read_parquet.\n \"\"\"\n index_in_columns = False\n if statistics and len(parts) != len(statistics):\n # It is up to the Engine to guarantee that these\n # lists are the same length (if statistics are defined).\n # This misalignment may be indicative of a bug or\n # incorrect read_parquet usage, so throw a warning.\n warnings.warn(\n f\"Length of partition statistics ({len(statistics)}) \"\n f\"does not match the partition count ({len(parts)}). \"\n f\"This may indicate a bug or incorrect read_parquet \"\n f\"usage. We must ignore the statistics and disable: \"\n f\"filtering, divisions, and/or file aggregation.\"\n )\n statistics = []\n\n if statistics:\n result = list(\n zip(\n *[\n (part, stats)\n for part, stats in zip(parts, statistics)\n if stats[\"num-rows\"] > 0\n ]\n )\n )\n parts, statistics = result or [[], []]\n if filters:\n parts, statistics = apply_filters(parts, statistics, filters)\n\n # Aggregate parts/statistics if we are splitting by row-group\n if chunksize or (split_row_groups and int(split_row_groups) > 1):\n parts, statistics = aggregate_row_groups(\n parts, statistics, chunksize, split_row_groups, fs, aggregation_depth\n )\n\n out = sorted_columns(statistics)\n\n if index and isinstance(index, str):\n index = [index]\n if index and out:\n # Only one valid column\n out = [o for o in out if o[\"name\"] in index]\n if index is not False and len(out) == 1:\n # Use only sorted column with statistics as the index\n divisions = out[0][\"divisions\"]\n if index is None:\n index_in_columns = True\n index = [out[0][\"name\"]]\n elif index != [out[0][\"name\"]]:\n raise ValueError(f\"Specified index is invalid.\\nindex: {index}\")\n elif index is not False and len(out) > 1:\n if any(o[\"name\"] == NONE_LABEL for o in out):\n # Use sorted column matching NONE_LABEL as the index\n [o] = [o for o in out if o[\"name\"] == NONE_LABEL]\n divisions = o[\"divisions\"]\n if index is None:\n index = [o[\"name\"]]\n index_in_columns = True\n elif index != [o[\"name\"]]:\n raise ValueError(f\"Specified index is invalid.\\nindex: {index}\")\n else:\n # Multiple sorted columns found, cannot autodetect the index\n warnings.warn(\n \"Multiple sorted columns found %s, cannot\\n \"\n \"autodetect index. 
Will continue without an index.\\n\"\n \"To pick an index column, use the index= keyword; to \\n\"\n \"silence this warning use index=False.\"\n \"\" % [o[\"name\"] for o in out],\n RuntimeWarning,\n )\n index = False\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n else:\n divisions = [None] * (len(parts) + 1)\n\n return parts, divisions, index, index_in_columns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_set_index_columns_set_index_columns.return.meta_index_columns": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_set_index_columns_set_index_columns.return.meta_index_columns", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1061, "end_line": 1116, "span_ids": ["set_index_columns"], "tokens": 444}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed):\n \"\"\"Handle index/column arguments, and modify `meta`\n Used in read_parquet.\n \"\"\"\n ignore_index_column_intersection = False\n if columns is None:\n # User didn't specify columns, so ignore any intersection\n # of auto-detected values with the index (if necessary)\n ignore_index_column_intersection = True\n # Do not allow \"un-named\" fields to be read in as columns.\n # These were intended to be un-named indices at write time.\n _index = index or []\n columns = [\n c for c in meta.columns if c not in (None, NONE_LABEL) or c in _index\n ]\n\n if not set(columns).issubset(set(meta.columns)):\n raise ValueError(\n \"The following columns were not found in the dataset %s\\n\"\n \"The following columns were found %s\"\n % (set(columns) - set(meta.columns), meta.columns)\n )\n\n if index:\n if isinstance(index, str):\n index = [index]\n if isinstance(columns, str):\n columns = [columns]\n\n if ignore_index_column_intersection:\n columns = [col for col in columns if col not in index]\n if set(index).intersection(columns):\n if auto_index_allowed:\n raise ValueError(\n \"Specified index and column arguments must not intersect\"\n \" (set index=False or remove the detected index from columns).\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n else:\n raise ValueError(\n \"Specified index and column arguments must not intersect.\\n\"\n \"index: {} | column: {}\".format(index, columns)\n )\n\n # Leaving index as a column in `meta`, because the index\n # will be reset below (in case the index was detected after\n # meta was created)\n if index_in_columns:\n meta = meta[columns + index]\n else:\n meta = meta[columns]\n\n else:\n meta = meta[list(columns)]\n\n return meta, index, columns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
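# [Editor's illustration] The auto-detected-index bookkeeping in
# set_index_columns above: when columns were not user-specified, the detected
# index is simply dropped from them rather than raising. Values hypothetical.
index, columns = ["i"], ["i", "a", "b"]
ignore_index_column_intersection = True  # columns were auto-detected
if ignore_index_column_intersection:
    columns = [col for col in columns if col not in index]
assert columns == ["a", "b"]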
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_aggregate_row_groups_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_aggregate_row_groups_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1278, "end_line": 1376, "span_ids": ["aggregate_row_groups", "impl:12"], "tokens": 852}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def aggregate_row_groups(\n parts, stats, chunksize, split_row_groups, fs, aggregation_depth\n):\n if not stats or not stats[0].get(\"file_path_0\", None):\n return parts, stats\n\n parts_agg = []\n stats_agg = []\n\n use_row_group_criteria = split_row_groups and int(split_row_groups) > 1\n use_chunksize_criteria = bool(chunksize)\n if use_chunksize_criteria:\n chunksize = parse_bytes(chunksize)\n next_part, next_stat = [parts[0].copy()], stats[0].copy()\n for i in range(1, len(parts)):\n stat, part = stats[i], parts[i]\n\n # Criteria #1 for aggregating parts: parts are within the same file\n same_path = stat[\"file_path_0\"] == next_stat[\"file_path_0\"]\n multi_path_allowed = False\n\n if aggregation_depth:\n\n # Criteria #2 for aggregating parts: The part does not include\n # row-group information, or both parts include the same kind\n # of row_group aggregation (all None, or all indices)\n multi_path_allowed = len(part[\"piece\"]) == 1\n if not (same_path or multi_path_allowed):\n rgs = set(list(part[\"piece\"][1]) + list(next_part[-1][\"piece\"][1]))\n multi_path_allowed = (rgs == {None}) or (None not in rgs)\n\n # Criteria #3 for aggregating parts: The parts share a\n # directory at the \"depth\" allowed by `aggregation_depth`\n if not same_path and multi_path_allowed:\n if aggregation_depth is True:\n multi_path_allowed = True\n elif isinstance(aggregation_depth, int):\n # Make sure files share the same directory\n root = stat[\"file_path_0\"].split(fs.sep)[:-aggregation_depth]\n next_root = next_stat[\"file_path_0\"].split(fs.sep)[\n :-aggregation_depth\n ]\n multi_path_allowed = root == next_root\n else:\n raise ValueError(\n f\"{aggregation_depth} not supported for `aggregation_depth`\"\n )\n\n def _check_row_group_criteria(stat, next_stat):\n if use_row_group_criteria:\n return (next_stat[\"num-row-groups\"] + stat[\"num-row-groups\"]) <= int(\n split_row_groups\n )\n else:\n return False\n\n def _check_chunksize_criteria(stat, next_stat):\n if use_chunksize_criteria:\n return (\n next_stat[\"total_byte_size\"] + stat[\"total_byte_size\"]\n ) <= chunksize\n else:\n return False\n\n stat[\"num-row-groups\"] = stat.get(\"num-row-groups\", 1)\n next_stat[\"num-row-groups\"] = next_stat.get(\"num-row-groups\", 1)\n\n if (same_path or multi_path_allowed) and (\n _check_row_group_criteria(stat, next_stat)\n or _check_chunksize_criteria(stat, next_stat)\n ):\n\n # Update part list\n next_part.append(part)\n\n # Update Statistics\n next_stat[\"total_byte_size\"] += stat[\"total_byte_size\"]\n next_stat[\"num-rows\"] += stat[\"num-rows\"]\n 
next_stat[\"num-row-groups\"] += stat[\"num-row-groups\"]\n for col, col_add in zip(next_stat[\"columns\"], stat[\"columns\"]):\n if col[\"name\"] != col_add[\"name\"]:\n raise ValueError(\"Columns are different!!\")\n if \"min\" in col:\n col[\"min\"] = min(col[\"min\"], col_add[\"min\"])\n if \"max\" in col:\n col[\"max\"] = max(col[\"max\"], col_add[\"max\"])\n else:\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n next_part, next_stat = [part.copy()], stat.copy()\n\n parts_agg.append(next_part)\n stats_agg.append(next_stat)\n\n return parts_agg, stats_agg\n\n\nDataFrame.to_parquet.__doc__ = to_parquet.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.initialize_write_FastParquetEngine.initialize_write.return._fmd_schema_i_offset_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.initialize_write_FastParquetEngine.initialize_write.return._fmd_schema_i_offset_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1132, "end_line": 1225, "span_ids": ["FastParquetEngine.initialize_write"], "tokens": 702}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n schema=None,\n object_encoding=\"utf8\",\n index_cols=None,\n custom_metadata=None,\n **kwargs,\n ):\n if index_cols is None:\n index_cols = []\n if append and division_info is None:\n ignore_divisions = True\n fs.mkdirs(path, exist_ok=True)\n if object_encoding == \"infer\" or (\n isinstance(object_encoding, dict) and \"infer\" in object_encoding.values()\n ):\n raise ValueError(\n '\"infer\" not allowed as object encoding, '\n \"because this required data in memory.\"\n )\n\n if append:\n try:\n # to append to a dataset without _metadata, need to load\n # _common_metadata or any data file here\n pf = fastparquet.api.ParquetFile(path, open_with=fs.open)\n except (OSError, ValueError):\n # append for create\n append = False\n if append:\n if pf.file_scheme not in [\"hive\", \"empty\", \"flat\"]:\n raise ValueError(\n \"Requested file scheme is hive, but existing file scheme is not.\"\n )\n elif (set(pf.columns) != set(df.columns) - set(partition_on)) or (\n set(partition_on) != set(pf.cats)\n ):\n raise ValueError(\n \"Appended columns not the same.\\n\"\n \"Previous: {} | New: {}\".format(pf.columns, list(df.columns))\n )\n elif (pd.Series(pf.dtypes).loc[pf.columns] != df[pf.columns].dtypes).any():\n raise ValueError(\n \"Appended dtypes differ.\\n{}\".format(\n set(pf.dtypes.items()) ^ set(df.dtypes.items())\n )\n )\n else:\n df = df[pf.columns + partition_on]\n\n fmd = pf.fmd\n i_offset = 
fastparquet.writer.find_max_part(fmd.row_groups)\n if not ignore_divisions:\n if not set(index_cols).intersection([division_info[\"name\"]]):\n ignore_divisions = True\n if not ignore_divisions:\n minmax = fastparquet.api.sorted_partitioned_columns(pf)\n old_end = minmax[index_cols[0]][\"max\"][-1]\n divisions = division_info[\"divisions\"]\n if divisions[0] < old_end:\n raise ValueError(\n \"Appended divisions overlapping with previous ones.\"\n \"\\n\"\n \"Previous: {} | New: {}\".format(old_end, divisions[0])\n )\n else:\n fmd = fastparquet.writer.make_metadata(\n df._meta,\n object_encoding=object_encoding,\n index_cols=index_cols,\n ignore_columns=partition_on,\n **kwargs,\n )\n i_offset = 0\n if custom_metadata is not None:\n kvm = fmd.key_value_metadata or []\n kvm.extend(\n [\n fastparquet.parquet_thrift.KeyValue(key=key, value=value)\n for key, value in custom_metadata.items()\n ]\n )\n fmd.key_value_metadata = kvm\n\n schema = None # ArrowEngine compatibility\n return (fmd, schema, i_offset)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_partition_FastParquetEngine.write_partition.if_return_metadata_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_partition_FastParquetEngine.write_partition.if_return_metadata_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1135, "end_line": 1196, "span_ids": ["FastParquetEngine.write_partition"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def write_partition(\n cls,\n df,\n path,\n fs,\n filename,\n partition_on,\n return_metadata,\n fmd=None,\n compression=None,\n custom_metadata=None,\n **kwargs,\n ):\n # Update key/value metadata if necessary\n fmd = copy.copy(fmd)\n for s in fmd.schema:\n if isinstance(s.name, bytes):\n # can be coerced to bytes on copy\n s.name = s.name.decode()\n if custom_metadata and fmd is not None:\n fmd.key_value_metadata = fmd.key_value_metadata + (\n [\n fastparquet.parquet_thrift.KeyValue(key=key, value=value)\n for key, value in custom_metadata.items()\n ]\n )\n\n if not len(df):\n # Write nothing for empty partitions\n rgs = []\n elif partition_on:\n mkdirs = lambda x: fs.mkdirs(x, exist_ok=True)\n if parse_version(fastparquet.__version__) >= parse_version(\"0.1.4\"):\n rgs = partition_on_columns(\n df, partition_on, path, filename, fmd, compression, fs.open, mkdirs\n )\n else:\n rgs = partition_on_columns(\n df,\n partition_on,\n path,\n filename,\n fmd,\n fs.sep,\n compression,\n fs.open,\n mkdirs,\n )\n else:\n with fs.open(fs.sep.join([path, filename]), \"wb\") as fil:\n fmd.num_rows = len(df)\n rg = make_part_file(\n fil, df, fmd.schema, compression=compression, 
fmd=fmd\n )\n for chunk in rg.columns:\n chunk.file_path = filename\n rgs = [rg]\n if return_metadata:\n return rgs\n else:\n return []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_metadata_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.write_metadata_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1290, "end_line": 1311, "span_ids": ["FastParquetEngine.write_metadata"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n _meta = copy.copy(meta)\n rgs = meta.row_groups\n if parts:\n for rg in parts:\n if rg is not None:\n if isinstance(rg, list):\n for r in rg:\n rgs.append(r)\n else:\n rgs.append(rg)\n _meta.row_groups = rgs\n fn = fs.sep.join([path, \"_metadata\"])\n fastparquet.writer.write_common_metadata(\n fn, _meta, open_with=fs.open, no_row_groups=False\n )\n\n # if appending, could skip this, but would need to check existence\n fn = fs.sep.join([path, \"_common_metadata\"])\n fastparquet.writer.write_common_metadata(fn, _meta, open_with=fs.open)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_re_Engine.read_metadata.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_re_Engine.read_metadata.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 75, "span_ids": ["imports", "Engine.read_metadata", "Engine"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import re\n\nimport pandas as pd\n\nfrom .... 
import config\nfrom ....core import flatten\nfrom ....utils import natural_sort_key\nfrom ..utils import _is_local_fs\n\n\nclass Engine:\n \"\"\"The API necessary to provide a new Parquet reader/writer\"\"\"\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs,\n ):\n \"\"\"Gather metadata about a Parquet Dataset to prepare for a read\n\n This function is called once in the user's Python session to gather\n important metadata about the parquet dataset.\n\n Parameters\n ----------\n fs: FileSystem\n paths: List[str]\n A list of paths to files (or their equivalents)\n categories: list, dict or None\n Column(s) containing categorical data.\n index: str, List[str], or False\n The column name(s) to be used as the index.\n If set to ``None``, pandas metadata (if available) can be used\n to reset the value in this function\n gather_statistics: bool\n Whether or not to gather statistics data. If ``None``, we only\n gather statistics data if there is a _metadata file available to\n query (cheaply)\n filters: list\n List of filters to apply, like ``[('x', '>', 0), ...]``.\n **kwargs: dict (of dicts)\n User-specified arguments to pass on to backend.\n Top level key can be used by engine to select appropriate dict.\n\n Returns\n -------\n meta: pandas.DataFrame\n An empty DataFrame object to use for metadata.\n Should have appropriate column names and dtypes but need not have\n any actual data\n statistics: Optional[List[Dict]]\n Either None, if no statistics were found, or a list of dictionaries\n of statistics data, one dict for every partition (see the next\n return value). The statistics should look like the following:\n\n [\n {'num-rows': 1000, 'columns': [\n {'name': 'id', 'min': 0, 'max': 100},\n {'name': 'x', 'min': 0.0, 'max': 1.0},\n ]},\n ...\n ]\n parts: List[object]\n A list of objects to be passed to ``Engine.read_partition``.\n Each object should represent a piece of data (usually a row-group).\n The type of each object can be anything, as long as the\n engine's read_partition function knows how to interpret it.\n \"\"\"\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.read_partition_Engine.read_partition.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.read_partition_Engine.read_partition.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 70, "end_line": 95, "span_ids": ["Engine.read_partition"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def read_partition(cls, fs, piece, columns, index, **kwargs):\n \"\"\"Read a single piece of a Parquet dataset into a Pandas DataFrame\n\n This 
function is called many times in individual tasks\n\n Parameters\n ----------\n fs: FileSystem\n piece: object\n This is some token that is returned by Engine.read_metadata.\n Typically it represents a row group in a Parquet dataset\n columns: List[str]\n List of column names to pull out of that row group\n index: str, List[str], or False\n The index name(s).\n **kwargs:\n Includes `\"kwargs\"` values stored within the `parts` output\n of `engine.read_metadata`. May also include arguments to be\n passed to the backend (if stored under a top-level `\"read\"` key).\n\n Returns\n -------\n A Pandas DataFrame\n \"\"\"\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.initialize_write_Engine.initialize_write.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.initialize_write_Engine.initialize_write.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 104, "end_line": 144, "span_ids": ["Engine.initialize_write"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n **kwargs,\n ):\n \"\"\"Perform engine-specific initialization steps for this dataset\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n fs: FileSystem\n path: str\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n append: bool\n If True, may use existing metadata (if any) and perform checks\n against the new data being stored.\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n ignore_divisions: bool\n Whether or not to ignore old divisions when appending. 
Otherwise,\n overlapping divisions will lead to an error being raised.\n division_info: dict\n Dictionary containing the divisions and corresponding column name.\n **kwargs: dict\n Other keyword arguments (including `index_cols`)\n\n Returns\n -------\n tuple:\n engine-specific instance\n list of filenames, one per partition\n \"\"\"\n raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_partition_Engine.write_partition.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_partition_Engine.write_partition.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 139, "end_line": 170, "span_ids": ["Engine.write_partition"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def write_partition(\n cls, df, path, fs, filename, partition_on, return_metadata, **kwargs\n ):\n \"\"\"\n Output a partition of a dask.DataFrame. This will correspond to\n one output file, unless partition_on is set, in which case, it will\n correspond to up to one file in each sub-directory.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n path: str\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n fs: FileSystem\n filename: str\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n return_metadata : bool\n Whether to return list of instances from this write, one for each\n output file. 
These will be passed to write_metadata if an output\n metadata file is requested.\n **kwargs: dict\n Other keyword arguments (including `fmd` and `index_cols`)\n\n Returns\n -------\n List of metadata-containing instances (if `return_metadata` is `True`)\n or empty list\n \"\"\"\n raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_metadata_Engine.write_metadata.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.write_metadata_Engine.write_metadata.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 172, "end_line": 196, "span_ids": ["Engine.write_metadata"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n \"\"\"\n Write the shared metadata file for a parquet dataset.\n\n Parameters\n ----------\n parts: List\n Contains metadata objects to write, of the type understood by the\n specific implementation\n meta: non-chunk metadata\n Details that do not depend on the specifics of each chunk write,\n typically the schema and pandas metadata, in a format the writer\n can use.\n fs: FileSystem\n path: str\n Output file to write to, usually ``\"_metadata\"`` in the root of\n the output dataset\n append: boolean\n Whether or not to consolidate new metadata with existing (True)\n or start from scratch (False)\n **kwargs: dict\n Other keyword arguments (including `compression`)\n \"\"\"\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata__parse_pandas_metadata._0_8_0_allows_for_dupli": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata__parse_pandas_metadata._0_8_0_allows_for_dupli", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 199, "end_line": 257, "span_ids": ["_parse_pandas_metadata"], "tokens": 573}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
_parse_pandas_metadata(pandas_metadata):\n \"\"\"Get the set of names from the pandas metadata section\n\n Parameters\n ----------\n pandas_metadata : dict\n Should conform to the pandas parquet metadata spec\n\n Returns\n -------\n index_names : list\n List of strings indicating the actual index names\n column_names : list\n List of strings indicating the actual column names\n storage_name_mapping : dict\n Pairs of storage names (e.g. the field names for\n PyArrow) and actual names. The storage and field names will\n differ for index names for certain writers (pyarrow > 0.8).\n column_indexes_names : list\n The names for ``df.columns.name`` or ``df.columns.names`` for\n a MultiIndex in the columns\n\n Notes\n -----\n This should support metadata written by at least\n\n * fastparquet>=0.1.3\n * pyarrow>=0.7.0\n \"\"\"\n index_storage_names = [\n n[\"name\"] if isinstance(n, dict) else n\n for n in pandas_metadata[\"index_columns\"]\n ]\n index_name_xpr = re.compile(r\"__index_level_\\d+__\")\n\n # older metadata will not have a 'field_name' field so we fall back\n # to the 'name' field\n pairs = [\n (x.get(\"field_name\", x[\"name\"]), x[\"name\"]) for x in pandas_metadata[\"columns\"]\n ]\n\n # Need to reconcile storage and real names. These will differ for\n # pyarrow, which uses __index_level_\\d+__ for the storage name of indexes.\n # The real name may be None (e.g. `df.index.name` is None).\n pairs2 = []\n for storage_name, real_name in pairs:\n if real_name and index_name_xpr.match(real_name):\n real_name = None\n pairs2.append((storage_name, real_name))\n index_names = [name for (storage_name, name) in pairs2 if name != storage_name]\n\n # column_indexes represents df.columns.name\n # It was added to the spec after pandas 0.21.0+, and implemented\n # in PyArrow 0.8. It was added to fastparquet in 0.3.1.\n column_index_names = pandas_metadata.get(\"column_indexes\", [{\"name\": None}])\n column_index_names = [x[\"name\"] for x in column_index_names]\n\n # Now we need to disambiguate between columns and index names. PyArrow\n # 0.8.0+ allows for duplicates between df.index.names and df.columns\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata.if_not_index_names___parse_pandas_metadata.return.index_names_column_names": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__parse_pandas_metadata.if_not_index_names___parse_pandas_metadata.return.index_names_column_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 258, "end_line": 280, "span_ids": ["_parse_pandas_metadata"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _parse_pandas_metadata(pandas_metadata):\n # ... other code\n if not index_names:\n # For PyArrow < 0.8 and any fastparquet. 
This relies on the facts that\n # 1. Those versions used the real index name as the index storage name\n # 2. Those versions did not allow for duplicate index / column names\n # So we know that if a name is in index_storage_names, it must be an\n # index name\n if index_storage_names and isinstance(index_storage_names[0], dict):\n # Cannot handle dictionary case\n index_storage_names = []\n index_names = list(index_storage_names) # make a copy\n index_storage_names2 = set(index_storage_names)\n column_names = [\n name for (storage_name, name) in pairs if name not in index_storage_names2\n ]\n else:\n # For newer PyArrows the storage names differ from the index names\n # iff it's an index level. Though this is a fragile assumption for\n # other systems...\n column_names = [name for (storage_name, name) in pairs2 if name == storage_name]\n\n storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully\n\n return index_names, column_names, storage_name_mapping, column_index_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__normalize_index_columns__normalize_index_columns.return.column_names_index_names": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__normalize_index_columns__normalize_index_columns.return.column_names_index_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 283, "end_line": 342, "span_ids": ["_normalize_index_columns"], "tokens": 478}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_index_columns(user_columns, data_columns, user_index, data_index):\n \"\"\"Normalize user and file-provided column and index names\n\n Parameters\n ----------\n user_columns : None, str or list of str\n data_columns : list of str\n user_index : None, str, or list of str\n data_index : list of str\n\n Returns\n -------\n column_names : list of str\n index_names : list of str\n \"\"\"\n specified_columns = user_columns is not None\n specified_index = user_index is not None\n\n if user_columns is None:\n user_columns = list(data_columns)\n elif isinstance(user_columns, str):\n user_columns = [user_columns]\n else:\n user_columns = list(user_columns)\n\n if user_index is None:\n user_index = data_index\n elif user_index is False:\n # When index is False, use no index and all fields should be treated as\n # columns (unless `columns` provided).\n user_index = []\n data_columns = data_index + data_columns\n elif isinstance(user_index, str):\n user_index = [user_index]\n else:\n user_index = list(user_index)\n\n if specified_index and not specified_columns:\n # Only `index` provided. 
Use specified index, and all column fields\n # that weren't specified as indices\n index_names = user_index\n column_names = [x for x in data_columns if x not in index_names]\n elif specified_columns and not specified_index:\n # Only `columns` provided. Use specified columns, and all index fields\n # that weren't specified as columns\n column_names = user_columns\n index_names = [x for x in data_index if x not in column_names]\n elif specified_index and specified_columns:\n # Both `index` and `columns` provided. Use as specified, but error if\n # they intersect.\n column_names = user_columns\n index_names = user_index\n if set(column_names).intersection(index_names):\n raise ValueError(\"Specified index and column names must not intersect\")\n else:\n # Use default columns and index from the metadata\n column_names = data_columns\n index_names = data_index\n\n return column_names, index_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths._join_path.abs_prefix__analyze_paths._join_path.return.joined": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths._join_path.abs_prefix__analyze_paths._join_path.return.joined", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 364, "end_line": 403, "span_ids": ["_analyze_paths"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _analyze_paths(file_list, fs, root=False):\n\n def _join_path(*path):\n # ... other code\n\n abs_prefix = \"\"\n if path and path[0]:\n if path[0][0] == \"/\":\n abs_prefix = \"/\"\n path = list(path)\n path[0] = path[0][1:]\n elif fs.sep == \"\\\\\" and path[0][1:].startswith(\":/\"):\n # If windows, then look for the \"c:/\" prefix\n abs_prefix = path[0][0:3]\n path = list(path)\n path[0] = path[0][3:]\n\n _scrubbed = []\n for i, p in enumerate(path):\n _scrubbed.extend(_scrub(i, p).split(\"/\"))\n simpler = []\n for s in _scrubbed:\n if s == \".\":\n pass\n elif s == \"..\":\n if simpler:\n if simpler[-1] == \"..\":\n simpler.append(s)\n else:\n simpler.pop()\n elif abs_prefix:\n raise Exception(\"can not get parent of root\")\n else:\n simpler.append(s)\n else:\n simpler.append(s)\n\n if not simpler:\n if abs_prefix:\n joined = abs_prefix\n else:\n joined = \".\"\n else:\n joined = abs_prefix + (\"/\".join(simpler))\n return joined\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql_to_sql._Store_Dask_Dataframe_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql_to_sql._Store_Dask_Dataframe_t", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 680, "end_line": 803, "span_ids": ["to_sql"], "tokens": 1099}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_sql(\n df,\n name: str,\n uri: str,\n schema=None,\n if_exists: str = \"fail\",\n index: bool = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n compute=True,\n parallel=False,\n engine_kwargs=None,\n):\n \"\"\"Store Dask Dataframe to a SQL table\n\n An empty table is created based on the \"meta\" DataFrame (and conforming to the caller's \"if_exists\" preference), and\n then each block calls pd.DataFrame.to_sql (with `if_exists=\"append\"`).\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : str\n Name of SQL table.\n uri : string\n Full sqlalchemy URI for the database connection\n schema : str, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : str or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 legacy mode. 
If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method `.\n compute : bool, default True\n When true, call dask.compute and perform the load into SQL; otherwise, return a Dask object (or array of\n per-block objects when parallel=True)\n parallel : bool, default False\n When true, have each block append itself to the DB table concurrently. This can result in DB rows being in a\n different order than the source DataFrame's corresponding rows. When false, load each block into the SQL DB in\n sequence.\n engine_kwargs : dict or None\n Specific db engine parameters for sqlalchemy\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] https://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n Create a table from scratch with 4 rows.\n\n >>> import pandas as pd\n >>> df = pd.DataFrame([ {'i':i, 's':str(i)*2 } for i in range(4) ])\n >>> from dask.dataframe import from_pandas\n >>> ddf = from_pandas(df, npartitions=2)\n >>> ddf # doctest: +SKIP\n Dask DataFrame Structure:\n i s\n npartitions=2\n 0 int64 object\n 2 ... ...\n 3 ... ...\n Dask Name: from_pandas, 2 tasks\n\n >>> from dask.utils import tmpfile\n >>> from sqlalchemy import create_engine\n >>> with tmpfile() as f:\n ... db = 'sqlite:///%s' %f\n ... ddf.to_sql('test', db)\n ... engine = create_engine(db, echo=False)\n ... result = engine.execute(\"SELECT * FROM test\").fetchall()\n >>> result\n [(0, 0, '00'), (1, 1, '11'), (2, 2, '22'), (3, 3, '33')]\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_simple_test_text_blocks_to_pandas_simple.assert_eq_df_amount_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_simple_test_text_blocks_to_pandas_simple.assert_eq_df_amount_sum_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 178, "end_line": 194, "span_ids": ["test_text_blocks_to_pandas_simple"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@csv_and_table\ndef test_text_blocks_to_pandas_simple(reader, files):\n blocks = [[files[k]] for k in sorted(files)]\n kwargs = {}\n head = pandas_read_text(reader, files[\"2014-01-01.csv\"], b\"\", {})\n header = files[\"2014-01-01.csv\"].split(b\"\\n\")[0] + b\"\\n\"\n\n df = text_blocks_to_pandas(reader, blocks, header, head, kwargs)\n assert isinstance(df, dd.DataFrame)\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n\n values = text_blocks_to_pandas(reader, blocks, header, head, kwargs)\n assert isinstance(values, dd.DataFrame)\n assert hasattr(values, \"dask\")\n assert len(values.dask) == 3\n\n assert_eq(df.amount.sum(), 100 + 200 + 300 + 400 + 500 + 600)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_kwargs_test_text_blocks_to_pandas_kwargs.assert_result_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_kwargs_test_text_blocks_to_pandas_kwargs.assert_result_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 208, "span_ids": ["test_text_blocks_to_pandas_kwargs"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@csv_and_table\ndef test_text_blocks_to_pandas_kwargs(reader, files):\n blocks = [files[k] for k in sorted(files)]\n blocks = [[b] for b in blocks]\n kwargs = {\"usecols\": [\"name\", \"id\"]}\n head = pandas_read_text(reader, files[\"2014-01-01.csv\"], b\"\", kwargs)\n header = files[\"2014-01-01.csv\"].split(b\"\\n\")[0] + b\"\\n\"\n\n df = 
text_blocks_to_pandas(reader, blocks, header, head, kwargs)\n assert list(df.columns) == [\"name\", \"id\"]\n result = df.compute()\n assert (result.columns == df.columns).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_blocked_test_text_blocks_to_pandas_blocked.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_text_blocks_to_pandas_blocked_test_text_blocks_to_pandas_blocked.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 235, "span_ids": ["test_text_blocks_to_pandas_blocked"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@csv_and_table\ndef test_text_blocks_to_pandas_blocked(reader, files):\n header = files[\"2014-01-01.csv\"].split(b\"\\n\")[0] + b\"\\n\"\n blocks = []\n for k in sorted(files):\n b = files[k]\n lines = b.split(b\"\\n\")\n blocks.append([b\"\\n\".join(bs) for bs in partition_all(2, lines)])\n\n df = text_blocks_to_pandas(reader, blocks, header, expected.head(), {})\n assert_eq(\n df.compute().reset_index(drop=True),\n expected.reset_index(drop=True),\n check_dtype=False,\n )\n\n expected2 = expected[[\"name\", \"id\"]]\n df = text_blocks_to_pandas(\n reader, blocks, header, expected2.head(), {\"usecols\": [\"name\", \"id\"]}\n )\n assert_eq(\n df.compute().reset_index(drop=True),\n expected2.reset_index(drop=True),\n check_dtype=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_test_skiprows.with_filetexts_files_mod.assert_eq_df_expected_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_test_skiprows.with_filetexts_files_mod.assert_eq_df_expected_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 239, "end_line": 249, "span_ids": ["test_skiprows"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\ndef test_skiprows(dd_read, pd_read, files):\n files = {name: 
comment_header + b\"\\n\" + content for name, content in files.items()}\n skip = len(comment_header.splitlines())\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", skiprows=skip)\n expected_df = pd.concat([pd_read(n, skiprows=skip) for n in sorted(files)])\n assert_eq(df, expected_df, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_as_list_test_skiprows_as_list.with_filetexts_files_mod.assert_eq_df_expected_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skiprows_as_list_test_skiprows_as_list.with_filetexts_files_mod.assert_eq_df_expected_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 284, "end_line": 300, "span_ids": ["test_skiprows_as_list"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files,units\",\n [\n (dd.read_csv, pd.read_csv, csv_files, csv_units_row),\n (dd.read_table, pd.read_table, tsv_files, tsv_units_row),\n ],\n)\ndef test_skiprows_as_list(dd_read, pd_read, files, units):\n files = {\n name: (comment_header + b\"\\n\" + content.replace(b\"\\n\", b\"\\n\" + units, 1))\n for name, content in files.items()\n }\n skip = [0, 1, 2, 3, 5]\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", skiprows=skip)\n expected_df = pd.concat([pd_read(n, skiprows=skip) for n in sorted(files)])\n assert_eq(df, expected_df, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_blocks_tsv_blocks._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_blocks_tsv_blocks._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 272, "end_line": 280, "span_ids": ["impl:33"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "csv_blocks = [\n [b\"aa,bb\\n1,1.0\\n2,2.0\", b\"10,20\\n30,40\"],\n [b\"aa,bb\\n1,1.0\\n2,2.0\", b\"10,20\\n30,40\"],\n]\n\ntsv_blocks = [\n [b\"aa\\tbb\\n1\\t1.0\\n2\\t2.0\", b\"10\\t20\\n30\\t40\"],\n [b\"aa\\tbb\\n1\\t1.0\\n2\\t2.0\", b\"10\\t20\\n30\\t40\"],\n]", 
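The CSV block fixtures above feed tests of block-wise parsing. A minimal eager stand-in for `text_blocks_to_pandas` (an assumption-laden sketch: the real dask function builds a lazy task graph rather than parsing immediately, and `parse_blocks_eagerly` is a hypothetical name) could look like:

from io import BytesIO

import pandas as pd

def parse_blocks_eagerly(reader, blocks, header):
    # Hypothetical helper: `blocks` is a list of per-file lists of byte
    # blocks, as in `csv_blocks` above. Blocks past the first of each
    # file lack the header row, so the sampled header is re-attached.
    frames = []
    for file_blocks in blocks:
        for block in file_blocks:
            data = block if block.startswith(header) else header + block
            frames.append(reader(BytesIO(data)))
    return pd.concat(frames, ignore_index=True)

df = parse_blocks_eagerly(
    pd.read_csv, [[b"aa,bb\n1,1.0\n2,2.0", b"10,20\n30,40"]], b"aa,bb\n"
)
assert list(df.columns) == ["aa", "bb"] and len(df) == 4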
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_dtypes_test_enforce_dtypes.assert_all_df_dtypes_to_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_dtypes_test_enforce_dtypes.assert_all_df_dtypes_to_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 314, "end_line": 322, "span_ids": ["test_enforce_dtypes"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"reader,blocks\", [(pd.read_csv, csv_blocks), (pd.read_table, tsv_blocks)]\n)\ndef test_enforce_dtypes(reader, blocks):\n head = reader(BytesIO(blocks[0][0]), header=0)\n header = blocks[0][0].split(b\"\\n\")[0] + b\"\\n\"\n dfs = text_blocks_to_pandas(reader, blocks, header, head, {})\n dfs = dask.compute(dfs, scheduler=\"sync\")\n assert all(df.dtypes.to_dict() == head.dtypes.to_dict() for df in dfs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_columns_test_enforce_columns.with_pytest_raises_ValueE.dask_compute_dfs_schedu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_enforce_columns_test_enforce_columns.with_pytest_raises_ValueE.dask_compute_dfs_schedu", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 325, "end_line": 335, "span_ids": ["test_enforce_columns"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"reader,blocks\", [(pd.read_csv, csv_blocks), (pd.read_table, tsv_blocks)]\n)\ndef test_enforce_columns(reader, blocks):\n # Replace second header with different column name\n blocks = [blocks[0], [blocks[1][0].replace(b\"a\", b\"A\"), blocks[1][1]]]\n head = reader(BytesIO(blocks[0][0]), header=0)\n header = blocks[0][0].split(b\"\\n\")[0] + b\"\\n\"\n with pytest.raises(ValueError):\n dfs = text_blocks_to_pandas(reader, blocks, header, head, {}, enforce=True)\n dask.compute(*dfs, scheduler=\"sync\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py___test_read_csv.with_filetext_text_as_fn.assert_eq_result_pd_read": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py___test_read_csv.with_filetext_text_as_fn.assert_eq_result_pd_read", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 338, "end_line": 357, "span_ids": ["test_read_csv", "test_enforce_columns"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#############################\n# read_csv and read_table #\n#############################\n\n\n@pytest.mark.parametrize(\n \"dd_read,pd_read,text,sep\",\n [\n (dd.read_csv, pd.read_csv, csv_text, \",\"),\n (dd.read_table, pd.read_table, tsv_text, \"\\t\"),\n (dd.read_table, pd.read_table, tsv_text2, r\"\\s+\"),\n ],\n)\ndef test_read_csv(dd_read, pd_read, text, sep):\n with filetext(text) as fn:\n f = dd_read(fn, blocksize=30, lineterminator=os.linesep, sep=sep)\n assert list(f.columns) == [\"name\", \"amount\"]\n # index may be different\n result = f.compute(scheduler=\"sync\").reset_index(drop=True)\n assert_eq(result, pd_read(fn, sep=sep))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_large_skiprows_test_read_csv_large_skiprows.with_filetext_text_as_fn.assert_eq_actual_pd_read": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_large_skiprows_test_read_csv_large_skiprows.with_filetext_text_as_fn.assert_eq_actual_pd_read", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 360, "end_line": 371, "span_ids": ["test_read_csv_large_skiprows"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,text,skip\",\n [\n (dd.read_csv, pd.read_csv, csv_text, 7),\n (dd.read_table, pd.read_table, tsv_text, [1, 13]),\n ],\n)\ndef test_read_csv_large_skiprows(dd_read, pd_read, text, skip):\n names = [\"name\", \"amount\"]\n with filetext(text) as fn:\n actual = dd_read(fn, skiprows=skip, names=names)\n assert_eq(actual, pd_read(fn, skiprows=skip, names=names))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_only_in_first_partition_test_read_csv_skiprows_only_in_first_partition.with_filetext_text_as_fn.None_1.with_pytest_raises_ValueE.dd_read_fn_blocksize_30_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_only_in_first_partition_test_read_csv_skiprows_only_in_first_partition.with_filetext_text_as_fn.None_1.with_pytest_raises_ValueE.dd_read_fn_blocksize_30_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 374, "end_line": 391, "span_ids": ["test_read_csv_skiprows_only_in_first_partition"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,text,skip\",\n [\n (dd.read_csv, pd.read_csv, csv_text, 7),\n (dd.read_table, pd.read_table, tsv_text, [1, 12]),\n ],\n)\ndef test_read_csv_skiprows_only_in_first_partition(dd_read, pd_read, text, skip):\n names = [\"name\", \"amount\"]\n with filetext(text) as fn:\n with pytest.warns(UserWarning, match=\"sample=blocksize\"):\n actual = dd_read(fn, blocksize=200, skiprows=skip, names=names).compute()\n assert_eq(actual, pd_read(fn, skiprows=skip, names=names))\n\n with pytest.warns(UserWarning):\n # if new sample does not contain all the skiprows, raise error\n with pytest.raises(ValueError):\n dd_read(fn, blocksize=30, skiprows=skip, names=names)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_test_read_csv_files.with_filetexts_files_mod.assert_eq_df_expected2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_test_read_csv_files.with_filetexts_files_mod.assert_eq_df_expected2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 394, "end_line": 406, "span_ids": ["test_read_csv_files"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\ndef test_read_csv_files(dd_read, pd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\")\n assert_eq(df, expected, check_dtype=False)\n\n fn = \"2014-01-01.csv\"\n df = dd_read(fn)\n expected2 
= pd_read(BytesIO(files[fn]))\n assert_eq(df, expected2, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_list_test_read_csv_files_list.with_filetexts_files_mod.with_pytest_raises_ValueE.dd_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_files_list_test_read_csv_files_list.with_filetexts_files_mod.with_pytest_raises_ValueE.dd_read_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 409, "end_line": 421, "span_ids": ["test_read_csv_files_list"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\ndef test_read_csv_files_list(dd_read, pd_read, files):\n with filetexts(files, mode=\"b\"):\n subset = sorted(files)[:2] # Just first 2\n sol = pd.concat([pd_read(BytesIO(files[k])) for k in subset])\n res = dd_read(subset)\n assert_eq(res, sol, check_dtype=False)\n\n with pytest.raises(ValueError):\n dd_read([])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_test_read_csv_include_path_column.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_test_read_csv_include_path_column.with_filetexts_files_mod.assert_2014_01_03_csv_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 424, "end_line": 437, "span_ids": ["test_read_csv_include_path_column"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\ndef test_read_csv_include_path_column(dd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\n \"2014-01-*.csv\",\n include_path_column=True,\n converters={\"path\": parse_filename},\n )\n filenames = df.path.compute().unique()\n assert \"2014-01-01.csv\" in filenames\n assert \"2014-01-02.csv\" not in filenames\n assert 
\"2014-01-03.csv\" in filenames", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_as_str_test_read_csv_include_path_column_as_str.with_filetexts_files_mod.assert_2014_01_03_csv_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_as_str_test_read_csv_include_path_column_as_str.with_filetexts_files_mod.assert_2014_01_03_csv_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 453, "span_ids": ["test_read_csv_include_path_column_as_str"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\ndef test_read_csv_include_path_column_as_str(dd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\n \"2014-01-*.csv\",\n include_path_column=\"filename\",\n converters={\"filename\": parse_filename},\n )\n filenames = df.filename.compute().unique()\n assert \"2014-01-01.csv\" in filenames\n assert \"2014-01-02.csv\" not in filenames\n assert \"2014-01-03.csv\" in filenames", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_duplicate_name_test_read_csv_include_path_column_is_dtype_category.with_filetexts_files_mod.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_duplicate_name_test_read_csv_include_path_column_is_dtype_category.with_filetexts_files_mod.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 456, "end_line": 477, "span_ids": ["test_read_csv_include_path_column_with_duplicate_name", "test_read_csv_include_path_column_is_dtype_category"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\ndef test_read_csv_include_path_column_with_duplicate_name(dd_read, files):\n with filetexts(files, mode=\"b\"):\n with pytest.raises(ValueError):\n 
dd_read(\"2014-01-*.csv\", include_path_column=\"name\")\n\n\n@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\ndef test_read_csv_include_path_column_is_dtype_category(dd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", include_path_column=True)\n assert df.path.dtype == \"category\"\n assert has_known_categories(df.path)\n\n dfs = dd_read(\"2014-01-*.csv\", include_path_column=True)\n result = dfs.compute()\n assert result.path.dtype == \"category\"\n assert has_known_categories(result.path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py__After_this_point_we_te_test_read_csv_index.with_filetext_csv_text_a.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py__After_this_point_we_te_test_read_csv_index.with_filetext_csv_text_a.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 478, "end_line": 498, "span_ids": ["test_read_csv_include_path_column_with_multiple_partitions_per_file", "test_read_csv_index"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# After this point, we test just using read_csv, as all functionality\n# for both is implemented using the same code.\n\n\ndef test_read_csv_index():\n with filetext(csv_text) as fn:\n f = dd.read_csv(fn, blocksize=20).set_index(\"amount\")\n result = f.compute(scheduler=\"sync\")\n assert result.index.name == \"amount\"\n\n blocks = compute_as_if_collection(\n dd.DataFrame, f.dask, f.__dask_keys__(), scheduler=\"sync\"\n )\n for i, block in enumerate(blocks):\n if i < len(f.divisions) - 2:\n assert (block.index < f.divisions[i + 1]).all()\n if i > 0:\n assert (block.index >= f.divisions[i]).all()\n\n expected = pd.read_csv(fn).set_index(\"amount\")\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_range_test_consistent_dtypes.with_filetext_text_as_fn.assert_df_amount_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_skiprows_range_test_consistent_dtypes.with_filetext_text_as_fn.assert_df_amount_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 502, "end_line": 565, "span_ids": ["test_consistent_dtypes", "test_read_csv_skiprows_range", "test_string_blocksize", 
"test_usecols", "test_skipinitialspace"], "tokens": 430}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_skiprows_range():\n with filetext(csv_text) as fn:\n f = dd.read_csv(fn, skiprows=range(5))\n result = f\n expected = pd.read_csv(fn, skiprows=range(5))\n assert_eq(result, expected)\n\n\ndef test_usecols():\n with filetext(timeseries) as fn:\n df = dd.read_csv(fn, blocksize=30, usecols=[\"High\", \"Low\"])\n df_select = df[[\"High\"]]\n expected = pd.read_csv(fn, usecols=[\"High\", \"Low\"])\n expected_select = expected[[\"High\"]]\n assert (df.compute().values == expected.values).all()\n assert (df_select.compute().values == expected_select.values).all()\n\n\ndef test_string_blocksize():\n with filetext(timeseries) as fn:\n a = dd.read_csv(fn, blocksize=\"30B\")\n b = dd.read_csv(fn, blocksize=\"30\")\n assert a.npartitions == b.npartitions\n\n c = dd.read_csv(fn, blocksize=\"64MiB\")\n assert c.npartitions == 1\n\n\ndef test_skipinitialspace():\n text = normalize_text(\n \"\"\"\n name, amount\n Alice,100\n Bob,-200\n Charlie,300\n Dennis,400\n Edith,-500\n Frank,600\n \"\"\"\n )\n\n with filetext(text) as fn:\n df = dd.read_csv(fn, skipinitialspace=True, blocksize=20)\n\n assert \"amount\" in df.columns\n assert df.amount.max().compute() == 600\n\n\ndef test_consistent_dtypes():\n text = normalize_text(\n \"\"\"\n name,amount\n Alice,100.5\n Bob,-200.5\n Charlie,300\n Dennis,400\n Edith,-500\n Frank,600\n \"\"\"\n )\n\n with filetext(text) as fn:\n df = dd.read_csv(fn, blocksize=30)\n assert df.amount.compute().dtype == float", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_consistent_dtypes_2_test_consistent_dtypes_2.with_filetexts_foo_1_cs.assert_df_name_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_consistent_dtypes_2_test_consistent_dtypes_2.with_filetexts_foo_1_cs.assert_df_name_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 546, "end_line": 568, "span_ids": ["test_consistent_dtypes_2"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_consistent_dtypes_2():\n text1 = normalize_text(\n \"\"\"\n name,amount\n Alice,100\n Bob,-200\n Charlie,300\n \"\"\"\n )\n\n text2 = normalize_text(\n \"\"\"\n name,amount\n 1,400\n 2,-500\n Frank,600\n \"\"\"\n )\n\n with filetexts({\"foo.1.csv\": text1, \"foo.2.csv\": text2}):\n df = dd.read_csv(\"foo.*.csv\", blocksize=25)\n assert df.name.dtype == object\n assert df.name.compute().dtype == object", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_dtypes_test_categorical_dtypes.with_filetexts_foo_1_cs.assert_sorted_res_fruit_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_dtypes_test_categorical_dtypes.with_filetexts_foo_1_cs.assert_sorted_res_fruit_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 571, "end_line": 598, "span_ids": ["test_categorical_dtypes"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_dtypes():\n text1 = normalize_text(\n \"\"\"\n fruit,count\n apple,10\n apple,25\n pear,100\n orange,15\n \"\"\"\n )\n\n text2 = normalize_text(\n \"\"\"\n fruit,count\n apple,200\n banana,300\n orange,400\n banana,10\n \"\"\"\n )\n\n with filetexts({\"foo.1.csv\": text1, \"foo.2.csv\": text2}):\n df = dd.read_csv(\"foo.*.csv\", dtype={\"fruit\": \"category\"}, blocksize=25)\n assert df.fruit.dtype == \"category\"\n assert not has_known_categories(df.fruit)\n res = df.compute()\n assert res.fruit.dtype == \"category\"\n assert sorted(res.fruit.cat.categories) == [\"apple\", \"banana\", \"orange\", \"pear\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_known_test_categorical_known.with_filetexts_foo_1_cs.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_categorical_known_test_categorical_known.with_filetexts_foo_1_cs.None_10", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 601, "end_line": 661, "span_ids": ["test_categorical_known"], "tokens": 512}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_known():\n text1 = normalize_text(\n \"\"\"\n A,B\n a,a\n b,b\n a,a\n \"\"\"\n )\n text2 = normalize_text(\n \"\"\"\n A,B\n a,a\n b,b\n c,c\n \"\"\"\n )\n dtype = pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=False)\n with filetexts({\"foo.1.csv\": text1, \"foo.2.csv\": text2}):\n result = dd.read_csv(\"foo.*.csv\", dtype={\"A\": \"category\", \"B\": \"category\"})\n assert result.A.cat.known is False\n assert result.B.cat.known is False\n 
expected = pd.DataFrame(\n {\n \"A\": pd.Categorical(\n [\"a\", \"b\", \"a\", \"a\", \"b\", \"c\"], categories=dtype.categories\n ),\n \"B\": pd.Categorical(\n [\"a\", \"b\", \"a\", \"a\", \"b\", \"c\"], categories=dtype.categories\n ),\n },\n index=[0, 1, 2, 0, 1, 2],\n )\n assert_eq(result, expected)\n\n # Specify a dtype\n result = dd.read_csv(\"foo.*.csv\", dtype={\"A\": dtype, \"B\": \"category\"})\n assert result.A.cat.known is True\n assert result.B.cat.known is False\n tm.assert_index_equal(result.A.cat.categories, dtype.categories)\n assert result.A.cat.ordered is False\n assert_eq(result, expected)\n\n # ordered\n dtype = pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=True)\n result = dd.read_csv(\"foo.*.csv\", dtype={\"A\": dtype, \"B\": \"category\"})\n expected[\"A\"] = expected[\"A\"].cat.as_ordered()\n assert result.A.cat.known is True\n assert result.B.cat.known is False\n assert result.A.cat.ordered is True\n\n assert_eq(result, expected)\n\n # Specify \"unknown\" categories\n result = dd.read_csv(\n \"foo.*.csv\", dtype=pd.api.types.CategoricalDtype(ordered=False)\n )\n assert result.A.cat.known is False\n\n result = dd.read_csv(\"foo.*.csv\", dtype=\"category\")\n assert result.A.cat.known is False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_compression_multiple_files_test_compression_multiple_files.with_tmpdir_as_tdir_.assert_len_df_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_compression_multiple_files_test_compression_multiple_files.with_tmpdir_as_tdir_.assert_len_df_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 682, "end_line": 697, "span_ids": ["test_compression_multiple_files"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"compression\", [\"infer\", \"gzip\"])\ndef test_compression_multiple_files(compression):\n with tmpdir() as tdir:\n f = gzip.open(os.path.join(tdir, \"a.csv.gz\"), \"wb\")\n f.write(csv_text.encode())\n f.close()\n\n f = gzip.open(os.path.join(tdir, \"b.csv.gz\"), \"wb\")\n f.write(csv_text.encode())\n f.close()\n\n with pytest.warns(UserWarning):\n df = dd.read_csv(os.path.join(tdir, \"*.csv.gz\"), compression=compression)\n\n assert len(df.compute()) == (len(csv_text.split(\"\\n\")) - 1) * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_empty_csv_file_test_read_csv_sensitive_to_enforce.with_filetexts_csv_files_.assert_a__name_b__name": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_empty_csv_file_test_read_csv_sensitive_to_enforce.with_filetexts_csv_files_.assert_a__name_b__name", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 681, "end_line": 698, "span_ids": ["test_empty_csv_file", "test_read_csv_no_sample", "test_read_csv_sensitive_to_enforce"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_csv_file():\n with filetext(\"a,b\") as fn:\n df = dd.read_csv(fn, header=0)\n assert len(df.compute()) == 0\n assert list(df.columns) == [\"a\", \"b\"]\n\n\ndef test_read_csv_no_sample():\n with filetexts(csv_files, mode=\"b\") as fn:\n df = dd.read_csv(fn, sample=False)\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n\n\ndef test_read_csv_sensitive_to_enforce():\n with filetexts(csv_files, mode=\"b\"):\n a = dd.read_csv(\"2014-01-*.csv\", enforce=True)\n b = dd.read_csv(\"2014-01-*.csv\", enforce=False)\n assert a._name != b._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_warn_non_seekable_files_test_warn_non_seekable_files.with_filetexts_files2_mo.with_pytest_raises_NotImp.with_pytest_warns_UserWar.df.dd_read_csv_2014_01_cs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_warn_non_seekable_files_test_warn_non_seekable_files.with_filetexts_files2_mo.with_pytest_raises_NotImp.with_pytest_warns_UserWar.df.dd_read_csv_2014_01_cs", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 767, "end_line": 787, "span_ids": ["test_warn_non_seekable_files"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip\ndef test_warn_non_seekable_files():\n files2 = valmap(compress[\"gzip\"], csv_files)\n with filetexts(files2, mode=\"b\"):\n\n with pytest.warns(UserWarning) as w:\n df = dd.read_csv(\"2014-01-*.csv\", compression=\"gzip\")\n assert df.npartitions == 3\n\n assert len(w) == 1\n msg = str(w[0].message)\n assert \"gzip\" in msg\n assert \"blocksize=None\" in msg\n\n with warnings.catch_warnings(record=True) as record:\n df = dd.read_csv(\"2014-01-*.csv\", compression=\"gzip\", blocksize=None)\n assert not record\n\n with pytest.raises(NotImplementedError):\n with pytest.warns(UserWarning): # needed for pytest\n df = dd.read_csv(\"2014-01-*.csv\", compression=\"foo\")", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_windows_line_terminator_test_windows_line_terminator.with_filetext_text_as_fn.assert_df_a_sum_compute": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_windows_line_terminator_test_windows_line_terminator.with_filetext_text_as_fn.assert_df_a_sum_compute", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 742, "end_line": 747, "span_ids": ["test_windows_line_terminator"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_windows_line_terminator():\n text = \"a,b\\r\\n1,2\\r\\n2,3\\r\\n3,4\\r\\n4,5\\r\\n5,6\\r\\n6,7\"\n with filetext(text) as fn:\n df = dd.read_csv(fn, blocksize=5, lineterminator=\"\\r\\n\")\n assert df.b.sum().compute() == 2 + 3 + 4 + 5 + 6 + 7\n assert df.a.sum().compute() == 1 + 2 + 3 + 4 + 5 + 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_None_test_auto_blocksize_max64mb.assert_isinstance_blocksi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_None_test_auto_blocksize_max64mb.assert_isinstance_blocksi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 778, "end_line": 811, "span_ids": ["test_auto_blocksize", "test_header_None", "test_auto_blocksize_max64mb", "test__infer_block_size"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_header_None():\n with filetexts({\".tmp.1.csv\": \"1,2\", \".tmp.2.csv\": \"\", \".tmp.3.csv\": \"3,4\"}):\n df = dd.read_csv(\".tmp.*.csv\", header=None)\n expected = pd.DataFrame({0: [1, 3], 1: [2, 4]})\n assert_eq(df.compute().reset_index(drop=True), expected)\n\n\ndef test_auto_blocksize():\n assert isinstance(auto_blocksize(3000, 15), int)\n assert auto_blocksize(3000, 3) == 100\n assert auto_blocksize(5000, 2) == 250\n\n\ndef test__infer_block_size(monkeypatch):\n \"\"\"\n psutil returns a total memory of `None` on some systems\n see https://github.com/dask/dask/pull/7601\n \"\"\"\n psutil = pytest.importorskip(\"psutil\")\n\n class MockOutput:\n total = None\n\n def mock_virtual_memory():\n return MockOutput\n\n 
monkeypatch.setattr(psutil, \"virtual_memory\", mock_virtual_memory)\n assert _infer_block_size()\n\n\ndef test_auto_blocksize_max64mb():\n blocksize = auto_blocksize(1000000000000, 3)\n assert blocksize == int(64e6)\n assert isinstance(blocksize, int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_auto_blocksize_csv_test_auto_blocksize_csv.with_filetexts_csv_files_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_auto_blocksize_csv_test_auto_blocksize_csv.with_filetexts_csv_files_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 769, "end_line": 780, "span_ids": ["test_auto_blocksize_csv"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_auto_blocksize_csv(monkeypatch):\n psutil = pytest.importorskip(\"psutil\")\n total_memory = psutil.virtual_memory().total\n cpu_count = psutil.cpu_count()\n mock_read_bytes = mock.Mock(wraps=read_bytes)\n monkeypatch.setattr(dask.dataframe.io.csv, \"read_bytes\", mock_read_bytes)\n\n expected_block_size = auto_blocksize(total_memory, cpu_count)\n with filetexts(csv_files, mode=\"b\"):\n dd.read_csv(\"2014-01-01.csv\")\n assert mock_read_bytes.called\n assert mock_read_bytes.call_args[1][\"blocksize\"] == expected_block_size", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_head_partial_line_fix_test_head_partial_line_fix.with_filetexts_files_.assert_df_dtypes_i8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_head_partial_line_fix_test_head_partial_line_fix.with_filetexts_files_.assert_df_dtypes_i8_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 783, "end_line": 798, "span_ids": ["test_head_partial_line_fix"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_head_partial_line_fix():\n files = {\n \".overflow1.csv\": (\n \"a,b\\n0,'abcdefghijklmnopqrstuvwxyz'\\n1,'abcdefghijklmnopqrstuvwxyz'\"\n ),\n \".overflow2.csv\": \"a,b\\n111111,-11111\\n222222,-22222\\n333333,-33333\\n\",\n }\n with filetexts(files):\n # 64 byte file, 52 characters 
is mid-quote; this should not cause exception in head-handling code.\n dd.read_csv(\".overflow1.csv\", sample=52)\n\n # 35 characters is cuts off before the second number on the last line\n # Should sample to end of line, otherwise pandas will infer `b` to be\n # a float dtype\n df = dd.read_csv(\".overflow2.csv\", sample=35)\n assert (df.dtypes == \"i8\").all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes_test_late_dtypes.date_msg._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes_test_late_dtypes.date_msg._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 850, "end_line": 869, "span_ids": ["test_late_dtypes"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_late_dtypes():\n text = \"numbers,names,more_numbers,integers,dates\\n\"\n for i in range(1000):\n text += \"1,,2,3,2017-10-31 00:00:00\\n\"\n text += \"1.5,bar,2.5,3,4998-01-01 00:00:00\\n\"\n\n date_msg = (\n \"\\n\"\n \"\\n\"\n \"-------------------------------------------------------------\\n\"\n \"\\n\"\n \"The following columns also failed to properly parse as dates:\\n\"\n \"\\n\"\n \"- dates\\n\"\n \"\\n\"\n \"This is usually due to an invalid value in that column. To\\n\"\n \"diagnose and fix it's recommended to drop these columns from the\\n\"\n \"`parse_dates` keyword, and manually convert them to dates later\\n\"\n \"using `dd.to_datetime`.\"\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes.with_filetext_text_as_fn_test_late_dtypes.with_filetext_text_as_fn.assert_eq_res_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_late_dtypes.with_filetext_text_as_fn_test_late_dtypes.with_filetext_text_as_fn.assert_eq_res_sol_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 871, "end_line": 965, "span_ids": ["test_late_dtypes"], "tokens": 880}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_late_dtypes():\n # ... 
other code\n\n with filetext(text) as fn:\n sol = pd.read_csv(fn)\n msg = (\n \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\"\n \"\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| Column | Found | Expected |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| more_numbers | float64 | int64 |\\n\"\n \"| names | object | float64 |\\n\"\n \"| numbers | float64 | int64 |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"\\n\"\n \"- names\\n\"\n \" ValueError(.*)\\n\"\n \"\\n\"\n \"Usually this is due to dask's dtype inference failing, and\\n\"\n \"*may* be fixed by specifying dtypes manually by adding:\\n\"\n \"\\n\"\n \"dtype={'more_numbers': 'float64',\\n\"\n \" 'names': 'object',\\n\"\n \" 'numbers': 'float64'}\\n\"\n \"\\n\"\n \"to the call to `read_csv`/`read_table`.\"\n )\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(fn, sample=50, parse_dates=[\"dates\"]).compute(scheduler=\"sync\")\n assert e.match(msg + date_msg)\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(fn, sample=50).compute(scheduler=\"sync\")\n assert e.match(msg)\n\n msg = (\n \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\"\n \"\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| Column | Found | Expected |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"| more_numbers | float64 | int64 |\\n\"\n \"| numbers | float64 | int64 |\\n\"\n \"+--------------+---------+----------+\\n\"\n \"\\n\"\n \"Usually this is due to dask's dtype inference failing, and\\n\"\n \"*may* be fixed by specifying dtypes manually by adding:\\n\"\n \"\\n\"\n \"dtype={'more_numbers': 'float64',\\n\"\n \" 'numbers': 'float64'}\\n\"\n \"\\n\"\n \"to the call to `read_csv`/`read_table`.\\n\"\n \"\\n\"\n \"Alternatively, provide `assume_missing=True` to interpret\\n\"\n \"all unspecified integer columns as floats.\"\n )\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(fn, sample=50, dtype={\"names\": \"O\"}).compute(scheduler=\"sync\")\n assert str(e.value) == msg\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(\n fn, sample=50, parse_dates=[\"dates\"], dtype={\"names\": \"O\"}\n ).compute(scheduler=\"sync\")\n assert str(e.value) == msg + date_msg\n\n msg = (\n \"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\\n\"\n \"\\n\"\n \"The following columns failed to properly parse as dates:\\n\"\n \"\\n\"\n \"- dates\\n\"\n \"\\n\"\n \"This is usually due to an invalid value in that column. 
To\\n\"\n \"diagnose and fix it's recommended to drop these columns from the\\n\"\n \"`parse_dates` keyword, and manually convert them to dates later\\n\"\n \"using `dd.to_datetime`.\"\n )\n\n with pytest.raises(ValueError) as e:\n dd.read_csv(\n fn,\n sample=50,\n parse_dates=[\"dates\"],\n dtype={\"more_numbers\": float, \"names\": object, \"numbers\": float},\n ).compute(scheduler=\"sync\")\n assert str(e.value) == msg\n\n # Specifying dtypes works\n res = dd.read_csv(\n fn,\n sample=50,\n dtype={\"more_numbers\": float, \"names\": object, \"numbers\": float},\n )\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_assume_missing_test_assume_missing.None_3.assert_df_numbers_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_assume_missing_test_assume_missing.None_3.assert_df_numbers_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 968, "end_line": 1000, "span_ids": ["test_assume_missing"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assume_missing():\n text = \"numbers,names,more_numbers,integers\\n\"\n for i in range(1000):\n text += \"1,foo,2,3\\n\"\n text += \"1.5,bar,2.5,3\\n\"\n with filetext(text) as fn:\n sol = pd.read_csv(fn)\n\n # assume_missing affects all columns\n res = dd.read_csv(fn, sample=50, assume_missing=True)\n assert_eq(res, sol.astype({\"integers\": float}))\n\n # assume_missing doesn't override specified dtypes\n res = dd.read_csv(\n fn, sample=50, assume_missing=True, dtype={\"integers\": \"int64\"}\n )\n assert_eq(res, sol)\n\n # assume_missing works with dtype=None\n res = dd.read_csv(fn, sample=50, assume_missing=True, dtype=None)\n assert_eq(res, sol.astype({\"integers\": float}))\n\n text = \"numbers,integers\\n\"\n for i in range(1000):\n text += \"1,2\\n\"\n text += \"1.5,2\\n\"\n\n with filetext(text) as fn:\n sol = pd.read_csv(fn)\n\n # assume_missing ignored when all dtypes specifed\n df = dd.read_csv(fn, sample=30, dtype=\"int64\", assume_missing=True)\n assert df.numbers.dtype == \"int64\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_index_col_test_read_csv_with_datetime_index_partitions_one.with_filetext_timeseries_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_index_col_test_read_csv_with_datetime_index_partitions_one.with_filetext_timeseries_.None_1", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1003, "end_line": 1027, "span_ids": ["test_index_col", "test_read_csv_with_datetime_index_partitions_one"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_col():\n with filetext(csv_text) as fn:\n try:\n dd.read_csv(fn, blocksize=30, index_col=\"name\")\n assert False\n except ValueError as e:\n assert \"set_index\" in str(e)\n\n\ndef test_read_csv_with_datetime_index_partitions_one():\n with filetext(timeseries) as fn:\n df = pd.read_csv(\n fn, index_col=0, header=0, usecols=[0, 4], parse_dates=[\"Date\"]\n )\n # blocksize set to explicitly set to single chunk\n ddf = dd.read_csv(\n fn, header=0, usecols=[0, 4], parse_dates=[\"Date\"], blocksize=10000000\n ).set_index(\"Date\")\n assert_eq(df, ddf)\n\n # because fn is so small, by default, this will only be one chunk\n ddf = dd.read_csv(fn, header=0, usecols=[0, 4], parse_dates=[\"Date\"]).set_index(\n \"Date\"\n )\n assert_eq(df, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_encoding_gh601_test_encoding_gh601.with_tmpfile_csv_as_f.assert_eq_d_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_encoding_gh601_test_encoding_gh601.with_tmpfile_csv_as_f.assert_eq_d_a_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1047, "end_line": 1069, "span_ids": ["test_encoding_gh601"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"encoding\",\n [\n pytest.param(\"utf-16\", marks=xfail_pandas_100),\n pytest.param(\"utf-16-le\", marks=xfail_pandas_100),\n \"utf-16-be\",\n ],\n)\ndef test_encoding_gh601(encoding):\n ar = pd.Series(range(0, 100))\n br = ar % 7\n cr = br * 3.3\n dr = br / 1.9836\n test_df = pd.DataFrame({\"a\": ar, \"b\": br, \"c\": cr, \"d\": dr})\n\n with tmpfile(\".csv\") as fn:\n test_df.to_csv(fn, encoding=encoding, index=False)\n\n a = pd.read_csv(fn, encoding=encoding)\n d = dd.read_csv(fn, encoding=encoding, blocksize=1000)\n d = d.compute()\n d.index = range(len(d.index))\n assert_eq(d, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_header_issue_823_test_none_usecols.with_filetext_csv_text_a.assert_eq_df_pd_read_csv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_header_issue_823_test_none_usecols.with_filetext_csv_text_a.assert_eq_df_pd_read_csv", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1072, "end_line": 1085, "span_ids": ["test_read_csv_header_issue_823", "test_none_usecols"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_header_issue_823():\n text = \"\"\"a b c-d\\n1 2 3\\n4 5 6\"\"\".replace(\" \", \"\\t\")\n with filetext(text) as fn:\n df = dd.read_csv(fn, sep=\"\\t\")\n assert_eq(df, pd.read_csv(fn, sep=\"\\t\"))\n\n df = dd.read_csv(fn, delimiter=\"\\t\")\n assert_eq(df, pd.read_csv(fn, delimiter=\"\\t\"))\n\n\ndef test_none_usecols():\n with filetext(csv_text) as fn:\n df = dd.read_csv(fn, usecols=None)\n assert_eq(df, pd.read_csv(fn, usecols=None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_parse_dates_multi_column_test_parse_dates_multi_column.with_filetext_pdmc_text_.assert_len_df_len_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_parse_dates_multi_column_test_parse_dates_multi_column.with_filetext_pdmc_text_.assert_len_df_len_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1088, "end_line": 1113, "span_ids": ["test_parse_dates_multi_column"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_dates_multi_column():\n pdmc_text = normalize_text(\n \"\"\"\n ID,date,time\n 10,2003-11-04,180036\n 11,2003-11-05,125640\n 12,2003-11-01,2519\n 13,2003-10-22,142559\n 14,2003-10-24,163113\n 15,2003-10-20,170133\n 16,2003-11-11,160448\n 17,2003-11-03,171759\n 18,2003-11-07,190928\n 19,2003-10-21,84623\n 20,2003-10-25,192207\n 21,2003-11-13,180156\n 22,2003-11-15,131037\n \"\"\"\n )\n\n with filetext(pdmc_text) as fn:\n ddf = dd.read_csv(fn, parse_dates=[[\"date\", \"time\"]])\n df = pd.read_csv(fn, parse_dates=[[\"date\", \"time\"]])\n\n assert (df.columns == ddf.columns).all()\n assert len(df) == len(ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_error_if_sample_is_too_small_test_error_if_sample_is_too_small.None_1.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_error_if_sample_is_too_small_test_error_if_sample_is_too_small.None_1.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1163, "end_line": 1189, "span_ids": ["test_error_if_sample_is_too_small"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_error_if_sample_is_too_small():\n text = \"AAAAA,BBBBB,CCCCC,DDDDD,EEEEE\\n1,2,3,4,5\\n6,7,8,9,10\\n11,12,13,14,15\"\n with filetext(text) as fn:\n # Sample size stops mid header row\n sample = 20\n with pytest.raises(ValueError):\n dd.read_csv(fn, sample=sample)\n\n # Saying no header means this is fine\n assert_eq(\n dd.read_csv(fn, sample=sample, header=None), pd.read_csv(fn, header=None)\n )\n\n skiptext = \"# skip\\n# these\\n# lines\\n\"\n\n text = skiptext + text\n with filetext(text) as fn:\n # Sample size stops mid header row\n sample = 20 + len(skiptext)\n with pytest.raises(ValueError):\n dd.read_csv(fn, sample=sample, skiprows=3)\n\n # Saying no header means this is fine\n assert_eq(\n dd.read_csv(fn, sample=sample, header=None, skiprows=3),\n pd.read_csv(fn, header=None, skiprows=3),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_names_not_none_test_read_csv_names_not_none.with_filetext_text_as_fn.assert_eq_df_ddf_check_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_names_not_none_test_read_csv_names_not_none.with_filetext_text_as_fn.assert_eq_df_ddf_check_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1192, "end_line": 1205, "span_ids": ["test_read_csv_names_not_none"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_names_not_none():\n text = (\n \"Alice,100\\n\"\n \"Bob,-200\\n\"\n \"Charlie,300\\n\"\n \"Dennis,400\\n\"\n \"Edith,-500\\n\"\n \"Frank,600\\n\"\n )\n names = [\"name\", \"amount\"]\n with filetext(text) as fn:\n ddf = dd.read_csv(fn, names=names, blocksize=16)\n df = pd.read_csv(fn, 
names=names)\n assert_eq(df, ddf, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_multiple_files_cornercases_test_to_csv_multiple_files_cornercases.None_3.assert_eq_result_df16_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_multiple_files_cornercases_test_to_csv_multiple_files_cornercases.None_3.assert_eq_result_df16_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1236, "end_line": 1290, "span_ids": ["test_to_csv_multiple_files_cornercases"], "tokens": 482}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_multiple_files_cornercases():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n with pytest.raises(ValueError):\n fn = os.path.join(dn, \"data_*_*.csv\")\n a.to_csv(fn)\n\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n }\n )\n a = dd.from_pandas(df16, 16)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, index=False)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df16)\n\n # test handling existing files when links are optimized out\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n a.to_csv(dn, index=False)\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, mode=\"w\", index=False)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n # test handling existing files when links are optimized out\n a = dd.from_pandas(df16, 16)\n with tmpdir() as dn:\n a.to_csv(dn, index=False)\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, mode=\"w\", index=False)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df16)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_test_to_single_csv.for_npartitions_in_1_2_.None_1.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_test_to_single_csv.for_npartitions_in_1_2_.None_1.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1293, "end_line": 1309, 
"span_ids": ["test_to_single_csv"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n\n for npartitions in [1, 2]:\n a = dd.from_pandas(df, npartitions)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n a.to_csv(fn, index=False, single_file=True)\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n r = a.to_csv(fn, index=False, compute=False, single_file=True)\n dask.compute(r, scheduler=\"sync\")\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_name_function_test_to_single_csv_with_name_function.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_fn_name_functio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_name_function_test_to_single_csv_with_name_function.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_fn_name_functio", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1312, "end_line": 1321, "span_ids": ["test_to_single_csv_with_name_function"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv_with_name_function():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n a = dd.from_pandas(df, 1)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n with pytest.raises(\n ValueError,\n match=\"name_function is not supported under the single file mode\",\n ):\n a.to_csv(fn, name_function=lambda x: x, index=False, single_file=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_header_first_partition_only_test_to_single_csv_with_header_first_partition_only.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_with_header_first_partition_only_test_to_single_csv_with_header_first_partition_only.with_tmpdir_as_dn_.with_pytest_raises_.a_to_csv_", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1324, "end_line": 1335, "span_ids": ["test_to_single_csv_with_header_first_partition_only"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv_with_header_first_partition_only():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n a = dd.from_pandas(df, 1)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv\")\n with pytest.raises(\n ValueError,\n match=\"header_first_partition_only cannot be False in the single file mode.\",\n ):\n a.to_csv(\n fn, index=False, header_first_partition_only=False, single_file=True\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_gzip_test_to_single_csv_gzip.for_npartitions_in_1_2_.with_tmpdir_as_dn_.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_single_csv_gzip_test_to_single_csv_gzip.for_npartitions_in_1_2_.with_tmpdir_as_dn_.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1338, "end_line": 1347, "span_ids": ["test_to_single_csv_gzip"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_single_csv_gzip():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n\n for npartitions in [1, 2]:\n a = dd.from_pandas(df, npartitions)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"test.csv.gz\")\n a.to_csv(fn, index=False, compression=\"gzip\", single_file=True)\n result = pd.read_csv(fn, compression=\"gzip\").reset_index(drop=True)\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_gzip_test_to_csv_gzip.for_npartitions_in_1_2_.with_tmpfile_csv_as_fn.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_gzip_test_to_csv_gzip.for_npartitions_in_1_2_.with_tmpfile_csv_as_fn.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", 
"start_line": 1350, "end_line": 1361, "span_ids": ["test_to_csv_gzip"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"to_csv does not support compression\")\ndef test_to_csv_gzip():\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n for npartitions in [1, 2]:\n a = dd.from_pandas(df, npartitions)\n with tmpfile(\"csv\") as fn:\n a.to_csv(fn, compression=\"gzip\")\n result = pd.read_csv(fn, index_col=0, compression=\"gzip\")\n tm.assert_frame_equal(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_nodir_test_to_csv_nodir.assert_result_x_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_nodir_test_to_csv_nodir.assert_result_x_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1364, "end_line": 1376, "span_ids": ["test_to_csv_nodir"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_nodir():\n # See #6062 https://github.com/intake/filesystem_spec/pull/271 and\n df0 = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n df = dd.from_pandas(df0, npartitions=2)\n with tmpdir() as dir:\n dir0 = os.path.join(str(dir), \"createme\")\n df.to_csv(dir0)\n assert \"createme\" in os.listdir(dir)\n assert os.listdir(dir0)\n result = dd.read_csv(os.path.join(dir0, \"*\")).compute()\n assert (result.x.values == df0.x.values).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_simple_test_to_csv_simple.assert_result_x_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_simple_test_to_csv_simple.assert_result_x_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1379, "end_line": 1389, "span_ids": ["test_to_csv_simple"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_simple():\n df0 = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n df = dd.from_pandas(df0, npartitions=2)\n with tmpdir() as dir:\n dir = str(dir)\n df.to_csv(dir)\n assert os.listdir(dir)\n result = dd.read_csv(os.path.join(dir, \"*\")).compute()\n assert (result.x.values == df0.x.values).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_series_test_to_csv_series.assert_result_x_df0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_series_test_to_csv_series.assert_result_x_df0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1392, "end_line": 1400, "span_ids": ["test_to_csv_series"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_series():\n df0 = pd.Series([\"a\", \"b\", \"c\", \"d\"], index=[1.0, 2.0, 3.0, 4.0])\n df = dd.from_pandas(df0, npartitions=2)\n with tmpdir() as dir:\n dir = str(dir)\n df.to_csv(dir, header=False)\n assert os.listdir(dir)\n result = dd.read_csv(os.path.join(dir, \"*\"), header=None, names=[\"x\"]).compute()\n assert (result.x == df0).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_with_get_test_to_csv_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_with_get_test_to_csv_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1403, "end_line": 1419, "span_ids": ["test_to_csv_with_get"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_with_get():\n from dask.multiprocessing import get as mp_get\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 
3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpdir() as dn:\n ddf.to_csv(dn, index=False, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n result = dd.read_csv(os.path.join(dn, \"*\"))\n assert_eq(result, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_warns_using_scheduler_argument_test_to_csv_warns_using_scheduler_argument.with_tmpdir_as_dn_.with_pytest_warns_FutureW.ddf_to_csv_dn_index_Fals": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_warns_using_scheduler_argument_test_to_csv_warns_using_scheduler_argument.with_tmpdir_as_dn_.with_pytest_warns_FutureW.ddf_to_csv_dn_index_Fals", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1422, "end_line": 1433, "span_ids": ["test_to_csv_warns_using_scheduler_argument"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_warns_using_scheduler_argument():\n from dask.multiprocessing import get as mp_get\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n def my_get(*args, **kwargs):\n return mp_get(*args, **kwargs)\n\n with tmpdir() as dn:\n with pytest.warns(FutureWarning):\n ddf.to_csv(dn, index=False, scheduler=my_get)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_errors_using_multiple_scheduler_args_test_to_csv_errors_using_multiple_scheduler_args.with_tmpdir_as_dn_.with_pytest_raises_ValueE.ddf_to_csv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_errors_using_multiple_scheduler_args_test_to_csv_errors_using_multiple_scheduler_args.with_tmpdir_as_dn_.with_pytest_raises_ValueE.ddf_to_csv_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1436, "end_line": 1449, "span_ids": ["test_to_csv_errors_using_multiple_scheduler_args"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_errors_using_multiple_scheduler_args():\n from dask.multiprocessing import get as mp_get\n\n df = 
pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n def my_get(*args, **kwargs):\n return mp_get(*args, **kwargs)\n\n with tmpdir() as dn:\n with pytest.raises(ValueError) and pytest.warns(FutureWarning):\n ddf.to_csv(\n dn, index=False, scheduler=my_get, compute_kwargs={\"scheduler\": my_get}\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_keeps_all_non_scheduler_compute_kwargs_test_to_csv_paths.os_remove_foo1_csv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_keeps_all_non_scheduler_compute_kwargs_test_to_csv_paths.os_remove_foo1_csv_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1452, "end_line": 1478, "span_ids": ["test_to_csv_paths", "test_to_csv_keeps_all_non_scheduler_compute_kwargs"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_keeps_all_non_scheduler_compute_kwargs():\n from dask.multiprocessing import get as mp_get\n\n def my_get(*args, **kwargs):\n assert kwargs[\"test_kwargs_passed\"] == \"foobar\"\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpdir() as dn:\n ddf.to_csv(\n dn,\n index=False,\n compute_kwargs={\"scheduler\": my_get, \"test_kwargs_passed\": \"foobar\"},\n )\n\n\ndef test_to_csv_paths():\n df = pd.DataFrame({\"A\": range(10)})\n ddf = dd.from_pandas(df, npartitions=2)\n paths = ddf.to_csv(\"foo*.csv\")\n assert paths[0].endswith(\"foo0.csv\")\n assert paths[1].endswith(\"foo1.csv\")\n\n os.remove(\"foo0.csv\")\n os.remove(\"foo1.csv\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_empty_dataframe_test_to_csv_header_empty_dataframe.with_tmpdir_as_dn_.os_remove_filename_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_empty_dataframe_test_to_csv_header_empty_dataframe.with_tmpdir_as_dn_.os_remove_filename_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1571, "end_line": 1583, "span_ids": ["test_to_csv_header_empty_dataframe"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"header, expected\", [(False, \"\"), (True, \"x,y\\n\")])\ndef test_to_csv_header_empty_dataframe(header, expected):\n dfe = pd.DataFrame({\"x\": [], \"y\": []})\n ddfe = dd.from_pandas(dfe, npartitions=1)\n\n with tmpdir() as dn:\n ddfe.to_csv(os.path.join(dn, \"fooe*.csv\"), index=False, header=header)\n assert not os.path.exists(os.path.join(dn, \"fooe1.csv\"))\n filename = os.path.join(dn, \"fooe0.csv\")\n with open(filename) as fp:\n line = fp.readline()\n assert line == expected\n os.remove(filename)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_test_to_csv_header.with_tmpdir_as_dn_.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_header_test_to_csv_header.with_tmpdir_as_dn_.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1586, "end_line": 1623, "span_ids": ["test_to_csv_header"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"header,header_first_partition_only,expected_first,expected_next\",\n [\n (False, False, \"a,1\\n\", \"d,4\\n\"),\n (True, False, \"x,y\\n\", \"x,y\\n\"),\n (False, True, \"a,1\\n\", \"d,4\\n\"),\n (True, True, \"x,y\\n\", \"d,4\\n\"),\n ([\"aa\", \"bb\"], False, \"aa,bb\\n\", \"aa,bb\\n\"),\n ([\"aa\", \"bb\"], True, \"aa,bb\\n\", \"d,4\\n\"),\n ],\n)\ndef test_to_csv_header(\n header, header_first_partition_only, expected_first, expected_next\n):\n partition_count = 2\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], \"y\": [1, 2, 3, 4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=partition_count)\n\n with tmpdir() as dn:\n # Test NO header case\n # (header=False, header_first_chunk_only not passed)\n ddf.to_csv(\n os.path.join(dn, \"fooa*.csv\"),\n index=False,\n header=header,\n header_first_partition_only=header_first_partition_only,\n )\n filename = os.path.join(dn, \"fooa0.csv\")\n with open(filename) as fp:\n line = fp.readline()\n assert line == expected_first\n os.remove(filename)\n\n filename = os.path.join(dn, \"fooa1.csv\")\n with open(filename) as fp:\n line = fp.readline()\n assert line == expected_next\n os.remove(filename)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_pd_test_make_timeseries.assert_a__name_e__name": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_pd_test_make_timeseries.assert_a__name_e__name", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 72, "span_ids": ["imports", "test_make_timeseries"], "tokens": 598}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.blockwise import Blockwise, optimize_blockwise\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.optimize import optimize_dataframe_getitem\n\n\ndef test_make_timeseries():\n df = dd.demo.make_timeseries(\n \"2000\", \"2015\", {\"A\": float, \"B\": int, \"C\": str}, freq=\"2D\", partition_freq=\"6M\"\n )\n\n assert df.divisions[0] == pd.Timestamp(\"2000-01-31\")\n assert df.divisions[-1] == pd.Timestamp(\"2014-07-31\")\n tm.assert_index_equal(df.columns, pd.Index([\"A\", \"B\", \"C\"]))\n assert df[\"A\"].head().dtype == float\n assert df[\"B\"].head().dtype == int\n assert df[\"C\"].head().dtype == object\n assert df.index.name == \"timestamp\"\n assert df.head().index.name == df.index.name\n assert df.divisions == tuple(pd.date_range(start=\"2000\", end=\"2015\", freq=\"6M\"))\n\n tm.assert_frame_equal(df.head(), df.head())\n\n a = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"6M\",\n seed=123,\n )\n b = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"6M\",\n seed=123,\n )\n c = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"6M\",\n seed=456,\n )\n d = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"2D\",\n partition_freq=\"3M\",\n seed=123,\n )\n e = dd.demo.make_timeseries(\n \"2000\",\n \"2015\",\n {\"A\": float, \"B\": int, \"C\": str},\n freq=\"1D\",\n partition_freq=\"6M\",\n seed=123,\n )\n tm.assert_frame_equal(a.head(), b.head())\n assert not (a.head(10) == c.head(10)).all().all()\n assert a._name == b._name\n assert a._name != c._name\n assert a._name != d._name\n assert a._name != e._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_fancy_keywords_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_fancy_keywords_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 154, "end_line": 181, "span_ids": ["test_make_timeseries_fancy_keywords", "test_make_timeseries_getitem_compute"], "tokens": 222}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_timeseries_fancy_keywords():\n df = dd.demo.make_timeseries(\n \"2000\",\n \"2001\",\n {\"A_B\": int, \"B_\": int, \"C\": str},\n freq=\"1D\",\n partition_freq=\"6M\",\n A_B_lam=1000000,\n B__lam=2,\n )\n a_cardinality = df.A_B.nunique()\n b_cardinality = df.B_.nunique()\n\n aa, bb = dask.compute(a_cardinality, b_cardinality, scheduler=\"single-threaded\")\n\n assert 100 < aa <= 10000000\n assert 1 < bb <= 100\n\n\ndef test_make_timeseries_getitem_compute():\n # See https://github.com/dask/dask/issues/7692\n\n df = dd.demo.make_timeseries()\n df2 = df[df.y > 0]\n df3 = df2.compute()\n assert df3[\"y\"].min() > 0\n assert list(df.columns) == list(df3.columns)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_link_optimizations_test_to_hdf_link_optimizations.None_2.assert_dependency_depth_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_link_optimizations_test_to_hdf_link_optimizations.None_2.assert_dependency_depth_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 302, "end_line": 367, "span_ids": ["test_to_hdf_link_optimizations"], "tokens": 547}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_link_optimizations():\n \"\"\"testing dask link levels is correct by calculating the depth of the dask graph\"\"\"\n pytest.importorskip(\"tables\")\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df16, 16)\n\n # saving to multiple hdf files, no links are needed\n # expected layers: from_pandas, to_hdf, list = depth of 3\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n d = a.to_hdf(fn, \"/data\", compute=False)\n assert dependency_depth(d.dask) == 3\n\n # saving to a single hdf file with multiple nodes\n # all subsequent nodes depend on the first\n # expected layers: from_pandas, first to_hdf(creates file+node), subsequent to_hdfs, list = 4\n with tmpfile() as fn:\n d = a.to_hdf(fn, \"/data*\", compute=False)\n assert dependency_depth(d.dask) == 4\n\n # saving to a single hdf file with a single node\n # every node depends on the previous node\n # expected layers: from_pandas, to_hdf times npartitions(15), list = 
2 + npartitions = 17\n with tmpfile() as fn:\n d = a.to_hdf(fn, \"/data\", compute=False)\n assert dependency_depth(d.dask) == 2 + a.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_lock_delays_test_to_hdf_lock_delays.with_tmpdir_as_dn_.assert_eq_df16_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_lock_delays_test_to_hdf_lock_delays.with_tmpdir_as_dn_.assert_eq_df16_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 370, "end_line": 437, "span_ids": ["test_to_hdf_lock_delays"], "tokens": 487}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_to_hdf_lock_delays():\n pytest.importorskip(\"tables\")\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df16, 16)\n\n # adding artificial delays to make sure last tasks finish first\n # that's a way to simulate last tasks finishing last\n def delayed_nop(i):\n if i[1] < 10:\n sleep(0.1 * (10 - i[1]))\n return i\n\n # saving to multiple hdf nodes\n with tmpfile() as fn:\n a = a.apply(delayed_nop, axis=1, meta=a)\n a.to_hdf(fn, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df16, out)\n\n # saving to multiple hdf files\n # adding artificial delays to make sure last tasks finish first\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a = a.apply(delayed_nop, axis=1, meta=a)\n a.to_hdf(fn, \"/data\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df16, out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_exceptions_test_to_hdf_exceptions.with_tmpfile_as_fn_.with_pd_HDFStore_fn_as_h.with_pytest_raises_ValueE.a_to_hdf_hdf_data____": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_exceptions_test_to_hdf_exceptions.with_tmpfile_as_fn_.with_pd_HDFStore_fn_as_h.with_pytest_raises_ValueE.a_to_hdf_hdf_data____", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 457, "span_ids": ["test_to_hdf_exceptions"], "tokens": 
184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_exceptions():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n a = dd.from_pandas(df, 1)\n\n # triggering too many asterisks error\n with tmpdir() as dn:\n with pytest.raises(ValueError):\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data_*\")\n\n # triggering too many asterisks error\n with tmpfile() as fn:\n with pd.HDFStore(fn) as hdf:\n with pytest.raises(ValueError):\n a.to_hdf(hdf, \"/data_*_*\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_schedulers_test_to_hdf_schedulers.None_2.assert_eq_df_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_schedulers_test_to_hdf_schedulers.None_2.assert_eq_df_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 460, "end_line": 524, "span_ids": ["test_to_hdf_schedulers"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"sync\", \"threads\", \"processes\"])\n@pytest.mark.parametrize(\"npartitions\", [1, 4, 10])\ndef test_to_hdf_schedulers(scheduler, npartitions):\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df, npartitions=npartitions)\n\n # test single file single node\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data\", scheduler=scheduler)\n out = pd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # test multiple files single node\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data\", scheduler=scheduler)\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # test single file multiple nodes\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data*\", scheduler=scheduler)\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_kwargs_test_to_hdf_kwargs.None_1.tm_assert_frame_equal_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_kwargs_test_to_hdf_kwargs.None_1.tm_assert_frame_equal_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 527, "end_line": 540, "span_ids": ["test_to_hdf_kwargs"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_kwargs():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame({\"A\": [\"a\", \"aaaa\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n with tmpfile(\"h5\") as fn:\n ddf.to_hdf(fn, \"foo4\", format=\"table\", min_itemsize=4)\n df2 = pd.read_hdf(fn, \"foo4\")\n tm.assert_frame_equal(df, df2)\n\n # test shorthand 't' for table\n with tmpfile(\"h5\") as fn:\n ddf.to_hdf(fn, \"foo4\", format=\"t\", min_itemsize=4)\n df2 = pd.read_hdf(fn, \"foo4\")\n tm.assert_frame_equal(df, df2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_test_read_hdf.None_2.compare_a_compute_sort": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_test_read_hdf.None_2.compare_a_compute_sort", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 600, "end_line": 645, "span_ids": ["test_read_hdf"], "tokens": 423}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"data, compare\",\n [\n (\n pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]},\n index=[1.0, 2.0, 3.0, 4.0],\n ),\n tm.assert_frame_equal,\n ),\n (pd.Series([1, 2, 3, 4], name=\"a\"), tm.assert_series_equal),\n ],\n)\ndef test_read_hdf(data, compare):\n pytest.importorskip(\"tables\")\n with tmpfile(\"h5\") as fn:\n data.to_hdf(fn, \"/data\")\n try:\n dd.read_hdf(fn, \"data\", chunksize=2, mode=\"r\")\n assert False\n except TypeError as e:\n assert \"format='table'\" in str(e)\n\n with tmpfile(\"h5\") as fn:\n data.to_hdf(fn, \"/data\", format=\"table\")\n a = dd.read_hdf(fn, \"/data\", chunksize=2, mode=\"r\")\n assert a.npartitions == 2\n\n compare(a.compute(), data)\n\n compare(\n dd.read_hdf(fn, \"/data\", chunksize=2, start=1, stop=3, mode=\"r\").compute(),\n pd.read_hdf(fn, \"/data\", start=1, stop=3),\n )\n\n assert sorted(dd.read_hdf(fn, 
\"/data\", mode=\"r\").dask) == sorted(\n dd.read_hdf(fn, \"/data\", mode=\"r\").dask\n )\n\n with tmpfile(\"h5\") as fn:\n sorted_data = data.sort_index()\n sorted_data.to_hdf(fn, \"/data\", format=\"table\")\n a = dd.read_hdf(fn, \"/data\", chunksize=2, sorted_index=True, mode=\"r\")\n assert a.npartitions == 2\n\n compare(a.compute(), sorted_data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiply_open_test_read_hdf_multiply_open.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_mode.dd_read_hdf_fn_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiply_open_test_read_hdf_multiply_open.with_tmpfile_h5_as_fn_.with_pd_HDFStore_fn_mode.dd_read_hdf_fn_data_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 648, "end_line": 658, "span_ids": ["test_read_hdf_multiply_open"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_multiply_open():\n \"\"\"Test that we can read from a file that's already opened elsewhere in\n read-only mode.\"\"\"\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n with tmpfile(\"h5\") as fn:\n df.to_hdf(fn, \"/data\", format=\"table\")\n with pd.HDFStore(fn, mode=\"r\"):\n dd.read_hdf(fn, \"/data\", chunksize=2, mode=\"r\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiple_test_read_hdf_multiple.with_tmpfile_h5_as_fn_.assert_eq_a_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_multiple_test_read_hdf_multiple.with_tmpfile_h5_as_fn_.assert_eq_a_r_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 661, "end_line": 711, "span_ids": ["test_read_hdf_multiple"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_multiple():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n 
\"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df, 16)\n\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data*\")\n r = dd.read_hdf(fn, \"/data*\", sorted_index=True)\n assert a.npartitions == r.npartitions\n assert a.divisions == r.divisions\n assert_eq(a, r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_start_stop_values_test_read_hdf_start_stop_values.with_tmpfile_h5_as_fn_.None_2.dd_read_hdf_fn_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_start_stop_values_test_read_hdf_start_stop_values.with_tmpfile_h5_as_fn_.None_2.dd_read_hdf_fn_data_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 714, "end_line": 729, "span_ids": ["test_read_hdf_start_stop_values"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_start_stop_values():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n with tmpfile(\"h5\") as fn:\n df.to_hdf(fn, \"/data\", format=\"table\")\n\n with pytest.raises(ValueError, match=\"number of rows\"):\n dd.read_hdf(fn, \"/data\", stop=10)\n\n with pytest.raises(ValueError, match=\"is above or equal to\"):\n dd.read_hdf(fn, \"/data\", start=10)\n\n with pytest.raises(ValueError, match=\"positive integer\"):\n dd.read_hdf(fn, \"/data\", chunksize=-1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_globbing_test_hdf_globbing.with_tmpdir_as_tdir_.with_dask_config_set_sche.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_globbing_test_hdf_globbing.with_tmpdir_as_tdir_.with_dask_config_set_sche.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 732, "end_line": 766, "span_ids": ["test_hdf_globbing"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_hdf_globbing():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n with tmpdir() as tdir:\n df.to_hdf(os.path.join(tdir, \"one.h5\"), \"/foo/data\", format=\"table\")\n df.to_hdf(os.path.join(tdir, \"two.h5\"), \"/bar/data\", format=\"table\")\n df.to_hdf(os.path.join(tdir, \"two.h5\"), \"/foo/data\", format=\"table\")\n\n with dask.config.set(scheduler=\"sync\"):\n res = dd.read_hdf(os.path.join(tdir, \"one.h5\"), \"/*/data\", chunksize=2)\n assert res.npartitions == 2\n tm.assert_frame_equal(res.compute(), df)\n\n res = dd.read_hdf(\n os.path.join(tdir, \"one.h5\"), \"/*/data\", chunksize=2, start=1, stop=3\n )\n expected = pd.read_hdf(\n os.path.join(tdir, \"one.h5\"), \"/foo/data\", start=1, stop=3\n )\n tm.assert_frame_equal(res.compute(), expected)\n\n res = dd.read_hdf(os.path.join(tdir, \"two.h5\"), \"/*/data\", chunksize=2)\n assert res.npartitions == 2 + 2\n tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))\n\n res = dd.read_hdf(os.path.join(tdir, \"*.h5\"), \"/foo/data\", chunksize=2)\n assert res.npartitions == 2 + 2\n tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))\n\n res = dd.read_hdf(os.path.join(tdir, \"*.h5\"), \"/*/data\", chunksize=2)\n assert res.npartitions == 2 + 2 + 2\n tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_file_list_test_hdf_file_list.with_tmpdir_as_tdir_.with_dask_config_set_sche.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_file_list_test_hdf_file_list.with_tmpdir_as_tdir_.with_dask_config_set_sche.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 769, "end_line": 782, "span_ids": ["test_hdf_file_list"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hdf_file_list():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n with tmpdir() as tdir:\n df.iloc[:2].to_hdf(os.path.join(tdir, \"one.h5\"), \"dataframe\", format=\"table\")\n df.iloc[2:].to_hdf(os.path.join(tdir, \"two.h5\"), \"dataframe\", format=\"table\")\n\n with dask.config.set(scheduler=\"sync\"):\n input_files = [os.path.join(tdir, \"one.h5\"), os.path.join(tdir, \"two.h5\")]\n res = dd.read_hdf(input_files, \"dataframe\")\n tm.assert_frame_equal(res.compute(), df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_pattern_pathlike_test_read_hdf_pattern_pathlike.with_tmpfile_h5_as_fn_.assert_eq_res_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_read_hdf_pattern_pathlike_test_read_hdf_pattern_pathlike.with_tmpfile_h5_as_fn_.assert_eq_res_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 785, "end_line": 795, "span_ids": ["test_read_hdf_pattern_pathlike"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_hdf_pattern_pathlike():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n with tmpfile(\"h5\") as fn:\n path = pathlib.Path(fn)\n df.to_hdf(path, \"dataframe\", format=\"table\")\n res = dd.read_hdf(path, \"dataframe\")\n assert_eq(res, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_path_pathlike_test_read_hdf_doesnt_segfault.with_tmpfile_h5_as_fn_.assert_len_ddf_N": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_path_pathlike_test_read_hdf_doesnt_segfault.with_tmpfile_h5_as_fn_.assert_len_ddf_N", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 798, "end_line": 821, "span_ids": ["test_to_hdf_path_pathlike", "test_read_hdf_doesnt_segfault"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_path_pathlike():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n with tmpfile(\"h5\") as fn:\n path = pathlib.Path(fn)\n ddf.to_hdf(path, \"/data\")\n res = pd.read_hdf(path, \"/data\")\n assert_eq(res, ddf)\n\n\ndef test_read_hdf_doesnt_segfault():\n pytest.importorskip(\"tables\")\n with tmpfile(\"h5\") as fn:\n N = 40\n df = pd.DataFrame(np.random.randn(N, 3))\n with pd.HDFStore(fn, mode=\"w\") as store:\n store.append(\"/x\", df)\n\n ddf = dd.read_hdf(fn, \"/x\", chunksize=2)\n assert len(ddf) == N", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_filenames_test_hdf_filenames.os_remove_foo1_hdf5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_filenames_test_hdf_filenames.os_remove_foo1_hdf5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 824, "end_line": 832, "span_ids": ["test_hdf_filenames"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hdf_filenames():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert ddf.to_hdf(\"foo*.hdf5\", \"key\") == [\"foo0.hdf5\", \"foo1.hdf5\"]\n os.remove(\"foo0.hdf5\")\n os.remove(\"foo1.hdf5\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_path_exceptions_test_hdf_path_exceptions.with_pytest_raises_ValueE.dd_read_hdf_tmp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_path_exceptions_test_hdf_path_exceptions.with_pytest_raises_ValueE.dd_read_hdf_tmp_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 835, "end_line": 847, "span_ids": ["test_hdf_path_exceptions"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hdf_path_exceptions():\n\n # single file doesn't exist\n with pytest.raises(IOError):\n dd.read_hdf(\"nonexistant_store_X34HJK\", \"/tmp\")\n\n # a file from a list of files doesn't exist\n with pytest.raises(IOError):\n dd.read_hdf([\"nonexistant_store_X34HJK\", \"nonexistant_store_UY56YH\"], \"/tmp\")\n\n # list of files is empty\n with pytest.raises(ValueError):\n dd.read_hdf([], \"/tmp\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_nonpandas_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_hdf_nonpandas_keys_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 849, "end_line": 889, "span_ids": ["test_hdf_nonpandas_keys"], "tokens": 360}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hdf_nonpandas_keys():\n # https://github.com/dask/dask/issues/5934\n # TODO: maybe remove this if/when pandas copes with all keys\n\n tables = pytest.importorskip(\"tables\")\n import tables\n\n class Table1(tables.IsDescription):\n value1 = tables.Float32Col()\n\n class Table2(tables.IsDescription):\n value2 = tables.Float32Col()\n\n class Table3(tables.IsDescription):\n value3 = tables.Float32Col()\n\n with tmpfile(\"h5\") as path:\n with tables.open_file(path, mode=\"a\") as h5file:\n group = h5file.create_group(\"/\", \"group\")\n t = h5file.create_table(group, \"table1\", Table1, \"Table 1\")\n row = t.row\n row[\"value1\"] = 1\n row.append()\n t = h5file.create_table(group, \"table2\", Table2, \"Table 2\")\n row = t.row\n row[\"value2\"] = 1\n row.append()\n t = h5file.create_table(group, \"table3\", Table3, \"Table 3\")\n row = t.row\n row[\"value3\"] = 1\n row.append()\n\n # pandas keys should still work\n bar = pd.DataFrame(np.random.randn(10, 4))\n bar.to_hdf(path, \"/bar\", format=\"table\", mode=\"a\")\n\n dd.read_hdf(path, \"/group/table1\")\n dd.read_hdf(path, \"/group/table2\")\n dd.read_hdf(path, \"/group/table3\")\n dd.read_hdf(path, \"/bar\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_1darray_test_meta_from_1darray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_1darray_test_meta_from_1darray.with_pytest_raises_ValueE._meta_from_array_x_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 48, "end_line": 67, "span_ids": ["test_meta_from_1darray"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_from_1darray():\n x = np.array([1.0, 2.0, 3.0], dtype=np.float64)\n res = _meta_from_array(x)\n assert isinstance(res, pd.Series)\n assert res.dtype == np.float64\n\n x = np.array([1, 2, 3], dtype=np.object_)\n res = _meta_from_array(x, columns=\"x\")\n assert isinstance(res, pd.Series)\n assert res.name == \"x\"\n assert res.dtype == np.object_\n\n x = np.array([1, 2, 3], dtype=np.object_)\n res = _meta_from_array(x, columns=[\"x\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"x\"].dtype == np.object_\n tm.assert_index_equal(res.columns, 
pd.Index([\"x\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_recarray_test_meta_from_recarray.with_pytest_raises_ValueE._meta_from_array_x_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_meta_from_recarray_test_meta_from_recarray.with_pytest_raises_ValueE._meta_from_array_x_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 87, "span_ids": ["test_meta_from_recarray"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_from_recarray():\n x = np.array(\n [(i, i * 10) for i in range(10)], dtype=[(\"a\", np.float64), (\"b\", np.int64)]\n )\n res = _meta_from_array(x)\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([\"a\", \"b\"]))\n\n res = _meta_from_array(x, columns=[\"b\", \"a\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([\"b\", \"a\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\", \"c\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_test_from_array.with_pytest_raises_ValueE.dd_from_array_np_ones_sha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_test_from_array.with_pytest_raises_ValueE.dd_from_array_np_ones_sha", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 105, "span_ids": ["test_from_array"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array():\n x = np.arange(10 * 3).reshape(10, 3)\n d = dd.from_array(x, chunksize=4)\n assert isinstance(d, dd.DataFrame)\n tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))\n assert d.divisions == (0, 4, 8, 9)\n assert (d.compute().values == x).all()\n\n d = dd.from_array(x, chunksize=4, columns=list(\"abc\"))\n 
assert isinstance(d, dd.DataFrame)\n tm.assert_index_equal(d.columns, pd.Index([\"a\", \"b\", \"c\"]))\n assert d.divisions == (0, 4, 8, 9)\n assert (d.compute().values == x).all()\n\n with pytest.raises(ValueError):\n dd.from_array(np.ones(shape=(10, 10, 10)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_no_lock_test_from_bcolz_no_lock.assert_not_any_isinstance": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_no_lock_test_from_bcolz_no_lock.assert_not_any_isinstance", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 191, "end_line": 207, "span_ids": ["test_from_bcolz_no_lock"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_no_lock():\n bcolz = pytest.importorskip(\"bcolz\")\n locktype = type(Lock())\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"], chunklen=2\n )\n\n with check_bcolz_deprecation_warning():\n a = dd.from_bcolz(t, chunksize=2)\n b = dd.from_bcolz(t, chunksize=2, lock=True)\n c = dd.from_bcolz(t, chunksize=2, lock=False)\n\n assert_eq(a, b)\n assert_eq(a, c)\n\n assert not any(isinstance(item, locktype) for v in c.dask.values() for item in v)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_filename_test_from_bcolz_filename.with_tmpfile_bcolz_as.assert_list_d_x_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_filename_test_from_bcolz_filename.with_tmpfile_bcolz_as.assert_list_d_x_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 210, "end_line": 224, "span_ids": ["test_from_bcolz_filename"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_filename():\n bcolz = pytest.importorskip(\"bcolz\")\n\n with tmpfile(\".bcolz\") as fn:\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]],\n names=[\"x\", \"y\", \"a\"],\n rootdir=fn,\n )\n t.flush()\n\n with check_bcolz_deprecation_warning():\n d = dd.from_bcolz(fn, chunksize=2)\n\n 
assert list(d.x.compute()) == [1, 2, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_column_order_test_from_bcolz_column_order.assert_list_df_loc_0_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_column_order_test_from_bcolz_column_order.assert_list_df_loc_0_com", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 227, "end_line": 237, "span_ids": ["test_from_bcolz_column_order"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_column_order():\n bcolz = pytest.importorskip(\"bcolz\")\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n\n with check_bcolz_deprecation_warning():\n df = dd.from_bcolz(t, chunksize=2)\n\n assert list(df.loc[0].compute().columns) == [\"x\", \"y\", \"a\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_dataframe_test_from_pandas_dataframe.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_dataframe_test_from_pandas_dataframe.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 244, "span_ids": ["test_from_pandas_dataframe"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_dataframe():\n a = list(\"aaaaaaabbbbbbbbccccccc\")\n df = pd.DataFrame(\n dict(a=a, b=np.random.randn(len(a))),\n index=pd.date_range(start=\"20120101\", periods=len(a)),\n )\n ddf = dd.from_pandas(df, 3)\n assert len(ddf.dask) == 3\n assert len(ddf.divisions) == len(ddf.dask) + 1\n assert isinstance(ddf.divisions[0], type(df.index[0]))\n tm.assert_frame_equal(df, ddf.compute())\n ddf = dd.from_pandas(df, chunksize=8)\n msg = \"Exactly one of npartitions and chunksize must be specified.\"\n with pytest.raises(ValueError) as err:\n dd.from_pandas(df, npartitions=2, chunksize=2)\n assert msg in str(err.value)\n with pytest.raises((ValueError, AssertionError)) as err:\n dd.from_pandas(df)\n assert msg in str(err.value)\n assert len(ddf.dask) == 3\n 
assert len(ddf.divisions) == len(ddf.dask) + 1\n assert isinstance(ddf.divisions[0], type(df.index[0]))\n tm.assert_frame_equal(df, ddf.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_small_test_from_pandas_small.for_sort_in_True_False_.for_i_in_0_2_.assert_eq_s_ds_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_small_test_from_pandas_small.for_sort_in_True_False_.for_i_in_0_2_.assert_eq_s_ds_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 268, "span_ids": ["test_from_pandas_small"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_small():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n for i in [1, 2, 30]:\n a = dd.from_pandas(df, i)\n assert len(a.compute()) == 3\n assert a.divisions[0] == 0\n assert a.divisions[-1] == 2\n\n a = dd.from_pandas(df, chunksize=i)\n assert len(a.compute()) == 3\n assert a.divisions[0] == 0\n assert a.divisions[-1] == 2\n\n for sort in [True, False]:\n for i in [0, 2]:\n df = pd.DataFrame({\"x\": [0] * i})\n ddf = dd.from_pandas(df, npartitions=5, sort=sort)\n assert_eq(df, ddf)\n\n s = pd.Series([0] * i, name=\"x\", dtype=int)\n ds = dd.from_pandas(s, npartitions=5, sort=sort)\n assert_eq(s, ds)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_npartitions_is_accurate_test_from_pandas_npartitions_is_accurate.assert_dd_from_pandas_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_npartitions_is_accurate_test_from_pandas_npartitions_is_accurate.assert_dd_from_pandas_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 271, "end_line": 276, "span_ids": ["test_from_pandas_npartitions_is_accurate"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [1, 2, 4, 5])\ndef test_from_pandas_npartitions_is_accurate(n):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n assert dd.from_pandas(df, 
npartitions=n).npartitions <= n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_series_test_from_pandas_series.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_series_test_from_pandas_series.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 292, "span_ids": ["test_from_pandas_series"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_series():\n n = 20\n s = pd.Series(np.random.randn(n), index=pd.date_range(start=\"20120101\", periods=n))\n ds = dd.from_pandas(s, 3)\n assert len(ds.dask) == 3\n assert len(ds.divisions) == len(ds.dask) + 1\n assert isinstance(ds.divisions[0], type(s.index[0]))\n tm.assert_series_equal(s, ds.compute())\n\n ds = dd.from_pandas(s, chunksize=8)\n assert len(ds.dask) == 3\n assert len(ds.divisions) == len(ds.dask) + 1\n assert isinstance(ds.divisions[0], type(s.index[0]))\n tm.assert_series_equal(s, ds.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_non_sorted_test_from_pandas_single_row.assert_eq_ddf_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_non_sorted_test_from_pandas_single_row.assert_eq_ddf_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 295, "end_line": 310, "span_ids": ["test_from_pandas_non_sorted", "test_from_pandas_single_row"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_non_sorted():\n df = pd.DataFrame({\"x\": [1, 2, 3]}, index=[3, 1, 2])\n ddf = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf.known_divisions\n assert_eq(df, ddf)\n\n ddf = dd.from_pandas(df, chunksize=2, sort=False)\n assert not ddf.known_divisions\n assert_eq(df, ddf)\n\n\ndef test_from_pandas_single_row():\n df = pd.DataFrame({\"x\": [1]}, index=[1])\n ddf = dd.from_pandas(df, npartitions=1)\n assert ddf.divisions == (1, 1)\n assert_eq(ddf, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_with_datetime_index_test_from_pandas_with_datetime_index.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_pandas_with_datetime_index_test_from_pandas_with_datetime_index.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 313, "end_line": 334, "span_ids": ["test_from_pandas_with_datetime_index"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_with_datetime_index():\n df = pd.DataFrame(\n {\n \"Date\": [\n \"2015-08-28\",\n \"2015-08-27\",\n \"2015-08-26\",\n \"2015-08-25\",\n \"2015-08-24\",\n \"2015-08-21\",\n \"2015-08-20\",\n \"2015-08-19\",\n \"2015-08-18\",\n ],\n \"Val\": list(range(9)),\n }\n )\n df.Date = df.Date.astype(\"datetime64[ns]\")\n ddf = dd.from_pandas(df, 2)\n assert_eq(df, ddf)\n ddf = dd.from_pandas(df, chunksize=2)\n assert_eq(df, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_DataFrame_from_dask_array_test_DataFrame_from_dask_array.assert_df2_divisions_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_DataFrame_from_dask_array_test_DataFrame_from_dask_array.assert_df2_divisions_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 337, "end_line": 350, "span_ids": ["test_DataFrame_from_dask_array"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_DataFrame_from_dask_array():\n x = da.ones((10, 3), chunks=(4, 2))\n\n df = dd.from_dask_array(x, [\"a\", \"b\", \"c\"])\n assert isinstance(df, dd.DataFrame)\n tm.assert_index_equal(df.columns, pd.Index([\"a\", \"b\", \"c\"]))\n assert list(df.divisions) == [0, 4, 8, 9]\n assert (df.compute(scheduler=\"sync\").values == x.compute(scheduler=\"sync\")).all()\n\n # dd.from_array should re-route to from_dask_array\n df2 = dd.from_array(x, columns=[\"a\", \"b\", \"c\"])\n assert isinstance(df, dd.DataFrame)\n tm.assert_index_equal(df2.columns, df.columns)\n assert df2.divisions == df.divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_Series_from_dask_array_test_Series_from_dask_array.assert_eq_ser_ser2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_Series_from_dask_array_test_Series_from_dask_array.assert_eq_ser_ser2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 353, "end_line": 369, "span_ids": ["test_Series_from_dask_array"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_Series_from_dask_array():\n x = da.ones(10, chunks=4)\n\n ser = dd.from_dask_array(x, \"a\")\n assert isinstance(ser, dd.Series)\n assert ser.name == \"a\"\n assert list(ser.divisions) == [0, 4, 8, 9]\n assert (ser.compute(scheduler=\"sync\").values == x.compute(scheduler=\"sync\")).all()\n\n ser = dd.from_dask_array(x)\n assert isinstance(ser, dd.Series)\n assert ser.name is None\n\n # dd.from_array should re-route to from_dask_array\n ser2 = dd.from_array(x)\n assert isinstance(ser2, dd.Series)\n assert_eq(ser, ser2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_test_from_dask_array_index_raises.assert_m_match_4_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_test_from_dask_array_index_raises.assert_m_match_4_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 372, "end_line": 395, "span_ids": ["test_from_dask_array_index", "test_from_dask_array_index_raises"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"as_frame\", [True, False])\ndef test_from_dask_array_index(as_frame):\n s = dd.from_pandas(pd.Series(range(10), index=list(\"abcdefghij\")), npartitions=3)\n if as_frame:\n s = s.to_frame()\n result = dd.from_dask_array(s.values, index=s.index)\n assert_eq(s, result)\n\n\ndef test_from_dask_array_index_raises():\n x = da.random.uniform(size=(10,), chunks=(5,))\n with pytest.raises(ValueError) as m:\n dd.from_dask_array(x, index=pd.Index(np.arange(10)))\n assert m.match(\"must be an instance\")\n\n a = dd.from_pandas(pd.Series(range(12)), npartitions=2)\n b = dd.from_pandas(pd.Series(range(12)), npartitions=4)\n with pytest.raises(ValueError) as m:\n 
dd.from_dask_array(a.values, index=b.index)\n\n assert m.match(\"index\")\n assert m.match(\"number\")\n assert m.match(\"blocks\")\n assert m.match(\"4 != 2\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_test_from_dask_array_compat_numpy_array.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_test_from_dask_array_compat_numpy_array.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 398, "end_line": 432, "span_ids": ["test_from_dask_array_compat_numpy_array"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_compat_numpy_array():\n x = da.ones((3, 3, 3), chunks=2)\n\n with pytest.raises(ValueError):\n dd.from_dask_array(x) # dask\n\n with pytest.raises(ValueError):\n dd.from_array(x.compute()) # numpy\n\n x = da.ones((10, 3), chunks=(3, 3))\n d1 = dd.from_dask_array(x) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))\n\n d2 = dd.from_array(x.compute()) # numpy\n assert isinstance(d2, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))\n\n with pytest.raises(ValueError):\n dd.from_dask_array(x, columns=[\"a\"]) # dask\n\n with pytest.raises(ValueError):\n dd.from_array(x.compute(), columns=[\"a\"]) # numpy\n\n d1 = dd.from_dask_array(x, columns=[\"a\", \"b\", \"c\"]) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([\"a\", \"b\", \"c\"]))\n\n d2 = dd.from_array(x.compute(), columns=[\"a\", \"b\", \"c\"]) # numpy\n assert isinstance(d2, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([\"a\", \"b\", \"c\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_1d_test_from_dask_array_compat_numpy_array_1d.tm_assert_index_equal_d2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_compat_numpy_array_1d_test_from_dask_array_compat_numpy_array_1d.tm_assert_index_equal_d2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line":
467, "span_ids": ["test_from_dask_array_compat_numpy_array_1d"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_compat_numpy_array_1d():\n\n x = da.ones(10, chunks=3)\n d1 = dd.from_dask_array(x) # dask\n assert isinstance(d1, dd.Series)\n assert (d1.compute().values == x.compute()).all()\n assert d1.name is None\n\n d2 = dd.from_array(x.compute()) # numpy\n assert isinstance(d1, dd.Series)\n assert (d2.compute().values == x.compute()).all()\n assert d2.name is None\n\n d1 = dd.from_dask_array(x, columns=\"name\") # dask\n assert isinstance(d1, dd.Series)\n assert (d1.compute().values == x.compute()).all()\n assert d1.name == \"name\"\n\n d2 = dd.from_array(x.compute(), columns=\"name\") # numpy\n assert isinstance(d1, dd.Series)\n assert (d2.compute().values == x.compute()).all()\n assert d2.name == \"name\"\n\n # passing list via columns results in DataFrame\n d1 = dd.from_dask_array(x, columns=[\"name\"]) # dask\n assert isinstance(d1, dd.DataFrame)\n assert (d1.compute().values == x.compute()).all()\n tm.assert_index_equal(d1.columns, pd.Index([\"name\"]))\n\n d2 = dd.from_array(x.compute(), columns=[\"name\"]) # numpy\n assert isinstance(d1, dd.DataFrame)\n assert (d2.compute().values == x.compute()).all()\n tm.assert_index_equal(d2.columns, pd.Index([\"name\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_struct_dtype_test_from_dask_array_struct_dtype.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_struct_dtype_test_from_dask_array_struct_dtype.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 470, "end_line": 479, "span_ids": ["test_from_dask_array_struct_dtype"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_struct_dtype():\n x = np.array([(1, \"a\"), (2, \"b\")], dtype=[(\"a\", \"i4\"), (\"b\", \"object\")])\n y = da.from_array(x, chunks=(1,))\n df = dd.from_dask_array(y)\n tm.assert_index_equal(df.columns, pd.Index([\"a\", \"b\"]))\n assert_eq(df, pd.DataFrame(x))\n\n assert_eq(\n dd.from_dask_array(y, columns=[\"b\", \"a\"]), pd.DataFrame(x, columns=[\"b\", \"a\"])\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_unknown_chunks_test_from_dask_array_unknown_chunks.with_pytest_raises_ValueE.df.dd_from_dask_array_dx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_unknown_chunks_test_from_dask_array_unknown_chunks.with_pytest_raises_ValueE.df.dd_from_dask_array_dx_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 482, "end_line": 506, "span_ids": ["test_from_dask_array_unknown_chunks"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_unknown_chunks():\n # Series\n dx = da.Array(\n {(\"x\", 0): np.arange(5), (\"x\", 1): np.arange(5, 11)},\n \"x\",\n ((np.nan, np.nan),),\n np.arange(1).dtype,\n )\n df = dd.from_dask_array(dx)\n assert isinstance(df, dd.Series)\n assert not df.known_divisions\n assert_eq(df, pd.Series(np.arange(11)), check_index=False)\n\n # DataFrame\n dsk = {(\"x\", 0, 0): np.random.random((2, 3)), (\"x\", 1, 0): np.random.random((5, 3))}\n dx = da.Array(dsk, \"x\", ((np.nan, np.nan), (3,)), np.float64)\n df = dd.from_dask_array(dx)\n assert isinstance(df, dd.DataFrame)\n assert not df.known_divisions\n assert_eq(df, pd.DataFrame(dx.compute()), check_index=False)\n\n # Unknown width\n dx = da.Array(dsk, \"x\", ((np.nan, np.nan), (np.nan,)), np.float64)\n with pytest.raises(ValueError):\n df = dd.from_dask_array(dx)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_with_lengths_test_to_records_with_lengths.assert_result_chunks_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_with_lengths_test_to_records_with_lengths.assert_result_chunks_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 534, "end_line": 552, "span_ids": ["test_to_records_with_lengths"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"lengths\", [[2, 2], True])\ndef test_to_records_with_lengths(lengths):\n pytest.importorskip(\"dask.array\")\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.to_records(lengths=lengths)\n 
assert_eq(df.to_records(), result, check_type=False) # TODO: make check_type pass\n\n assert isinstance(result, da.Array)\n\n expected_chunks = ((2, 2),)\n\n assert result.chunks == expected_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_raises_test_to_records_raises.None_1.pytest_fail_Unexpected_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_raises_test_to_records_raises.None_1.pytest_fail_Unexpected_v", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 557, "end_line": 571, "span_ids": ["test_to_records_raises"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_records_raises():\n pytest.importorskip(\"dask.array\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n with pytest.raises(ValueError):\n ddf.to_records(lengths=[2, 2, 2])\n pytest.fail(\"3 != 2\")\n\n with pytest.raises(ValueError):\n ddf.to_records(lengths=5)\n pytest.fail(\"Unexpected value\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_test_from_delayed.assert_str_e_value_start": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_test_from_delayed.assert_str_e_value_start", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 602, "span_ids": ["test_from_delayed"], "tokens": 396}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed():\n df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list(\"abcd\"))\n parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]\n dfs = [delayed(parts.__getitem__)(i) for i in range(4)]\n meta = dfs[0].compute()\n\n my_len = lambda x: pd.Series([len(x)])\n\n for divisions in [None, [0, 1, 3, 6, 10]]:\n ddf = dd.from_delayed(dfs, meta=meta, divisions=divisions)\n assert_eq(ddf, df)\n assert list(ddf.map_partitions(my_len).compute()) == [1, 2, 3, 4]\n assert ddf.known_divisions == (divisions is not 
None)\n\n s = dd.from_delayed([d.a for d in dfs], meta=meta.a, divisions=divisions)\n assert_eq(s, df.a)\n assert list(s.map_partitions(my_len).compute()) == [1, 2, 3, 4]\n assert ddf.known_divisions == (divisions is not None)\n\n meta2 = [(c, \"f8\") for c in df.columns]\n assert_eq(dd.from_delayed(dfs, meta=meta2), df)\n assert_eq(dd.from_delayed([d.a for d in dfs], meta=(\"a\", \"f8\")), df.a)\n\n with pytest.raises(ValueError):\n dd.from_delayed(dfs, meta=meta, divisions=[0, 1, 3, 6])\n\n with pytest.raises(ValueError) as e:\n dd.from_delayed(dfs, meta=meta.a).compute()\n assert str(e.value).startswith(\"Metadata mismatch found in `from_delayed`\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_misordered_meta_test_from_delayed_misordered_meta.assert_msg_in_str_info_va": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_misordered_meta_test_from_delayed_misordered_meta.assert_msg_in_str_info_va", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 605, "end_line": 626, "span_ids": ["test_from_delayed_misordered_meta"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed_misordered_meta():\n df = pd.DataFrame(\n columns=[\"(1)\", \"(2)\", \"date\", \"ent\", \"val\"],\n data=[range(i * 5, i * 5 + 5) for i in range(3)],\n index=range(3),\n )\n\n # meta with different order for columns\n misordered_meta = pd.DataFrame(\n columns=[\"date\", \"ent\", \"val\", \"(1)\", \"(2)\"], data=[range(5)]\n )\n\n ddf = dd.from_delayed([delayed(lambda: df)()], meta=misordered_meta)\n\n with pytest.raises(ValueError) as info:\n # produces dataframe which does not match meta\n ddf.reset_index().compute(scheduler=\"sync\")\n msg = (\n \"The columns in the computed data do not match the columns in the\"\n \" provided metadata\"\n )\n assert msg in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_sorted_test_to_delayed.assert_eq_dx_compute_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_sorted_test_to_delayed.assert_eq_dx_compute_x", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 629, "end_line": 653, "span_ids": ["test_from_delayed_sorted", "test_to_delayed"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed_sorted():\n a = pd.DataFrame({\"x\": [1, 2]}, index=[1, 10])\n b = pd.DataFrame({\"x\": [4, 1]}, index=[100, 200])\n\n A = dd.from_delayed([delayed(a), delayed(b)], divisions=\"sorted\")\n assert A.known_divisions\n\n assert A.divisions == (1, 100, 200)\n\n\ndef test_to_delayed():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n # Frame\n a, b = ddf.to_delayed()\n assert isinstance(a, Delayed)\n assert isinstance(b, Delayed)\n assert_eq(a.compute(), df.iloc[:2])\n\n # Scalar\n x = ddf.x.sum()\n dx = x.to_delayed()\n assert isinstance(dx, Delayed)\n assert_eq(dx.compute(), x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_eq_dx_compute_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.assert_eq_dx_compute_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 656, "end_line": 674, "span_ids": ["test_to_delayed_optimize_graph"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed_optimize_graph():\n df = pd.DataFrame({\"x\": list(range(20))})\n ddf = dd.from_pandas(df, npartitions=20)\n ddf2 = (ddf + 1).loc[:2]\n\n # Frame\n d = ddf2.to_delayed()[0]\n assert len(d.dask) < 20\n d2 = ddf2.to_delayed(optimize_graph=False)[0]\n assert sorted(d2.dask) == sorted(ddf2.dask)\n assert_eq(ddf2.get_partition(0), d.compute())\n assert_eq(ddf2.get_partition(0), d2.compute())\n\n # Scalar\n x = ddf2.x.sum()\n dx = x.to_delayed()\n dx2 = x.to_delayed(optimize_graph=False)\n assert len(dx.dask) < len(dx2.dask)\n assert_eq(dx.compute(), dx2.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_dask_array_index_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 677, "end_line": 700, "span_ids": ["test_from_dask_array_index_dtype"], "tokens": 236}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_dask_array_index_dtype():\n x = da.ones((10,), chunks=(5,))\n\n df = pd.DataFrame(\n {\n \"date\": pd.date_range(\"2019-01-01\", periods=10, freq=\"1T\"),\n \"val1\": list(range(10)),\n }\n )\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"date\")\n\n ddf2 = dd.from_dask_array(x, index=ddf.index, columns=\"val2\")\n\n assert ddf.index.dtype == ddf2.index.dtype\n assert ddf.index.name == ddf2.index.name\n\n df = pd.DataFrame({\"idx\": np.arange(0, 1, 0.1), \"val1\": list(range(10))})\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"idx\")\n\n ddf2 = dd.from_dask_array(x, index=ddf.index, columns=\"val2\")\n\n assert ddf.index.dtype == ddf2.index.dtype\n assert ddf.index.name == ddf2.index.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_fkeyword_test_read_json_fkeyword.with_tmpfile_json_as_f.assert_eq_actual_actual_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_fkeyword_test_read_json_fkeyword.with_tmpfile_json_as_f.assert_eq_actual_actual_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 30, "end_line": 41, "span_ids": ["test_read_json_fkeyword"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"fkeyword\", [\"pandas\", \"json\"])\ndef test_read_json_fkeyword(fkeyword):\n def _my_json_reader(*args, **kwargs):\n if fkeyword == \"json\":\n return pd.DataFrame.from_dict(json.load(*args))\n return pd.read_json(*args)\n\n with tmpfile(\"json\") as f:\n df.to_json(f, orient=\"records\", lines=False)\n actual = dd.read_json(f, orient=\"records\", lines=False, engine=_my_json_reader)\n actual_pd = pd.read_json(f, orient=\"records\", lines=False)\n assert_eq(actual, actual_pd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_meta_test_read_json_meta.if_orient_records_.assert_eq_res_sol_check": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_meta_test_read_json_meta.if_orient_records_.assert_eq_res_sol_check", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": 
"test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 72, "span_ids": ["test_read_json_meta"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"orient\", [\"split\", \"records\", \"index\", \"columns\", \"values\"])\ndef test_read_json_meta(orient, tmpdir):\n df = pd.DataFrame({\"x\": range(5), \"y\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n df2 = df.assign(x=df.x + 0.5)\n lines = orient == \"records\"\n df.to_json(str(tmpdir.join(\"fil1.json\")), orient=orient, lines=lines)\n df2.to_json(str(tmpdir.join(\"fil2.json\")), orient=orient, lines=lines)\n sol = pd.concat([df, df2])\n meta = df2.iloc[:0]\n\n if orient == \"values\":\n # orient=values loses column names\n sol.columns = meta.columns = [0, 1]\n\n res = dd.read_json(\n str(tmpdir.join(\"fil*.json\")), orient=orient, meta=meta, lines=lines\n )\n assert_eq(res, sol)\n\n if orient == \"records\":\n # Also check chunked version\n res = dd.read_json(\n str(tmpdir.join(\"fil*.json\")),\n orient=orient,\n meta=meta,\n lines=True,\n blocksize=50,\n )\n assert_eq(res, sol, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_write_json_basic_test_to_json_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_write_json_basic_test_to_json_with_get.with_tmpdir_as_dn_.assert_eq_result_df_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 154, "end_line": 181, "span_ids": ["test_to_json_with_get", "test_write_json_basic"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"orient\", [\"split\", \"records\", \"index\", \"columns\", \"values\"])\ndef test_write_json_basic(orient):\n with tmpdir() as path:\n fn = os.path.join(path, \"1.json\")\n df.to_json(fn, orient=orient, lines=False)\n actual = dd.read_json(fn, orient=orient, lines=False)\n if orient == \"values\":\n actual.columns = list(df.columns)\n assert_eq(actual, df)\n\n\ndef test_to_json_with_get():\n from dask.multiprocessing import get as mp_get\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpdir() as dn:\n ddf.to_json(dn, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n result = dd.read_json(os.path.join(dn, \"*\"))\n assert_eq(result, df, 
check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_error_test_read_chunked.with_tmpdir_as_path_.assert_eq_d_df_check_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_error_test_read_chunked.with_tmpdir_as_path_.assert_eq_d_df_check_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 122, "span_ids": ["test_read_json_error", "test_read_chunked"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_json_error():\n with tmpfile(\"json\") as f:\n with pytest.raises(ValueError):\n df.to_json(f, orient=\"split\", lines=True)\n df.to_json(f, orient=\"split\", lines=False)\n with pytest.raises(ValueError):\n dd.read_json(f, orient=\"split\", blocksize=1)\n\n\n@pytest.mark.parametrize(\"block\", [5, 15, 33, 200, 90000])\ndef test_read_chunked(block):\n with tmpdir() as path:\n fn = os.path.join(path, \"1.json\")\n df.to_json(fn, orient=\"records\", lines=True)\n d = dd.read_json(fn, blocksize=block, sample=10)\n assert (d.npartitions > 1) or (block > 30)\n assert_eq(d, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_test_empty.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_test_empty.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 177, "end_line": 188, "span_ids": ["test_empty"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"index\", [False, True])\n@write_read_engines_xfail\ndef test_empty(tmpdir, write_engine, read_engine, index):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})[:0]\n if index:\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(fn, write_index=index, engine=write_engine)\n read_df = dd.read_parquet(fn, engine=read_engine)\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_simple_test_simple.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_simple_test_simple.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 234, "end_line": 245, "span_ids": ["test_simple"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_simple(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n if write_engine != \"fastparquet\":\n df = pd.DataFrame({\"a\": [b\"a\", b\"b\", b\"b\"], \"b\": [4, 5, 6]})\n else:\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n read_df = dd.read_parquet(fn, index=[\"a\"], engine=read_engine)\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_delayed_no_metadata_test_delayed_no_metadata.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_delayed_no_metadata_test_delayed_no_metadata.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 202, "end_line": 220, "span_ids": ["test_delayed_no_metadata"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_delayed_no_metadata(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"b\"], \"b\": [4, 5, 6]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(\n fn, engine=write_engine, compute=False, write_metadata_file=False\n ).compute()\n files = os.listdir(fn)\n assert \"_metadata\" not in files\n # Fastparquet doesn't currently handle a directory without \"_metadata\"\n read_df = dd.read_parquet(\n os.path.join(fn, \"*.parquet\"),\n index=[\"a\"],\n engine=read_engine,\n gather_statistics=True,\n )\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_list_test_read_list.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_list_test_read_list.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 304, "end_line": 325, "span_ids": ["test_read_list"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_read_list(tmpdir, write_engine, read_engine):\n if write_engine == read_engine == \"fastparquet\" and os.name == \"nt\":\n # fastparquet or dask is not normalizing filepaths correctly on\n # windows.\n pytest.skip(\"filepath bug.\")\n\n tmpdir = str(tmpdir)\n ddf.to_parquet(tmpdir, engine=write_engine)\n files = sorted(\n (\n os.path.join(tmpdir, f)\n for f in os.listdir(tmpdir)\n if not f.endswith(\"_metadata\")\n ),\n key=natural_sort_key,\n )\n\n ddf2 = dd.read_parquet(\n files, engine=read_engine, index=\"myindex\", gather_statistics=True\n )\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_auto_index_test_columns_auto_index.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_auto_index_test_columns_auto_index.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 311, "end_line": 337, "span_ids": ["test_columns_auto_index"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_columns_auto_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # XFAIL, auto index selection no longer supported (for simplicity)\n # ### Empty columns ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, auto select index ###\n # With divisions if supported\n assert_eq(dd.read_parquet(fn, columns=[\"x\"], engine=read_engine), 
ddf[[\"x\"]])\n\n # No divisions\n assert_eq(\n dd.read_parquet(fn, columns=[\"x\"], engine=read_engine, gather_statistics=False),\n ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_test_columns_index.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_test_columns_index.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 340, "end_line": 400, "span_ids": ["test_columns_index"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_columns_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n\n # With Index\n # ----------\n # ### Empty columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, columns=[], engine=read_engine, index=\"myindex\"), ddf[[]]\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn, columns=[], engine=read_engine, index=\"myindex\", gather_statistics=False\n ),\n ddf[[]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Single column, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\"], engine=read_engine),\n ddf[[\"x\"]],\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf[[\"x\"]].clear_divisions(),\n check_divisions=True,\n )\n\n # ### Two columns, specify index ###\n # With divisions if supported\n assert_eq(\n dd.read_parquet(fn, index=\"myindex\", columns=[\"x\", \"y\"], engine=read_engine),\n ddf,\n )\n\n # No divisions\n assert_eq(\n dd.read_parquet(\n fn,\n index=\"myindex\",\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=False,\n ),\n ddf.clear_divisions(),\n check_divisions=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_nonsense_column_test_gather_statistics_no_index.assert_not_df_known_divis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_nonsense_column_test_gather_statistics_no_index.assert_not_df_known_divis", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 357, "end_line": 418, "span_ids": 
["test_columns_no_index", "test_gather_statistics_no_index", "test_nonsense_column"], "tokens": 418}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonsense_column(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n with pytest.raises((ValueError, KeyError)):\n dd.read_parquet(fn, columns=[\"nonesense\"], engine=engine)\n with pytest.raises((Exception, KeyError)):\n dd.read_parquet(fn, columns=[\"nonesense\"] + list(ddf.columns), engine=engine)\n\n\n@write_read_engines()\ndef test_columns_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = ddf.reset_index()\n\n # No Index\n # --------\n # All columns, none as index\n assert_eq(\n dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),\n ddf2,\n check_index=False,\n check_divisions=True,\n )\n\n # Two columns, none as index\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"x\", \"y\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"x\", \"y\"]],\n check_index=False,\n check_divisions=True,\n )\n\n # One column and one index, all as columns\n assert_eq(\n dd.read_parquet(\n fn,\n index=False,\n columns=[\"myindex\", \"x\"],\n engine=read_engine,\n gather_statistics=True,\n ),\n ddf2[[\"myindex\", \"x\"]],\n check_index=False,\n check_divisions=True,\n )\n\n\n@write_read_engines()\ndef test_gather_statistics_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=write_engine, write_index=False)\n\n df = dd.read_parquet(fn, engine=read_engine, index=False)\n assert df.index.name is None\n assert not df.known_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_with_multi_index_test_columns_index_with_multi_index.for_ind_col_sol_df_in_.assert_eq_d_sol_df_col_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_index_with_multi_index_test_columns_index_with_multi_index.for_ind_col_sol_df_in_.assert_eq_d_sol_df_col_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 457, "end_line": 516, "span_ids": ["test_columns_index_with_multi_index"], "tokens": 690}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_columns_index_with_multi_index(tmpdir, engine):\n fn = os.path.join(str(tmpdir), \"test.parquet\")\n index = pd.MultiIndex.from_arrays(\n [np.arange(10), np.arange(10) + 1], names=[\"x0\", \"x1\"]\n )\n df = pd.DataFrame(np.random.randn(10, 2), columns=[\"a\", 
\"b\"], index=index)\n df2 = df.reset_index(drop=False)\n\n if engine == \"fastparquet\":\n fastparquet.write(fn, df.reset_index(), write_index=False)\n\n else:\n pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)\n\n ddf = dd.read_parquet(fn, engine=engine, index=index.names)\n assert_eq(ddf, df)\n\n d = dd.read_parquet(fn, columns=\"a\", engine=engine, index=index.names)\n assert_eq(d, df[\"a\"])\n\n d = dd.read_parquet(fn, index=[\"a\", \"b\"], columns=[\"x0\", \"x1\"], engine=engine)\n assert_eq(d, df2.set_index([\"a\", \"b\"])[[\"x0\", \"x1\"]])\n\n # Just index\n d = dd.read_parquet(fn, index=False, engine=engine)\n assert_eq(d, df2)\n\n d = dd.read_parquet(fn, columns=[\"b\"], index=[\"a\"], engine=engine)\n assert_eq(d, df2.set_index(\"a\")[[\"b\"]])\n\n d = dd.read_parquet(fn, columns=[\"a\", \"b\"], index=[\"x0\"], engine=engine)\n assert_eq(d, df2.set_index(\"x0\")[[\"a\", \"b\"]])\n\n # Just columns\n d = dd.read_parquet(fn, columns=[\"x0\", \"a\"], index=[\"x1\"], engine=engine)\n assert_eq(d, df2.set_index(\"x1\")[[\"x0\", \"a\"]])\n\n # Both index and columns\n d = dd.read_parquet(fn, index=False, columns=[\"x0\", \"b\"], engine=engine)\n assert_eq(d, df2[[\"x0\", \"b\"]])\n\n for index in [\"x1\", \"b\"]:\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n assert_eq(d, df2.set_index(index)[[\"x0\", \"a\"]])\n\n # Columns and index intersect\n for index in [\"a\", \"x0\"]:\n with pytest.raises(ValueError):\n d = dd.read_parquet(fn, index=index, columns=[\"x0\", \"a\"], engine=engine)\n\n # Series output\n for ind, col, sol_df in [\n (\"x1\", \"x0\", df2.set_index(\"x1\")),\n (False, \"b\", df2),\n (False, \"x0\", df2[[\"x0\"]]),\n (\"a\", \"x0\", df2.set_index(\"a\")[[\"x0\"]]),\n (\"a\", \"b\", df2.set_index(\"a\")),\n ]:\n d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)\n assert_eq(d, sol_df[col])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_no_index_test_read_series.assert_eq_ddf_x_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_no_index_test_read_series.assert_eq_ddf_x_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 488, "end_line": 505, "span_ids": ["test_read_series", "test_no_index"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_no_index(tmpdir, write_engine, read_engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(fn, engine=read_engine)\n assert_eq(df, ddf2, check_index=False)\n\n\ndef test_read_series(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = 
dd.read_parquet(fn, columns=[\"x\"], index=\"myindex\", engine=engine)\n assert_eq(ddf[[\"x\"]], ddf2)\n\n ddf2 = dd.read_parquet(fn, columns=\"x\", index=\"myindex\", engine=engine)\n assert_eq(ddf.x, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_names_test_names.assert_set_read_fn_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_names_test_names.assert_set_read_fn_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 508, "end_line": 519, "span_ids": ["test_names"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_names(tmpdir, engine):\n fn = str(tmpdir)\n ddf.to_parquet(fn, engine=engine)\n\n def read(fn, **kwargs):\n return dd.read_parquet(fn, engine=engine, **kwargs)\n\n assert set(read(fn).dask) == set(read(fn).dask)\n\n assert set(read(fn).dask) != set(read(fn, columns=[\"x\"]).dask)\n\n assert set(read(fn, columns=(\"x\",)).dask) == set(read(fn, columns=[\"x\"]).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_from_pandas_test_roundtrip_from_pandas.assert_eq_dfp_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_from_pandas_test_roundtrip_from_pandas.assert_eq_dfp_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 583, "span_ids": ["test_roundtrip_from_pandas"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):\n fn = str(tmpdir.join(\"test.parquet\"))\n dfp = df.copy()\n dfp.index.name = \"index\"\n dfp.to_parquet(\n fn, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n ddf = dd.read_parquet(fn, index=\"index\", engine=read_engine)\n assert_eq(dfp, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_test_append.assert_eq_df_ddf3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_test_append.assert_eq_df_ddf3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 593, "end_line": 615, "span_ids": ["test_append"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n ddf2.to_parquet(tmp, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, engine=engine)\n assert_eq(df, ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_create_test_append_create.assert_eq_df_ddf3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_create_test_append_create.assert_eq_df_ddf3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 589, "end_line": 611, "span_ids": ["test_append_create"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_create(tmpdir, engine):\n \"\"\"Test that appended parquet equal to the original one.\"\"\"\n tmp_path = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp_path, append=True, engine=engine)\n ddf2.to_parquet(tmp_path, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, 
ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_with_partition_test_append_with_partition.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_with_partition_test_append_with_partition.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 670, "end_line": 715, "span_ids": ["test_append_with_partition"], "tokens": 423}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_with_partition(tmpdir, engine):\n tmp = str(tmpdir)\n df0 = pd.DataFrame(\n {\n \"lat\": np.arange(0, 10, dtype=\"int64\"),\n \"lon\": np.arange(10, 20, dtype=\"int64\"),\n \"value\": np.arange(100, 110, dtype=\"int64\"),\n }\n )\n df0.index.name = \"index\"\n df1 = pd.DataFrame(\n {\n \"lat\": np.arange(10, 20, dtype=\"int64\"),\n \"lon\": np.arange(10, 20, dtype=\"int64\"),\n \"value\": np.arange(120, 130, dtype=\"int64\"),\n }\n )\n df1.index.name = \"index\"\n\n # Check that nullable dtypes work\n # (see: https://github.com/dask/dask/issues/8373)\n df0[\"lat\"] = df0[\"lat\"].astype(\"Int64\")\n df1[\"lat\"].iloc[0] = np.nan\n df1[\"lat\"] = df1[\"lat\"].astype(\"Int64\")\n\n dd_df0 = dd.from_pandas(df0, npartitions=1)\n dd_df1 = dd.from_pandas(df1, npartitions=1)\n dd.to_parquet(dd_df0, tmp, partition_on=[\"lon\"], engine=engine)\n dd.to_parquet(\n dd_df1,\n tmp,\n partition_on=[\"lon\"],\n append=True,\n ignore_divisions=True,\n engine=engine,\n )\n\n out = dd.read_parquet(\n tmp, engine=engine, index=\"index\", gather_statistics=True\n ).compute()\n # convert categorical to plain int just to pass assert\n out[\"lon\"] = out.lon.astype(\"int64\")\n # sort required since partitioning breaks index order\n assert_eq(\n out.sort_values(\"value\"), pd.concat([df0, df1])[out.columns], check_index=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_test_partition_on_cats.assert_set_df_b_cat_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_test_partition_on_cats.assert_set_df_b_cat_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 655, "end_line": 667, "span_ids": ["test_partition_on_cats"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on_cats(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=engine)\n df = dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_pyarrow_test_partition_on_cats_pyarrow.assert_set_df_b_cat_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_pyarrow_test_partition_on_cats_pyarrow.assert_set_df_b_cat_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 699, "end_line": 714, "span_ids": ["test_partition_on_cats_pyarrow"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"meta\", [False, True])\n@pytest.mark.parametrize(\"stats\", [False, True])\ndef test_partition_on_cats_pyarrow(tmpdir, stats, meta):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\"], engine=\"pyarrow\", write_metadata_file=meta)\n df = dd.read_parquet(tmp, engine=\"pyarrow\", gather_statistics=stats)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_2_test_partition_on_cats_2.assert_set_df_cat_categor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_cats_2_test_partition_on_cats_2.assert_set_df_cat_categor", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 689, "end_line": 713, "span_ids": ["test_partition_on_cats_2"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on_cats_2(tmpdir, engine):\n tmp = str(tmpdir)\n d = pd.DataFrame(\n {\n \"a\": np.random.rand(50),\n \"b\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n \"c\": np.random.choice([\"x\", \"y\", \"z\"], size=50),\n }\n )\n d = dd.from_pandas(d, 2)\n d.to_parquet(tmp, partition_on=[\"b\", \"c\"], engine=engine)\n df = dd.read_parquet(tmp, engine=engine)\n assert set(df.b.cat.categories) == {\"x\", \"y\", \"z\"}\n assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n\n df = dd.read_parquet(tmp, columns=[\"a\", \"c\"], engine=engine)\n assert set(df.c.cat.categories) == {\"x\", \"y\", \"z\"}\n assert \"b\" not in df.columns\n assert_eq(df, df.compute())\n df = dd.read_parquet(tmp, index=\"c\", engine=engine)\n assert set(df.index.categories) == {\"x\", \"y\", \"z\"}\n assert \"c\" not in df.columns\n # series\n df = dd.read_parquet(tmp, columns=\"b\", engine=engine)\n assert set(df.cat.categories) == {\"x\", \"y\", \"z\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_wo_index_test_append_wo_index.assert_eq_df_set_index_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_wo_index_test_append_wo_index.assert_eq_df_set_index_f", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 716, "end_line": 743, "span_ids": ["test_append_wo_index"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_wo_index(tmpdir, engine):\n \"\"\"Test append with write_index=False.\"\"\"\n tmp = str(tmpdir.join(\"tmp1.parquet\"))\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n assert \"Appended columns\" in str(excinfo.value)\n\n tmp = str(tmpdir.join(\"tmp2.parquet\"))\n ddf1.to_parquet(tmp, write_index=False, engine=engine)\n ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)\n\n ddf3 = dd.read_parquet(tmp, index=\"f\", engine=engine)\n assert_eq(df.set_index(\"f\"), ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_overlapping_divisions_test_append_overlapping_divisions.ddf2_to_parquet_tmp_engi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_overlapping_divisions_test_append_overlapping_divisions.ddf2_to_parquet_tmp_engi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 746, "end_line": 768, "span_ids": ["test_append_overlapping_divisions"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_overlapping_divisions(tmpdir, engine):\n \"\"\"Test raising of error when divisions overlapping.\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n half = len(df) // 2\n ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)\n ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended divisions\" in str(excinfo.value)\n\n ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_different_columns_test_append_different_columns.assert_Appended_dtypes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_different_columns_test_append_different_columns.assert_Appended_dtypes_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 771, "end_line": 790, "span_ids": ["test_append_different_columns"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_different_columns(tmpdir, engine):\n \"\"\"Test raising of error when non equal columns.\"\"\"\n tmp = str(tmpdir)\n df1 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int32)})\n df2 = pd.DataFrame({\"i64\": np.arange(100, dtype=np.int64)})\n df3 = pd.DataFrame({\"i32\": np.arange(100, dtype=np.int64)})\n\n ddf1 = dd.from_pandas(df1, chunksize=2)\n ddf2 = dd.from_pandas(df2, chunksize=2)\n ddf3 = 
dd.from_pandas(df3, chunksize=2)\n\n ddf1.to_parquet(tmp, engine=engine)\n\n with pytest.raises(ValueError) as excinfo:\n ddf2.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended columns\" in str(excinfo.value)\n\n with pytest.raises(ValueError) as excinfo:\n ddf3.to_parquet(tmp, engine=engine, append=True)\n assert \"Appended dtypes\" in str(excinfo.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ordering_test_ordering.assert_eq_ddf_ddf2_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ordering_test_ordering.assert_eq_ddf_ddf2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 793, "end_line": 809, "span_ids": ["test_ordering"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines_xfail\ndef test_ordering(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [10, 20, 30], \"c\": [100, 200, 300]},\n index=pd.Index([-1, -2, -3], name=\"myindex\"),\n columns=[\"c\", \"a\", \"b\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n if read_engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(tmp)\n assert pf.columns == [\"myindex\", \"c\", \"a\", \"b\"]\n\n ddf2 = dd.read_parquet(tmp, index=\"myindex\", engine=read_engine)\n assert_eq(ddf, ddf2, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_custom_columns_test_read_parquet_custom_columns.assert_eq_df_f_i32_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_custom_columns_test_read_parquet_custom_columns.assert_eq_df_f_i32_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 864, "end_line": 881, "span_ids": ["test_read_parquet_custom_columns"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_parquet_custom_columns(tmpdir, engine):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\"i32\": np.arange(1000, dtype=np.int32), \"f\": 
np.arange(1000, dtype=np.float64)}\n )\n df = dd.from_pandas(data, chunksize=50)\n df.to_parquet(tmp, engine=engine)\n\n df2 = dd.read_parquet(tmp, columns=[\"i32\", \"f\"], engine=engine)\n assert_eq(df[[\"i32\", \"f\"]], df2, check_index=False)\n\n fns = glob.glob(os.path.join(tmp, \"*.parquet\"))\n df2 = dd.read_parquet(fns, columns=[\"i32\"], engine=engine).compute()\n df2.sort_values(\"i32\", inplace=True)\n assert_eq(df[[\"i32\"]], df2, check_index=False, check_divisions=False)\n\n df3 = dd.read_parquet(tmp, columns=[\"f\", \"i32\"], engine=engine)\n assert_eq(df[[\"f\", \"i32\"]], df3, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_test_categories.with_pytest_raises_Value.ddf2.dd_read_parquet_fn_categ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_test_categories.with_pytest_raises_Value.ddf2.dd_read_parquet_fn_categ", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 889, "end_line": 915, "span_ids": ["test_categories"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categories(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": list(\"caaab\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf[\"y\"] = ddf.y.astype(\"category\")\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, categories=[\"y\"], engine=engine)\n\n # Shouldn't need to specify categories explicitly\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf3, ddf2)\n\n with pytest.raises(NotImplementedError):\n ddf2.y.cat.categories\n assert set(ddf2.y.compute().cat.categories) == {\"a\", \"b\", \"c\"}\n cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()\n assert cats_set.tolist() == [\"a\", \"c\", \"a\", \"b\"]\n\n if engine == \"fastparquet\":\n assert_eq(ddf.y, ddf2.y, check_names=False)\n with pytest.raises(TypeError):\n # attempt to load as category that which is not so encoded\n ddf2 = dd.read_parquet(fn, categories=[\"x\"], engine=engine).compute()\n\n with pytest.raises((ValueError, FutureWarning)):\n # attempt to load as category unknown column\n ddf2 = dd.read_parquet(fn, categories=[\"foo\"], engine=engine)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_partition_test_empty_partition.assert_eq_sol_ddf3_chec": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_empty_partition_test_empty_partition.assert_eq_sol_ddf3_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 918, "end_line": 929, "span_ids": ["test_empty_partition"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_partition(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf2 = ddf[ddf.a <= 5]\n ddf2.to_parquet(fn, engine=engine)\n\n ddf3 = dd.read_parquet(fn, engine=engine)\n assert ddf3.npartitions < 5\n sol = ddf2.compute()\n assert_eq(sol, ddf3, check_names=False, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp_index_test_to_parquet_default_writes_nulls.assert_table_1_null_coun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp_index_test_to_parquet_default_writes_nulls.assert_table_1_null_coun", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1028, "end_line": 1048, "span_ids": ["test_to_parquet_default_writes_nulls", "test_timestamp_index"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_timestamp_index(tmpdir, engine):\n fn = str(tmpdir)\n df = dd._compat.makeTimeDataFrame()\n df.index.name = \"foo\"\n ddf = dd.from_pandas(df, npartitions=5)\n ddf.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n assert_eq(ddf, ddf2)\n\n\n@FASTPARQUET_MARK\n@PYARROW_MARK\ndef test_to_parquet_default_writes_nulls(tmpdir):\n fn = str(tmpdir.join(\"test.parquet\"))\n\n df = pd.DataFrame({\"c1\": [1.0, np.nan, 2, np.nan, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n ddf.to_parquet(fn)\n table = pq.read_table(fn)\n assert table[1].null_count == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default.with_pytest_raises_ValueE.None_1": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default.with_pytest_raises_ValueE.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1082, "end_line": 1118, "span_ids": ["test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_LE_MARK\ndef test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):\n df = pd.DataFrame(\n {\"partition_column\": [0, 0, 1, 1], \"strings\": [\"a\", \"b\", None, None]}\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n # In order to allow pyarrow to write an inconsistent schema,\n # we need to avoid writing the _metadata file (will fail >0.17.1)\n # and need to avoid schema inference (i.e. use `schema=None`)\n ddf.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n partition_on=[\"partition_column\"],\n write_metadata_file=False,\n schema=None,\n )\n\n # Test that schema is not validated by default\n # (shouldn't raise error with legacy dataset)\n dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-legacy\",\n gather_statistics=False,\n ).compute()\n\n # Test that read fails when validate_schema=True\n # Note: This fails differently for pyarrow.dataset api\n with pytest.raises(ValueError) as e_info:\n dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-legacy\",\n gather_statistics=False,\n dataset={\"validate_schema\": True},\n ).compute()\n assert e_info.message.contains(\"ValueError: Schema in partition\")\n assert e_info.message.contains(\"was different\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema_test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1090, "end_line": 1175, "span_ids": ["test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema"], "tokens": 817}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(\n tmpdir,\n):\n # Data types to test: strings, arrays, ints, timezone aware timestamps\n in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]\n out_arrays = [[0, 1, 2], [3, 4], None, None]\n in_strings = [\"a\", \"b\", np.nan, np.nan]\n out_strings = [\"a\", \"b\", None, None]\n tstamp = pd.Timestamp(1513393355, unit=\"s\")\n in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]\n out_tstamps = [\n # Timestamps come out in numpy.datetime64 format\n tstamp.to_datetime64(),\n tstamp.to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n timezone = \"US/Eastern\"\n tz_tstamp = pd.Timestamp(1513393355, unit=\"s\", tz=timezone)\n in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]\n out_tz_tstamps = [\n # Timezones do not make it through a write-read cycle.\n tz_tstamp.tz_convert(None).to_datetime64(),\n tz_tstamp.tz_convert(None).to_datetime64(),\n np.datetime64(\"NaT\"),\n np.datetime64(\"NaT\"),\n ]\n\n df = pd.DataFrame(\n {\n \"partition_column\": [0, 0, 1, 1],\n \"arrays\": in_arrays,\n \"strings\": in_strings,\n \"tstamps\": in_tstamps,\n \"tz_tstamps\": in_tz_tstamps,\n }\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n schema = pa.schema(\n [\n (\"arrays\", pa.list_(pa.int64())),\n (\"strings\", pa.string()),\n (\"tstamps\", pa.timestamp(\"ns\")),\n (\"tz_tstamps\", pa.timestamp(\"ns\", timezone)),\n (\"partition_column\", pa.int64()),\n ]\n )\n ddf.to_parquet(\n str(tmpdir), engine=\"pyarrow\", partition_on=\"partition_column\", schema=schema\n )\n ddf_after_write = (\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\", gather_statistics=False)\n .compute()\n .reset_index(drop=True)\n )\n\n # Check array support\n arrays_after_write = ddf_after_write.arrays.values\n for i in range(len(df)):\n assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])\n\n # Check datetime support\n tstamps_after_write = ddf_after_write.tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tstamps_after_write[i]):\n assert np.isnat(out_tstamps[i])\n else:\n assert tstamps_after_write[i] == out_tstamps[i]\n\n # Check timezone aware datetime support\n tz_tstamps_after_write = ddf_after_write.tz_tstamps.values\n for i in range(len(df)):\n # Need to test NaT separately\n if np.isnat(tz_tstamps_after_write[i]):\n assert np.isnat(out_tz_tstamps[i])\n else:\n assert tz_tstamps_after_write[i] == out_tz_tstamps[i]\n\n # Check string support\n assert np.array_equal(ddf_after_write.strings.values, out_strings)\n\n # Check partition column\n assert np.array_equal(ddf_after_write.partition_column, df.partition_column)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_schema_inference_test_pyarrow_schema_inference.if_index_and_engine_f.else_.assert_eq_df_df_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_schema_inference_test_pyarrow_schema_inference.if_index_and_engine_f.else_.assert_eq_df_df_out_", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1209, "end_line": 1252, "span_ids": ["test_pyarrow_schema_inference"], "tokens": 391}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"index\", [False, True])\n@pytest.mark.parametrize(\"schema\", [\"infer\", \"complex\"])\ndef test_pyarrow_schema_inference(tmpdir, index, engine, schema):\n if schema == \"complex\":\n schema = {\"index\": pa.string(), \"amount\": pa.int64()}\n\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"index\": [\"1\", \"2\", \"3\", \"2\", \"3\", \"1\", \"4\"],\n \"date\": pd.to_datetime(\n [\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-01\",\n \"2017-01-02\",\n \"2017-01-02\",\n \"2017-01-06\",\n \"2017-01-09\",\n ]\n ),\n \"amount\": [100, 200, 300, 400, 500, 600, 700],\n },\n index=range(7, 14),\n )\n if index:\n df = dd.from_pandas(df, npartitions=2).set_index(\"index\")\n else:\n df = dd.from_pandas(df, npartitions=2)\n\n df.to_parquet(tmpdir, engine=\"pyarrow\", schema=schema)\n df_out = dd.read_parquet(tmpdir, engine=engine)\n df_out.compute()\n\n if index and engine == \"fastparquet\":\n # Fastparquet fails to detect int64 from _metadata\n df_out[\"amount\"] = df_out[\"amount\"].astype(\"int64\")\n\n # Fastparquet not handling divisions for\n # pyarrow-written dataset with string index\n assert_eq(df, df_out, check_divisions=False)\n else:\n assert_eq(df, df_out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_duplicates_test_partition_on_duplicates.for_root_dirs_files_in_.for_file_in_files_.assert_file_in_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_duplicates_test_partition_on_duplicates.for_root_dirs_files_in_.for_file_in_files_.assert_file_in_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1153, "end_line": 1178, "span_ids": ["test_partition_on_duplicates"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on_duplicates(tmpdir, engine):\n # https://github.com/dask/dask/issues/6445\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"data\": np.random.random(size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n\n for _ in range(2):\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], 
engine=engine)\n\n out = dd.read_parquet(tmpdir, engine=engine).compute()\n\n assert len(df) == len(out)\n for root, dirs, files in os.walk(tmpdir):\n for file in files:\n assert file in (\n \"part.0.parquet\",\n \"part.1.parquet\",\n \"_common_metadata\",\n \"_metadata\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_string_test_partition_on_string.for_val_in_df_aa_unique_.assert_set_df_bb_df_aa_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_string_test_partition_on_string.for_val_in_df_aa_unique_.assert_set_df_bb_df_aa_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1281, "end_line": 1303, "span_ids": ["test_partition_on_string"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"partition_on\", [\"aa\", [\"aa\"]])\ndef test_partition_on_string(tmpdir, partition_on):\n tmpdir = str(tmpdir)\n with dask.config.set(scheduler=\"single-threaded\"):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"aa\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"bb\": np.random.random(size=100),\n \"cc\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(\n tmpdir, partition_on=partition_on, write_index=False, engine=\"pyarrow\"\n )\n out = dd.read_parquet(\n tmpdir, index=False, gather_statistics=False, engine=\"pyarrow\"\n )\n out = out.compute()\n for val in df.aa.unique():\n assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_categorical_test_filters_categorical.assert_len_ddftest_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_categorical_test_filters_categorical.assert_len_ddftest_read_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1206, "end_line": 1224, "span_ids": ["test_filters_categorical"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef 
test_filters_categorical(tmpdir, write_engine, read_engine):\n tmpdir = str(tmpdir)\n cats = [\"2018-01-01\", \"2018-01-02\", \"2018-01-03\", \"2018-01-04\"]\n dftest = pd.DataFrame(\n {\n \"dummy\": [1, 1, 1, 1],\n \"DatePart\": pd.Categorical(cats, categories=cats, ordered=True),\n }\n )\n ddftest = dd.from_pandas(dftest, npartitions=4).set_index(\"dummy\")\n ddftest.to_parquet(tmpdir, partition_on=\"DatePart\", engine=write_engine)\n ddftest_read = dd.read_parquet(\n tmpdir,\n index=\"dummy\",\n engine=read_engine,\n filters=[((\"DatePart\", \"<=\", \"2018-01-02\"))],\n )\n assert len(ddftest_read) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_fiters_file_list_test_fiters_file_list.assert_len_ddf2_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_fiters_file_list_test_fiters_file_list.assert_len_ddf2_0", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1303, "end_line": 1323, "span_ids": ["test_fiters_file_list"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fiters_file_list(tmpdir, engine):\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf.to_parquet(str(tmpdir), engine=engine)\n fils = str(tmpdir.join(\"*.parquet\"))\n ddf_out = dd.read_parquet(\n fils, gather_statistics=True, engine=engine, filters=[(\"x\", \">\", 3)]\n )\n\n assert ddf_out.npartitions == 3\n assert_eq(df[df[\"x\"] > 3], ddf_out.compute(), check_index=False)\n\n # Check that first partition gets filtered for single-path input\n ddf2 = dd.read_parquet(\n str(tmpdir.join(\"part.0.parquet\")),\n gather_statistics=True,\n engine=engine,\n filters=[(\"x\", \">\", 3)],\n )\n assert len(ddf2) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_read_with_filters_test_divisions_read_with_filters.assert_out_divisions_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_read_with_filters_test_divisions_read_with_filters.assert_out_divisions_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1326, "end_line": 1348, "span_ids": ["test_divisions_read_with_filters"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n size = 100\n categoricals = []\n for value in [\"a\", \"b\", \"c\", \"d\"]:\n categoricals += [value] * int(size / 4)\n df = pd.DataFrame(\n {\n \"a\": categoricals,\n \"b\": np.random.random(size=size),\n \"c\": np.random.randint(1, 5, size=size),\n }\n )\n d = dd.from_pandas(df, npartitions=4)\n # save it\n d.to_parquet(tmpdir, write_index=True, partition_on=[\"a\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", filters=[(\"a\", \"==\", \"b\")])\n # test it\n expected_divisions = (25, 49)\n assert out.divisions == expected_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_are_known_read_with_filters_test_divisions_are_known_read_with_filters.assert_out_divisions_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_are_known_read_with_filters_test_divisions_are_known_read_with_filters.assert_out_divisions_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1351, "end_line": 1370, "span_ids": ["test_divisions_are_known_read_with_filters"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions_are_known_read_with_filters(tmpdir):\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n tmpdir = str(tmpdir)\n # generate dataframe\n df = pd.DataFrame(\n {\n \"unique\": [0, 0, 1, 1, 2, 2, 3, 3],\n \"id\": [\"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\", \"id1\", \"id2\"],\n },\n index=[0, 0, 1, 1, 2, 2, 3, 3],\n )\n d = dd.from_pandas(df, npartitions=2)\n # save it\n d.to_parquet(tmpdir, partition_on=[\"id\"], engine=\"fastparquet\")\n # read it\n out = dd.read_parquet(tmpdir, engine=\"fastparquet\", filters=[(\"id\", \"==\", \"id1\")])\n # test it\n assert out.known_divisions\n expected_divisions = (0, 2, 3)\n assert out.divisions == expected_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_from_fastparquet_parquetfile_test_read_from_fastparquet_parquetfile.with_pytest_raises_Assert.out.dd_read_parquet_pq_f_eng": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_from_fastparquet_parquetfile_test_read_from_fastparquet_parquetfile.with_pytest_raises_Assert.out.dd_read_parquet_pq_f_eng", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1546, "end_line": 1574, "span_ids": ["test_read_from_fastparquet_parquetfile"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@FASTPARQUET_MARK\n@pytest.mark.xfail(reason=\"No longer accept ParquetFile objects\")\ndef test_read_from_fastparquet_parquetfile(tmpdir):\n fn = str(tmpdir)\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(fn, partition_on=[\"a\"], engine=\"fastparquet\")\n\n pq_f = fastparquet.ParquetFile(fn)\n\n # OK with no filters\n out = dd.read_parquet(pq_f).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])\n\n # OK with filters\n out = dd.read_parquet(pq_f, filters=[(\"a\", \"==\", \"B\")]).compute()\n assert set(df.b[df.a == \"B\"]) == set(out.b)\n\n # Engine should not be set to 'pyarrow'\n with pytest.raises(AssertionError):\n out = dd.read_parquet(pq_f, engine=\"pyarrow\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_lazy_test_to_parquet_lazy.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_lazy_test_to_parquet_lazy.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1404, "end_line": 1418, "span_ids": ["test_to_parquet_lazy"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_to_parquet_lazy(tmpdir, scheduler, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1.0, 2.0, 3.0, 4.0]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n value = ddf.to_parquet(tmpdir, compute=False, engine=engine)\n\n assert hasattr(value, \"dask\")\n value.compute(scheduler=scheduler)\n assert os.path.exists(tmpdir)\n\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp96_test_timestamp96.assert_eq_out_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timestamp96_test_timestamp96.assert_eq_out_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1640, "end_line": 1649, "span_ids": ["test_timestamp96"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@FASTPARQUET_MARK\ndef test_timestamp96(tmpdir):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [pd.to_datetime(\"now\", utc=True)]})\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, write_index=False, times=\"int96\")\n pf = fastparquet.ParquetFile(fn)\n assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96\n out = dd.read_parquet(fn, index=False).compute()\n assert_eq(out, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_drill_scheme_test_drill_scheme.assert_np_unique_out_dir": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_drill_scheme_test_drill_scheme.assert_np_unique_out_dir", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1606, "end_line": 1626, "span_ids": ["test_drill_scheme"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@FASTPARQUET_MARK\ndef test_drill_scheme(tmpdir):\n fn = str(tmpdir)\n N = 5\n df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate([\"a\", \"b\", \"c\"])})\n files = []\n for d in [\"test_data1\", \"test_data2\"]:\n dn = os.path.join(fn, d)\n if not os.path.exists(dn):\n os.mkdir(dn)\n files.append(os.path.join(dn, \"data1.parq\"))\n\n fastparquet.write(files[0], df1)\n fastparquet.write(files[1], df2)\n\n df = dd.read_parquet(files)\n assert \"dir0\" in df.columns\n out = df.compute()\n assert \"dir0\" in out\n assert (np.unique(out.dir0) == [\"test_data1\", \"test_data2\"]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_select_cats_test_parquet_select_cats.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_select_cats_test_parquet_select_cats.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1456, "end_line": 1474, "span_ids": ["test_parquet_select_cats"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parquet_select_cats(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame(\n {\n \"categories\": pd.Series(\n np.random.choice([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], size=100),\n dtype=\"category\",\n ),\n \"ints\": pd.Series(list(range(0, 100)), dtype=\"int\"),\n \"floats\": pd.Series(list(range(0, 100)), dtype=\"float\"),\n }\n )\n\n ddf = dd.from_pandas(df, 1)\n ddf.to_parquet(fn, engine=engine)\n rddf = dd.read_parquet(fn, columns=[\"ints\"], engine=engine)\n assert list(rddf.columns) == [\"ints\"]\n rddf = dd.read_parquet(fn, engine=engine)\n assert list(rddf.columns) == list(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_name_test_columns_name.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_columns_name_test_columns_name.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1650, "end_line": 1660, "span_ids": ["test_columns_name"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_columns_name(tmpdir, engine):\n if engine == \"fastparquet\" and fastparquet_version <= parse_version(\"0.3.1\"):\n pytest.skip(\"Fastparquet does not write column_indexes up to 0.3.1\")\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index([\"a\", \"b\"], name=\"idx\"))\n df.columns.name = \"cols\"\n ddf = dd.from_pandas(df, 2)\n\n ddf.to_parquet(tmp_path, engine=engine)\n result = dd.read_parquet(tmp_path, engine=engine, index=[\"idx\"])\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_check_compression_check_compression.if_engine_fastparquet.else_.for_i_in_range_metadata_n.for_j_in_range_len_names_.if_compression_is_None_.else_.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_check_compression_check_compression.if_engine_fastparquet.else_.for_i_in_range_metadata_n.for_j_in_range_len_names_.if_compression_is_None_.else_.assert_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1490, "end_line": 1516, "span_ids": ["check_compression"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_compression(engine, filename, compression):\n if engine == \"fastparquet\":\n pf = fastparquet.ParquetFile(filename)\n md = pf.fmd.row_groups[0].columns[0].meta_data\n if compression is None:\n assert md.total_compressed_size == md.total_uncompressed_size\n else:\n assert md.total_compressed_size != md.total_uncompressed_size\n else:\n metadata = pa.parquet.ParquetDataset(filename).metadata\n names = metadata.schema.names\n for i in range(metadata.num_row_groups):\n row_group = metadata.row_group(i)\n for j in range(len(names)):\n column = row_group.column(j)\n if compression is None:\n assert (\n column.total_compressed_size == column.total_uncompressed_size\n )\n else:\n compress_expect = compression\n if compression == \"default\":\n compress_expect = \"snappy\"\n assert compress_expect.lower() == column.compression.lower()\n assert (\n column.total_compressed_size != column.total_uncompressed_size\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_compression_test_writing_parquet_with_compression.check_compression_engine_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_compression_test_writing_parquet_with_compression.check_compression_engine_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1519, "end_line": 1532, "span_ids": ["test_writing_parquet_with_compression"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef test_writing_parquet_with_compression(tmpdir, compression, engine):\n fn = 
str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine)\n out = dd.read_parquet(fn, engine=engine)\n assert_eq(out, ddf)\n check_compression(engine, fn, compression)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_partition_on_and_compression_test_writing_parquet_with_partition_on_and_compression.check_compression_engine_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_partition_on_and_compression_test_writing_parquet_with_partition_on_and_compression.check_compression_engine_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1535, "end_line": 1546, "span_ids": ["test_writing_parquet_with_partition_on_and_compression"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression,\", [\"default\", None, \"gzip\", \"snappy\"])\ndef test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):\n fn = str(tmpdir)\n if compression in [\"snappy\", \"default\"]:\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 10, \"y\": [1, 2, 3] * 10})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=[\"x\"])\n check_compression(engine, fn, compression)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pandas_metadata_pandas_metadata.return.request_param": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pandas_metadata_pandas_metadata.return.request_param", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1549, "end_line": 1623, "span_ids": ["pandas_metadata"], "tokens": 452}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(\n params=[\n # 
fastparquet 0.1.3\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.7.1\n {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"idx\"],\n \"pandas_version\": \"0.21.0\",\n },\n # pyarrow 0.8.0\n {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"idx\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n },\n # TODO: fastparquet update\n ]\n)\ndef pandas_metadata(request):\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_test_parse_pandas_metadata.assert_isinstance_mapping": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_test_parse_pandas_metadata.assert_isinstance_mapping", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1626, "end_line": 1640, "span_ids": ["test_parse_pandas_metadata"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata(pandas_metadata):\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(\n pandas_metadata\n )\n assert index_names == [\"idx\"]\n assert column_names == [\"A\"]\n assert column_index_names == [None]\n\n # for new pyarrow\n if pandas_metadata[\"index_columns\"] == [\"__index_level_0__\"]:\n assert mapping == {\"__index_level_0__\": \"idx\", \"A\": \"A\"}\n else:\n assert mapping == {\"idx\": \"idx\", \"A\": \"A\"}\n\n assert isinstance(mapping, dict)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_null_index_test_parse_pandas_metadata_null_index.None_9": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_null_index_test_parse_pandas_metadata_null_index.None_9", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1643, "end_line": 1708, "span_ids": ["test_parse_pandas_metadata_null_index"], "tokens": 492}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata_null_index():\n # pyarrow 0.7.1 None for index\n e_index_names = [None]\n e_column_names = [\"x\"]\n e_mapping = {\"__index_level_0__\": None, \"x\": \"x\"}\n e_column_index_names = [None]\n\n md = {\n \"columns\": [\n {\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"metadata\": None,\n \"name\": \"__index_level_0__\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names\n\n # pyarrow 0.8.0 None for index\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"x\",\n \"metadata\": None,\n \"name\": \"x\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": None,\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)\n assert index_names == e_index_names\n assert column_names == e_column_names\n assert mapping == e_mapping\n assert column_index_names == e_column_index_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_no_metadata_test_read_no_metadata.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_no_metadata_test_read_no_metadata.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1884, "end_line": 1896, "span_ids": ["test_read_no_metadata"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_read_no_metadata(tmpdir, engine):\n # use pyarrow.parquet to create a parquet file without\n # pandas metadata\n tmp = str(tmpdir) + \"table.parq\"\n\n table = pa.Table.from_arrays(\n [pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=[\"A\", \"B\"]\n )\n pq.write_table(table, tmp)\n result = dd.read_parquet(tmp, engine=engine)\n expected = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [3, 4, 5]})\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_duplicate_index_columns_test_parse_pandas_metadata_duplicate_index_columns.assert_column_index_names": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_duplicate_index_columns_test_parse_pandas_metadata_duplicate_index_columns.assert_column_index_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1726, "end_line": 1765, "span_ids": ["test_parse_pandas_metadata_duplicate_index_columns"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata_duplicate_index_columns():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_column_with_index_name_test_parse_pandas_metadata_column_with_index_name.assert_column_index_names": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parse_pandas_metadata_column_with_index_name_test_parse_pandas_metadata_column_with_index_name.assert_column_index_names", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1768, "end_line": 1807, "span_ids": ["test_parse_pandas_metadata_column_with_index_name"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_pandas_metadata_column_with_index_name():\n md = {\n \"column_indexes\": [\n {\n \"field_name\": None,\n \"metadata\": {\"encoding\": \"UTF-8\"},\n \"name\": None,\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n }\n ],\n \"columns\": [\n {\n \"field_name\": \"A\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"int64\",\n \"pandas_type\": \"int64\",\n },\n {\n \"field_name\": \"__index_level_0__\",\n \"metadata\": None,\n \"name\": \"A\",\n \"numpy_type\": \"object\",\n \"pandas_type\": \"unicode\",\n },\n ],\n \"index_columns\": [\"__index_level_0__\"],\n \"pandas_version\": \"0.21.0\",\n }\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(md)\n assert index_names == [\"A\"]\n assert column_names == [\"A\"]\n assert storage_name_mapping == {\"__index_level_0__\": \"A\", \"A\": \"A\"}\n assert column_index_names == [None]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_kwargs_test_writing_parquet_with_kwargs.for_val_in_df_a_unique_.assert_set_df_b_df_a_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_kwargs_test_writing_parquet_with_kwargs.for_val_in_df_a_unique_.assert_set_df_b_df_a_v", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1962, "end_line": 1999, "span_ids": ["test_writing_parquet_with_kwargs"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_writing_parquet_with_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n path1 = os.path.join(fn, \"normal\")\n path2 = os.path.join(fn, \"partitioned\")\n pytest.importorskip(\"snappy\")\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, 
npartitions=3)\n\n engine_kwargs = {\n \"pyarrow-dataset\": {\n \"compression\": \"snappy\",\n \"coerce_timestamps\": None,\n \"use_dictionary\": True,\n },\n \"fastparquet\": {\"compression\": \"snappy\", \"times\": \"int64\", \"fixed_text\": None},\n }\n engine_kwargs[\"pyarrow-legacy\"] = engine_kwargs[\"pyarrow-dataset\"]\n\n ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])\n out = dd.read_parquet(path1, engine=engine)\n assert_eq(out, ddf, check_index=(engine != \"fastparquet\"))\n\n # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets\n with dask.config.set(scheduler=\"sync\"):\n ddf.to_parquet(\n path2, engine=engine, partition_on=[\"a\"], **engine_kwargs[engine]\n )\n out = dd.read_parquet(path2, engine=engine).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_unknown_kwargs_test_to_parquet_with_get.assert_eq_result_df_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_writing_parquet_with_unknown_kwargs_test_to_parquet_with_get.assert_eq_result_df_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2023, "end_line": 2049, "span_ids": ["test_to_parquet_with_get", "test_writing_parquet_with_unknown_kwargs"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_writing_parquet_with_unknown_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n\n with pytest.raises(TypeError):\n ddf.to_parquet(fn, engine=engine, unknown_key=\"unknown_value\")\n\n\n@ANY_ENGINE_MARK\ndef test_to_parquet_with_get(tmpdir):\n from dask.multiprocessing import get as mp_get\n\n tmpdir = str(tmpdir)\n\n flag = [False]\n\n def my_get(*args, **kwargs):\n flag[0] = True\n return mp_get(*args, **kwargs)\n\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.to_parquet(tmpdir, compute_kwargs={\"scheduler\": my_get})\n assert flag[0]\n\n result = dd.read_parquet(os.path.join(tmpdir, \"*\"))\n assert_eq(result, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_select_partitioned_column_test_select_partitioned_column.df_partitioned_df_partiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_select_partitioned_column_test_select_partitioned_column.df_partitioned_df_partiti", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2097, "end_line": 2117, "span_ids": ["test_select_partitioned_column"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_select_partitioned_column(tmpdir, engine):\n pytest.importorskip(\"snappy\")\n\n fn = str(tmpdir)\n size = 20\n d = {\n \"signal1\": np.random.normal(0, 0.3, size=size).cumsum() + 50,\n \"fake_categorical1\": np.random.choice([\"A\", \"B\", \"C\"], size=size),\n \"fake_categorical2\": np.random.choice([\"D\", \"E\", \"F\"], size=size),\n }\n df = dd.from_pandas(pd.DataFrame(d), 2)\n df.to_parquet(\n fn,\n compression=\"snappy\",\n write_index=False,\n engine=engine,\n partition_on=[\"fake_categorical1\", \"fake_categorical2\"],\n )\n\n df_partitioned = dd.read_parquet(fn, engine=engine)\n df_partitioned[df_partitioned.fake_categorical1 == \"A\"].compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_with_tz_test_with_tz.with_warnings_catch_warni.if_engine_fastparquet.assert_eq_df_df2_check_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_with_tz_test_with_tz.with_warnings_catch_warni.if_engine_fastparquet.assert_eq_df_df2_check_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2120, "end_line": 2133, "span_ids": ["test_with_tz"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_with_tz(tmpdir, engine):\n if engine == \"fastparquet\" and fastparquet_version < parse_version(\"0.3.0\"):\n pytest.skip(\"fastparquet<0.3.0 did not support this\")\n\n with warnings.catch_warnings():\n if engine == \"fastparquet\":\n # fastparquet-442\n warnings.simplefilter(\"ignore\", FutureWarning) # pandas 0.25\n fn = str(tmpdir)\n df = pd.DataFrame([[0]], columns=[\"a\"], dtype=\"datetime64[ns, UTC]\")\n df = dd.from_pandas(df, 1)\n df.to_parquet(fn, engine=engine)\n df2 = dd.read_parquet(fn, engine=engine)\n assert_eq(df, df2, check_divisions=False, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_arrow_partitioning_test_arrow_partitioning.ddf_astype_b_np_float": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_arrow_partitioning_test_arrow_partitioning.ddf_astype_b_np_float", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2096, "end_line": 2112, "span_ids": ["test_arrow_partitioning"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_arrow_partitioning(tmpdir):\n # Issue #3518\n path = str(tmpdir)\n data = {\n \"p\": np.repeat(np.arange(3), 2).astype(np.int8),\n \"b\": np.repeat(-1, 6).astype(np.int16),\n \"c\": np.repeat(-2, 6).astype(np.float32),\n \"d\": np.repeat(-3, 6).astype(np.float64),\n }\n pdf = pd.DataFrame(data)\n ddf = dd.from_pandas(pdf, npartitions=2)\n ddf.to_parquet(path, engine=\"pyarrow\", write_index=False, partition_on=\"p\")\n\n ddf = dd.read_parquet(path, index=False, engine=\"pyarrow\")\n\n ddf.astype({\"b\": np.float32}).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_informative_error_messages_test_append_cat_fp.assert_d_x_tolist_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_informative_error_messages_test_append_cat_fp.assert_d_x_tolist_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1968, "end_line": 1988, "span_ids": ["test_append_cat_fp", "test_informative_error_messages"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_informative_error_messages():\n with pytest.raises(ValueError) as info:\n dd.read_parquet(\"foo\", engine=\"foo\")\n\n assert \"foo\" in str(info.value)\n assert \"arrow\" in str(info.value)\n assert \"fastparquet\" in str(info.value)\n\n\ndef test_append_cat_fp(tmpdir, engine):\n path = str(tmpdir)\n # https://github.com/dask/dask/issues/4120\n df = pd.DataFrame({\"x\": [\"a\", \"a\", \"b\", \"a\", \"b\"]})\n df[\"x\"] = df[\"x\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=1)\n\n dd.to_parquet(ddf, path, engine=engine)\n dd.to_parquet(ddf, path, append=True, ignore_divisions=True, engine=engine)\n\n d = dd.read_parquet(path, engine=engine).compute()\n assert d[\"x\"].tolist() == [\"a\", \"a\", \"b\", \"a\", \"b\"] * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_arrow_test_roundtrip_arrow.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_arrow_test_roundtrip_arrow.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2138, "end_line": 2177, "span_ids": ["test_roundtrip_arrow"], "tokens": 683}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\n \"df\",\n [\n pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]}),\n pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}),\n pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}),\n pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])})),\n pytest.param(pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])})),\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000000, 2000000, 1000000]))}), # ms\n pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), # us\n pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"),\n # pd.DataFrame({'x': [3, 2, 1]}).astype('M8[ns]'), # Casting errors\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"),\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"),\n pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]),\n pd.DataFrame(\n {\"x\": [4, 5, 6, 1, 2, 3]}, index=pd.Index([1, 2, 3, 4, 5, 6], name=\"foo\")\n ),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]),\n pd.DataFrame({\"0\": [3, 2, 1]}),\n pd.DataFrame({\"x\": [3, 2, None]}),\n pd.DataFrame({\"-\": [3.0, 2.0, None]}),\n pd.DataFrame({\".\": [3.0, 2.0, None]}),\n pd.DataFrame({\" \": [3.0, 2.0, None]}),\n ],\n)\ndef test_roundtrip_arrow(tmpdir, df):\n # Index will be given a name when preserved as index\n tmp_path = str(tmpdir)\n if not df.index.name:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n dd.to_parquet(ddf, tmp_path, engine=\"pyarrow\", write_index=True)\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow\", gather_statistics=True)\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_datasets_timeseries_test_pathlib_path.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_datasets_timeseries_test_pathlib_path.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", 
"start_line": 2033, "end_line": 2053, "span_ids": ["test_pathlib_path", "test_datasets_timeseries"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_datasets_timeseries(tmpdir, engine):\n tmp_path = str(tmpdir)\n df = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-10\", freq=\"1d\"\n ).persist()\n df.to_parquet(tmp_path, engine=engine)\n\n df2 = dd.read_parquet(tmp_path, engine=engine)\n assert_eq(df, df2)\n\n\ndef test_pathlib_path(tmpdir, engine):\n import pathlib\n\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n path = pathlib.Path(str(tmpdir))\n ddf.to_parquet(path, engine=engine)\n ddf2 = dd.read_parquet(path, engine=engine)\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_metadata_nthreads_test_pyarrow_metadata_nthreads.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_metadata_nthreads_test_pyarrow_metadata_nthreads.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2243, "end_line": 2252, "span_ids": ["test_pyarrow_metadata_nthreads"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_LE_MARK\ndef test_pyarrow_metadata_nthreads(tmpdir):\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(tmp_path, engine=\"pyarrow\")\n ops = {\"dataset\": {\"metadata_nthreads\": 2}}\n ddf2 = dd.read_parquet(tmp_path, engine=\"pyarrow-legacy\", **ops)\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_large_test_categories_large.assert_eq_sorted_df_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_large_test_categories_large.assert_eq_sorted_df_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2215, "end_line": 2226, "span_ids": 
["test_categories_large"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@FASTPARQUET_MARK\ndef test_categories_large(tmpdir, engine):\n # Issue #5112\n fn = str(tmpdir.join(\"parquet_int16.parq\"))\n numbers = np.random.randint(0, 800000, size=1000000)\n df = pd.DataFrame(numbers.T, columns=[\"name\"])\n df.name = df.name.astype(\"category\")\n\n df.to_parquet(fn, engine=\"fastparquet\", compression=\"uncompressed\")\n ddf = dd.read_parquet(fn, engine=engine, categories={\"name\": 80000})\n\n assert_eq(sorted(df.name.cat.categories), sorted(ddf.compute().name.cat.categories))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_pyarrow_test_timeseries_nulls_in_schema_pyarrow.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_pyarrow_test_timeseries_nulls_in_schema_pyarrow.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2346, "end_line": 2389, "span_ids": ["test_timeseries_nulls_in_schema_pyarrow"], "tokens": 383}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_LE_MARK\n@pytest.mark.parametrize(\"numerical\", [True, False])\n@pytest.mark.parametrize(\n \"timestamp\", [\"2000-01-01\", \"2000-01-02\", \"2000-01-03\", \"2000-01-04\"]\n)\ndef test_timeseries_nulls_in_schema_pyarrow(tmpdir, timestamp, numerical):\n tmp_path = str(tmpdir)\n ddf2 = dd.from_pandas(\n pd.DataFrame(\n {\n \"timestamp\": [\n pd.Timestamp(\"2000-01-01\"),\n pd.Timestamp(\"2000-01-02\"),\n pd.Timestamp(\"2000-01-03\"),\n pd.Timestamp(\"2000-01-04\"),\n ],\n \"id\": np.arange(4, dtype=\"float64\"),\n \"name\": [\"cat\", \"dog\", \"bird\", \"cow\"],\n }\n ),\n npartitions=2,\n ).persist()\n if numerical:\n ddf2.id = ddf2.id.where(ddf2.timestamp == timestamp, None)\n ddf2.id = ddf2.id.astype(\"float64\")\n else:\n ddf2.name = ddf2.name.where(ddf2.timestamp == timestamp, None)\n\n # There should be no schema error if you specify a schema on write\n schema = pa.schema(\n [(\"timestamp\", pa.timestamp(\"ns\")), (\"id\", pa.float64()), (\"name\", pa.string())]\n )\n ddf2.to_parquet(tmp_path, schema=schema, write_index=False, engine=\"pyarrow\")\n assert_eq(\n dd.read_parquet(\n tmp_path,\n dataset={\"validate_schema\": True},\n index=False,\n engine=\"pyarrow-legacy\",\n ),\n ddf2,\n check_divisions=False,\n check_index=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_graph_size_pyarrow_test_graph_size_pyarrow.assert_len_pickle_dumps_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_graph_size_pyarrow_test_graph_size_pyarrow.assert_len_pickle_dumps_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2369, "end_line": 2381, "span_ids": ["test_graph_size_pyarrow"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_graph_size_pyarrow(tmpdir, engine):\n import pickle\n\n fn = str(tmpdir)\n\n ddf1 = dask.datasets.timeseries(\n start=\"2000-01-01\", end=\"2000-01-02\", freq=\"60S\", partition_freq=\"1H\"\n )\n\n ddf1.to_parquet(fn, engine=engine)\n ddf2 = dd.read_parquet(fn, engine=engine)\n\n assert len(pickle.dumps(ddf2.__dask_graph__())) < 25000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_test_getitem_optimization.assert_eq_ddf_compute_opt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_test_getitem_optimization.assert_eq_ddf_compute_opt", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2445, "end_line": 2472, "span_ids": ["test_getitem_optimization"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"preserve_index\", [True, False])\n@pytest.mark.parametrize(\"index\", [None, np.random.permutation(2000)])\ndef test_getitem_optimization(tmpdir, engine, preserve_index, index):\n tmp_path_rd = str(tmpdir.mkdir(\"read\"))\n tmp_path_wt = str(tmpdir.mkdir(\"write\"))\n df = pd.DataFrame(\n {\"A\": [1, 2] * 1000, \"B\": [3, 4] * 1000, \"C\": [5, 6] * 1000}, index=index\n )\n df.index.name = \"my_index\"\n ddf = dd.from_pandas(df, 2, sort=False)\n\n ddf.to_parquet(tmp_path_rd, engine=engine, write_index=preserve_index)\n ddf = dd.read_parquet(tmp_path_rd, engine=engine)[\"B\"]\n\n # Write ddf back to disk to check that the round trip\n # preserves the getitem optimization\n out = ddf.to_frame().to_parquet(tmp_path_wt, engine=engine, compute=False)\n dsk = optimize_dataframe_getitem(out.dask, keys=[out.key])\n\n subgraph_rd = hlg_layer(dsk, 
\"read-parquet\")\n assert isinstance(subgraph_rd, DataFrameIOLayer)\n assert subgraph_rd.columns == [\"B\"]\n assert next(iter(subgraph_rd.dsk.values()))[0].columns == [\"B\"]\n\n subgraph_wt = hlg_layer(dsk, \"to-parquet\")\n assert isinstance(subgraph_wt, Blockwise)\n\n assert_eq(ddf.compute(optimize_graph=False), ddf.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_empty_test_getitem_optimization_empty.assert_subgraph_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_empty_test_getitem_optimization_empty.assert_subgraph_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2472, "end_line": 2483, "span_ids": ["test_getitem_optimization_empty"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_optimization_empty(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n df2 = dd.read_parquet(fn, columns=[], engine=engine)\n dsk = optimize_dataframe_getitem(df2.dask, keys=[df2._name])\n\n subgraph = next(iter(dsk.layers.values()))\n assert isinstance(subgraph, DataFrameIOLayer)\n assert subgraph.columns == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_getitem_and_nonblockwise_test_optimize_getitem_and_nonblockwise.df2_a_b_rolling_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_getitem_and_nonblockwise_test_optimize_getitem_and_nonblockwise.df2_a_b_rolling_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2646, "end_line": 2656, "span_ids": ["test_optimize_getitem_and_nonblockwise"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@ANY_ENGINE_MARK\ndef test_optimize_getitem_and_nonblockwise(tmpdir):\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 
4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2[[\"a\", \"b\"]].rolling(3).max().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_and_not_test_optimize_and_not.for_a_b_in_zip_result_e.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_and_not_test_optimize_and_not.for_a_b_in_zip_result_e.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2659, "end_line": 2682, "span_ids": ["test_optimize_and_not"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@ANY_ENGINE_MARK\ndef test_optimize_and_not(tmpdir):\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path)\n\n df2 = dd.read_parquet(path)\n df2a = df2[\"a\"].groupby(df2[\"c\"]).first().to_delayed()\n df2b = df2[\"b\"].groupby(df2[\"c\"]).first().to_delayed()\n df2c = df2[[\"a\", \"b\"]].rolling(2).max().to_delayed()\n df2d = df2.rolling(2).max().to_delayed()\n (result,) = dask.compute(df2a + df2b + df2c + df2d)\n\n expected = [\n dask.compute(df2a)[0][0],\n dask.compute(df2b)[0][0],\n dask.compute(df2c)[0][0],\n dask.compute(df2d)[0][0],\n ]\n for a, b in zip(result, expected):\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_pandas_chunksize_test_roundtrip_pandas_chunksize.assert_eq_pdf_ddf_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_pandas_chunksize_test_roundtrip_pandas_chunksize.assert_eq_pdf_ddf_read_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2708, "end_line": 2726, "span_ids": ["test_roundtrip_pandas_chunksize"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_roundtrip_pandas_chunksize(tmpdir, write_engine, read_engine):\n path = str(tmpdir.join(\"test.parquet\"))\n pdf 
= df.copy()\n pdf.index.name = \"index\"\n pdf.to_parquet(\n path, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n\n ddf_read = dd.read_parquet(\n path,\n engine=read_engine,\n chunksize=\"10 kiB\",\n gather_statistics=True,\n split_row_groups=True,\n index=\"index\",\n )\n\n assert_eq(pdf, ddf_read)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_pandas_fastparquet_partitioned_test_read_pandas_fastparquet_partitioned.assert_len_ddf_read_compu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_pandas_fastparquet_partitioned_test_read_pandas_fastparquet_partitioned.assert_len_ddf_read_compu", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2763, "end_line": 2773, "span_ids": ["test_read_pandas_fastparquet_partitioned"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@FASTPARQUET_MARK\ndef test_read_pandas_fastparquet_partitioned(tmpdir, engine):\n pdf = pd.DataFrame(\n [{\"str\": str(i), \"int\": i, \"group\": \"ABC\"[i % 3]} for i in range(6)]\n )\n path = str(tmpdir)\n pdf.to_parquet(path, partition_cols=[\"group\"], engine=\"fastparquet\")\n ddf_read = dd.read_parquet(path, engine=engine)\n\n assert len(ddf_read[\"group\"].compute()) == 6\n assert len(ddf_read.compute().group) == 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filter_nonpartition_columns_test_filter_nonpartition_columns.assert_df_read_time_ma": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filter_nonpartition_columns_test_filter_nonpartition_columns.assert_df_read_time_ma", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2569, "end_line": 2595, "span_ids": ["test_filter_nonpartition_columns"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"gather_statistics\", [None, True])\n@write_read_engines()\ndef test_filter_nonpartition_columns(\n tmpdir, write_engine, read_engine, gather_statistics\n):\n tmpdir = str(tmpdir)\n df_write = 
pd.DataFrame(\n {\n \"id\": [1, 2, 3, 4] * 4,\n \"time\": np.arange(16),\n \"random\": np.random.choice([\"cat\", \"dog\"], size=16),\n }\n )\n ddf_write = dd.from_pandas(df_write, npartitions=4)\n ddf_write.to_parquet(\n tmpdir, write_index=False, partition_on=[\"id\"], engine=write_engine\n )\n ddf_read = dd.read_parquet(\n tmpdir,\n index=False,\n engine=read_engine,\n gather_statistics=gather_statistics,\n filters=[((\"time\", \"<\", 5))],\n )\n df_read = ddf_read.compute()\n assert len(df_read) == len(df_read[df_read[\"time\"] < 5])\n assert df_read[\"time\"].max() < 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_metadata_nullable_pyarrow_test_pandas_metadata_nullable_pyarrow.assert_eq_ddf1_ddf2_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_metadata_nullable_pyarrow_test_pandas_metadata_nullable_pyarrow.assert_eq_ddf1_ddf2_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2985, "end_line": 3001, "span_ids": ["test_pandas_metadata_nullable_pyarrow"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_pandas_metadata_nullable_pyarrow(tmpdir):\n tmpdir = str(tmpdir)\n\n ddf1 = dd.from_pandas(\n pd.DataFrame(\n {\n \"A\": pd.array([1, None, 2], dtype=\"Int64\"),\n \"B\": pd.array([\"dog\", \"cat\", None], dtype=\"str\"),\n }\n ),\n npartitions=1,\n )\n ddf1.to_parquet(tmpdir, engine=\"pyarrow\")\n ddf2 = dd.read_parquet(tmpdir, engine=\"pyarrow\")\n\n assert_eq(ddf1, ddf2, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow.ArrowEngineWithTimestampClamp_test_pandas_timestamp_overflow_pyarrow.dd_read_parquet_str_tmpdi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow.ArrowEngineWithTimestampClamp_test_pandas_timestamp_overflow_pyarrow.dd_read_parquet_str_tmpdi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3056, "end_line": 3101, "span_ids": ["test_pandas_timestamp_overflow_pyarrow"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_pandas_timestamp_overflow_pyarrow(tmpdir):\n # ... other code\n\n class ArrowEngineWithTimestampClamp(ArrowEngine):\n @classmethod\n def clamp_arrow_datetimes(cls, arrow_table: pa.Table) -> pa.Table:\n \"\"\"Constrain datetimes to be valid for pandas\n\n Since pandas works in ns precision and arrow / parquet defaults to ms\n precision we need to clamp our datetimes to something reasonable\"\"\"\n\n new_columns = []\n for i, col in enumerate(arrow_table.columns):\n if pa.types.is_timestamp(col.type) and (\n col.type.unit in (\"s\", \"ms\", \"us\")\n ):\n multiplier = {\"s\": 1_0000_000_000, \"ms\": 1_000_000, \"us\": 1_000}[\n col.type.unit\n ]\n\n original_type = col.type\n\n series: pd.Series = col.cast(pa.int64()).to_pandas()\n info = np.iinfo(np.dtype(\"int64\"))\n # constrain data to be within valid ranges\n series.clip(\n lower=info.min // multiplier + 1,\n upper=info.max // multiplier,\n inplace=True,\n )\n new_array = pa.array(series, pa.int64())\n new_array = new_array.cast(original_type)\n new_columns.append(new_array)\n else:\n new_columns.append(col)\n\n return pa.Table.from_arrays(new_columns, names=arrow_table.column_names)\n\n @classmethod\n def _arrow_table_to_pandas(\n cls, arrow_table: pa.Table, categories, **kwargs\n ) -> pd.DataFrame:\n fixed_arrow_table = cls.clamp_arrow_datetimes(arrow_table)\n return super()._arrow_table_to_pandas(\n fixed_arrow_table, categories, **kwargs\n )\n\n # this should not fail, but instead produce timestamps that are in the valid range\n dd.read_parquet(str(tmpdir), engine=ArrowEngineWithTimestampClamp).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_preserve_index_test_partitioned_preserve_index.assert_eq_expect_got_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_preserve_index_test_partitioned_preserve_index.assert_eq_expect_got_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3150, "end_line": 3169, "span_ids": ["test_partitioned_preserve_index"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@fp_pandas_xfail\ndef test_partitioned_preserve_index(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n size = 1_000\n npartitions = 4\n b = np.arange(npartitions).repeat(size // npartitions)\n data = pd.DataFrame(\n {\n \"myindex\": np.arange(size),\n \"A\": np.random.random(size=size),\n \"B\": pd.Categorical(b),\n }\n ).set_index(\"myindex\")\n data.index.name = None\n df1 = dd.from_pandas(data, npartitions=npartitions)\n df1.to_parquet(tmp, partition_on=\"B\", engine=write_engine)\n\n expect = data[data[\"B\"] == 1]\n got = dd.read_parquet(tmp, engine=read_engine, 
filters=[(\"B\", \"==\", 1)])\n assert_eq(expect, got)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_index_test_from_pandas_preserve_none_index.assert_eq_expect_got_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_index_test_from_pandas_preserve_none_index.assert_eq_expect_got_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2980, "end_line": 2995, "span_ids": ["test_from_pandas_preserve_none_index"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_pandas_preserve_none_index(tmpdir, engine):\n if engine.startswith(\"pyarrow\"):\n pytest.importorskip(\"pyarrow\", minversion=\"0.15.0\")\n\n fn = str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"a\": [1, 2], \"b\": [4, 5], \"c\": [6, 7]}).set_index(\"c\")\n df.index.name = None\n df.to_parquet(\n fn,\n engine=\"pyarrow\" if engine.startswith(\"pyarrow\") else \"fastparquet\",\n index=True,\n )\n\n expect = pd.read_parquet(fn)\n got = dd.read_parquet(fn, engine=engine)\n assert_eq(expect, got)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_rangeindex_test_from_pandas_preserve_none_rangeindex.assert_eq_df0_df1_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_from_pandas_preserve_none_rangeindex_test_from_pandas_preserve_none_rangeindex.assert_eq_df0_df1_comput", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2951, "end_line": 2961, "span_ids": ["test_from_pandas_preserve_none_rangeindex"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_from_pandas_preserve_none_rangeindex(tmpdir, write_engine, read_engine):\n # See GitHub Issue#6348\n fn = str(tmpdir.join(\"test.parquet\"))\n df0 = pd.DataFrame({\"t\": [1, 2, 3]}, index=pd.RangeIndex(start=1, stop=4))\n df0.to_parquet(\n fn, engine=\"pyarrow\" if write_engine.startswith(\"pyarrow\") else \"fastparquet\"\n )\n\n df1 = dd.read_parquet(fn, 
engine=read_engine)\n assert_eq(df0, df1.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_illegal_column_name_test_illegal_column_name.assert_null_name_in_str_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_illegal_column_name_test_illegal_column_name.assert_null_name_in_str_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2749, "end_line": 2767, "span_ids": ["test_illegal_column_name"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_illegal_column_name(tmpdir, engine):\n # Make sure user is prevented from preserving a \"None\" index\n # name if there is already a column using the special `null_name`\n null_name = \"__null_dask_index__\"\n fn = str(tmpdir.join(\"test.parquet\"))\n df = pd.DataFrame({\"x\": [1, 2], null_name: [4, 5]}).set_index(\"x\")\n df.index.name = None\n ddf = dd.from_pandas(df, npartitions=2)\n\n # If we don't want to preserve the None index name, the\n # write should work, but the user should be warned\n with pytest.warns(UserWarning, match=null_name):\n ddf.to_parquet(fn, engine=engine, write_index=False)\n\n # If we do want to preserve the None index name, should\n # get a ValueError for having an illegal column name\n with pytest.raises(ValueError) as e:\n ddf.to_parquet(fn, engine=engine)\n assert null_name in str(e.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_test_empty.with_tmpfile_as_f_.assert_pd_dataframe_empty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_test_empty.with_tmpfile_as_f_.assert_pd_dataframe_empty", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 64, "span_ids": ["test_empty"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty(db):\n from sqlalchemy import Column, Integer, MetaData, Table, create_engine\n\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n metadata = MetaData()\n engine = create_engine(uri)\n table = Table(\n \"empty_table\",\n metadata,\n 
Column(\"id\", Integer, primary_key=True),\n Column(\"col2\", Integer),\n )\n metadata.create_all(engine)\n\n dask_df = read_sql_table(table.name, uri, index_col=\"id\", npartitions=1)\n assert dask_df.index.name == \"id\"\n # The dtype of the empty result might no longer be as expected\n # assert dask_df.col2.dtype == np.dtype(\"int64\")\n pd_dataframe = dask_df.compute()\n assert pd_dataframe.empty is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_other_schema_test_empty_other_schema.engine_execute_DROP_SCHE": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_empty_other_schema_test_empty_other_schema.engine_execute_DROP_SCHE", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 113, "end_line": 157, "span_ids": ["test_empty_other_schema"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skip(\n reason=\"Requires a postgres server. Sqlite does not support multiple schemas.\"\n)\ndef test_empty_other_schema():\n from sqlalchemy import DDL, Column, Integer, MetaData, Table, create_engine, event\n\n # Database configurations.\n pg_host = \"localhost\"\n pg_port = \"5432\"\n pg_user = \"user\"\n pg_pass = \"pass\"\n pg_db = \"db\"\n db_url = f\"postgresql://{pg_user}:{pg_pass}@{pg_host}:{pg_port}/{pg_db}\"\n\n # Create an empty table in a different schema.\n table_name = \"empty_table\"\n schema_name = \"other_schema\"\n engine = create_engine(db_url)\n metadata = MetaData()\n table = Table(\n table_name,\n metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"col2\", Integer),\n schema=schema_name,\n )\n # Create the schema and the table.\n event.listen(\n metadata, \"before_create\", DDL(\"CREATE SCHEMA IF NOT EXISTS %s\" % schema_name)\n )\n metadata.create_all(engine)\n\n # Read the empty table from the other schema.\n dask_df = read_sql_table(\n table.name, db_url, index_col=\"id\", schema=table.schema, npartitions=1\n )\n\n # Validate that the retrieved table is empty.\n assert dask_df.index.name == \"id\"\n assert dask_df.col2.dtype == np.dtype(\"int64\")\n pd_dataframe = dask_df.compute()\n assert pd_dataframe.empty is True\n\n # Drop the schema and the table.\n engine.execute(\"DROP SCHEMA IF EXISTS %s CASCADE\" % schema_name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_needs_rational_test_needs_rational.with_tmpfile_as_f_.None_4": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_needs_rational_test_needs_rational.with_tmpfile_as_f_.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 165, "end_line": 215, "span_ids": ["test_needs_rational"], "tokens": 461}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_needs_rational(db):\n import datetime\n\n now = datetime.datetime.now()\n d = datetime.timedelta(seconds=1)\n df = pd.DataFrame(\n {\n \"a\": list(\"ghjkl\"),\n \"b\": [now + i * d for i in range(5)],\n \"c\": [True, True, False, True, True],\n }\n )\n df = pd.concat(\n [\n df,\n pd.DataFrame(\n [\n {\"a\": \"x\", \"b\": now + d * 1000, \"c\": None},\n {\"a\": None, \"b\": now + d * 1001, \"c\": None},\n ]\n ),\n ]\n )\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n df.to_sql(\"test\", uri, index=False, if_exists=\"replace\")\n\n # one partition contains NULL\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\")\n df2 = df.set_index(\"b\")\n assert_eq(data, df2.astype({\"c\": bool})) # bools are coerced\n\n # one partition contains NULL, but big enough head\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\", head_rows=12)\n df2 = df.set_index(\"b\")\n assert_eq(data, df2)\n\n # empty partitions\n data = read_sql_table(\"test\", uri, npartitions=20, index_col=\"b\")\n part = data.get_partition(12).compute()\n assert part.dtypes.tolist() == [\"O\", bool]\n assert part.empty\n df2 = df.set_index(\"b\")\n assert_eq(data, df2.astype({\"c\": bool}))\n\n # explicit meta\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\", meta=df2[:0])\n part = data.get_partition(1).compute()\n assert part.dtypes.tolist() == [\"O\", \"O\"]\n df2 = df.set_index(\"b\")\n assert_eq(data, df2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_simple_test_npartitions.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_simple_test_npartitions.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 218, "end_line": 265, "span_ids": ["test_npartitions", "test_simple"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_simple(db):\n # single chunk\n data = read_sql_table(\"test\", db, npartitions=2, index_col=\"number\").compute()\n assert (data.name == df.name).all()\n assert data.index.name == 
\"number\"\n assert_eq(data, df)\n\n\ndef test_npartitions(db):\n data = read_sql_table(\n \"test\", db, columns=list(df.columns), npartitions=2, index_col=\"number\"\n )\n assert len(data.divisions) == 3\n assert (data.name.compute() == df.name).all()\n data = read_sql_table(\n \"test\", db, columns=[\"name\"], npartitions=6, index_col=\"number\"\n )\n assert_eq(data, df[[\"name\"]])\n data = read_sql_table(\n \"test\",\n db,\n columns=list(df.columns),\n bytes_per_chunk=\"2 GiB\",\n index_col=\"number\",\n )\n assert data.npartitions == 1\n assert (data.name.compute() == df.name).all()\n\n data_1 = read_sql_table(\n \"test\",\n db,\n columns=list(df.columns),\n bytes_per_chunk=2**30,\n index_col=\"number\",\n head_rows=1,\n )\n assert data_1.npartitions == 1\n assert (data_1.name.compute() == df.name).all()\n\n data = read_sql_table(\n \"test\",\n db,\n columns=list(df.columns),\n bytes_per_chunk=250,\n index_col=\"number\",\n head_rows=1,\n )\n assert data.npartitions == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_divisions_test_division_or_partition.assert_eq_out_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_divisions_test_division_or_partition.assert_eq_out_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 207, "end_line": 232, "span_ids": ["test_divisions", "test_division_or_partition"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions(db):\n data = read_sql_table(\n \"test\", db, columns=[\"name\"], divisions=[0, 2, 4], index_col=\"number\"\n )\n assert data.divisions == (0, 2, 4)\n assert data.index.max().compute() == 4\n assert_eq(data, df[[\"name\"]][df.index <= 4])\n\n\ndef test_division_or_partition(db):\n with pytest.raises(TypeError):\n read_sql_table(\n \"test\",\n db,\n columns=[\"name\"],\n index_col=\"number\",\n divisions=[0, 2, 4],\n npartitions=3,\n )\n\n out = read_sql_table(\"test\", db, index_col=\"number\", bytes_per_chunk=100)\n m = out.map_partitions(\n lambda d: d.memory_usage(deep=True, index=True).sum()\n ).compute()\n assert (50 < m).all() and (m < 200).all()\n assert_eq(out, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_warnings_try_loc.try_.except_KeyError_.return.df_head_0_loc_cindexe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_warnings_try_loc.try_.except_KeyError_.return.df_head_0_loc_cindexe", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", 
"file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 53, "span_ids": ["iloc", "imports", "try_loc", "loc"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport numpy as np\nimport pandas as pd\nfrom tlz import partition\n\nfrom ._compat import PANDAS_GT_131\n\n# preserve compatibility while moving dispatch objects\nfrom .dispatch import ( # noqa: F401\n concat,\n concat_dispatch,\n group_split_dispatch,\n hash_object_dispatch,\n is_categorical_dtype,\n is_categorical_dtype_dispatch,\n tolist,\n tolist_dispatch,\n union_categoricals,\n)\nfrom .utils import is_dataframe_like, is_index_like, is_series_like\n\n# cuDF may try to import old dispatch functions\nhash_df = hash_object_dispatch\ngroup_split = group_split_dispatch\n\n# ---------------------------------\n# indexing\n# ---------------------------------\n\n\ndef loc(df, iindexer, cindexer=None):\n \"\"\"\n .loc for known divisions\n \"\"\"\n if cindexer is None:\n return df.loc[iindexer]\n else:\n return df.loc[iindexer, cindexer]\n\n\ndef iloc(df, cindexer=None):\n return df.iloc[:, cindexer]\n\n\ndef try_loc(df, iindexer, cindexer=None):\n \"\"\"\n .loc for unknown divisions\n \"\"\"\n try:\n return loc(df, iindexer, cindexer)\n except KeyError:\n return df.head(0).loc[:, cindexer]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_boundary_slice_boundary_slice.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_boundary_slice_boundary_slice.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 56, "end_line": 128, "span_ids": ["boundary_slice"], "tokens": 594}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def boundary_slice(df, start, stop, right_boundary=True, left_boundary=True, kind=None):\n \"\"\"Index slice start/stop. Can switch include/exclude boundaries.\n\n Examples\n --------\n >>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])\n >>> boundary_slice(df, 2, None)\n x\n 2 20\n 2 30\n 3 40\n 4 50\n >>> boundary_slice(df, 1, 3)\n x\n 1 10\n 2 20\n 2 30\n 3 40\n >>> boundary_slice(df, 1, 3, right_boundary=False)\n x\n 1 10\n 2 20\n 2 30\n\n Empty input DataFrames are returned\n\n >>> df_empty = pd.DataFrame()\n >>> boundary_slice(df_empty, 1, 3)\n Empty DataFrame\n Columns: []\n Index: []\n \"\"\"\n if len(df.index) == 0:\n return df\n\n if PANDAS_GT_131:\n if kind is not None:\n warnings.warn(\n \"The `kind` argument is no longer used/supported. 
\"\n \"It will be dropped in a future release.\",\n category=FutureWarning,\n )\n kind_opts = {}\n kind = \"loc\"\n else:\n kind = kind or \"loc\"\n kind_opts = {\"kind\": kind}\n\n if kind == \"loc\" and not df.index.is_monotonic_increasing:\n # Pandas treats missing keys differently for label-slicing\n # on monotonic vs. non-monotonic indexes\n # If the index is monotonic, `df.loc[start:stop]` is fine.\n # If it's not, `df.loc[start:stop]` raises when `start` is missing\n if start is not None:\n if left_boundary:\n df = df[df.index >= start]\n else:\n df = df[df.index > start]\n if stop is not None:\n if right_boundary:\n df = df[df.index <= stop]\n else:\n df = df[df.index < stop]\n return df\n\n result = getattr(df, kind)[start:stop]\n if not right_boundary and stop is not None:\n right_index = result.index.get_slice_bound(stop, \"left\", **kind_opts)\n result = result.iloc[:right_index]\n if not left_boundary and start is not None:\n left_index = result.index.get_slice_bound(start, \"right\", **kind_opts)\n result = result.iloc[left_index:]\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_index_count_describe_aggregate.return.pd_concat_values_axis_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_index_count_describe_aggregate.return.pd_concat_values_axis_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 110, "end_line": 162, "span_ids": ["wrap_var_reduction", "index_count", "wrap_kurtosis_reduction", "mean_aggregate", "var_mixed_concat", "describe_aggregate", "wrap_skew_reduction"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def index_count(x):\n # Workaround since Index doesn't implement `.count`\n return pd.notnull(x).sum()\n\n\ndef mean_aggregate(s, n):\n try:\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n return s / n\n except ZeroDivisionError:\n return np.float64(np.nan)\n\n\ndef wrap_var_reduction(array_var, index):\n if isinstance(array_var, np.ndarray) or isinstance(array_var, list):\n return pd.Series(array_var, index=index)\n\n return array_var\n\n\ndef wrap_skew_reduction(array_skew, index):\n if isinstance(array_skew, np.ndarray) or isinstance(array_skew, list):\n return pd.Series(array_skew, index=index)\n\n return array_skew\n\n\ndef wrap_kurtosis_reduction(array_kurtosis, index):\n if isinstance(array_kurtosis, np.ndarray) or isinstance(array_kurtosis, list):\n return pd.Series(array_kurtosis, index=index)\n\n return array_kurtosis\n\n\ndef var_mixed_concat(numeric_var, timedelta_var, columns):\n vars = pd.concat([numeric_var, timedelta_var])\n\n return vars.reindex(index=columns)\n\n\ndef describe_aggregate(values):\n assert len(values) > 0\n\n # arrange categorical and numeric stats\n names = []\n values_indexes = sorted((x.index for x in 
values), key=len)\n for idxnames in values_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n return pd.concat(values, axis=1, sort=False).reindex(names)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_numeric_aggregate_describe_numeric_aggregate.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_numeric_aggregate_describe_numeric_aggregate.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 186, "end_line": 225, "span_ids": ["describe_numeric_aggregate"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def describe_numeric_aggregate(\n stats, name=None, is_timedelta_col=False, is_datetime_col=False\n):\n assert len(stats) == 6\n count, mean, std, min, q, max = stats\n\n if is_series_like(count):\n typ = type(count.to_frame())\n else:\n typ = type(q)\n\n if is_timedelta_col:\n mean = pd.to_timedelta(mean)\n std = pd.to_timedelta(std)\n min = pd.to_timedelta(min)\n max = pd.to_timedelta(max)\n q = q.apply(lambda x: pd.to_timedelta(x))\n\n if is_datetime_col:\n # mean is not implemented for datetime\n min = pd.to_datetime(min)\n max = pd.to_datetime(max)\n q = q.apply(lambda x: pd.to_datetime(x))\n\n if is_datetime_col:\n part1 = typ([count, min], index=[\"count\", \"min\"])\n else:\n part1 = typ([count, mean, std, min], index=[\"count\", \"mean\", \"std\", \"min\"])\n\n q.index = [f\"{l * 100:g}%\" for l in tolist(q.index)]\n if is_series_like(q) and typ != type(q):\n q = q.to_frame()\n part3 = typ([max], index=[\"max\"])\n\n result = concat([part1, q, part3], sort=False)\n\n if is_series_like(result):\n result.name = name\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_nonnumeric_aggregate_describe_nonnumeric_aggregate.return.pd_Series_values_index_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_describe_nonnumeric_aggregate_describe_nonnumeric_aggregate.return.pd_Series_values_index_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 189, "end_line": 235, "span_ids": ["describe_nonnumeric_aggregate"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def describe_nonnumeric_aggregate(stats, name):\n args_len = len(stats)\n\n is_datetime_column = args_len == 5\n is_categorical_column = args_len == 3\n\n assert is_datetime_column or is_categorical_column\n\n if is_categorical_column:\n nunique, count, top_freq = stats\n else:\n nunique, count, top_freq, min_ts, max_ts = stats\n\n # input was empty dataframe/series\n if len(top_freq) == 0:\n data = [0, 0]\n index = [\"count\", \"unique\"]\n dtype = None\n data.extend([None, None])\n index.extend([\"top\", \"freq\"])\n dtype = object\n result = pd.Series(data, index=index, dtype=dtype, name=name)\n return result\n\n top = top_freq.index[0]\n freq = top_freq.iloc[0]\n\n index = [\"unique\", \"count\", \"top\", \"freq\"]\n values = [nunique, count]\n\n if is_datetime_column:\n tz = top.tz\n top = pd.Timestamp(top)\n if top.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n top = top.tz_convert(tz)\n else:\n top = top.tz_localize(tz)\n\n first = pd.Timestamp(min_ts, tz=tz)\n last = pd.Timestamp(max_ts, tz=tz)\n index.extend([\"first\", \"last\"])\n values.extend([top, freq, first, last])\n else:\n values.extend([top, freq])\n\n return pd.Series(values, index=index, name=name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_align_partitions_align_partitions.return.dfs2_tuple_divisions_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_align_partitions_align_partitions.return.dfs2_tuple_divisions_r", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 149, "span_ids": ["align_partitions"], "tokens": 468}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def align_partitions(*dfs):\n \"\"\"Mutually partition and align DataFrame blocks\n\n This serves as precursor to multi-dataframe operations like join, concat,\n or merge.\n\n Parameters\n ----------\n dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar\n Sequence of dataframes to be aligned on their index\n\n Returns\n -------\n dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar\n These must have consistent divisions with each other\n divisions: tuple\n Full divisions sequence of the entire result\n result: list\n A list of lists of keys that show which data exist on which\n divisions\n \"\"\"\n _is_broadcastable = partial(is_broadcastable, dfs)\n dfs1 = [df for df in dfs if isinstance(df, _Frame) and not _is_broadcastable(df)]\n if len(dfs) == 0:\n raise ValueError(\"dfs contains no DataFrame and Series\")\n if not all(df.known_divisions for df in dfs1):\n raise ValueError(\n \"Not all divisions are known, can't align \"\n \"partitions. 
Please use `set_index` \"\n \"to set the index.\"\n )\n\n divisions = list(unique(merge_sorted(*[df.divisions for df in dfs1])))\n if len(divisions) == 1: # single value for index\n divisions = (divisions[0], divisions[0])\n dfs2 = [\n df.repartition(divisions, force=True) if isinstance(df, _Frame) else df\n for df in dfs\n ]\n\n result = list()\n inds = [0 for df in dfs]\n for d in divisions[:-1]:\n L = list()\n for i, df in enumerate(dfs2):\n if isinstance(df, _Frame):\n j = inds[i]\n divs = df.divisions\n if j < len(divs) - 1 and divs[j] == d:\n L.append((df._name, inds[i]))\n inds[i] += 1\n else:\n L.append(None)\n else: # Scalar has no divisions\n L.append(None)\n result.append(L)\n return dfs2, tuple(divisions), result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__maybe_align_partitions__maybe_align_partitions.return.args": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__maybe_align_partitions__maybe_align_partitions.return.args", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 167, "span_ids": ["_maybe_align_partitions"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_align_partitions(args):\n \"\"\"Align DataFrame blocks if divisions are different.\n\n Note that if all divisions are unknown, but have equal npartitions, then\n they will be passed through unchanged. 
This is different than\n `align_partitions`, which will fail if divisions aren't all known\"\"\"\n _is_broadcastable = partial(is_broadcastable, args)\n dfs = [df for df in args if isinstance(df, _Frame) and not _is_broadcastable(df)]\n if not dfs:\n return args\n\n divisions = dfs[0].divisions\n if not all(df.divisions == divisions for df in dfs):\n dfs2 = iter(align_partitions(*dfs)[0])\n return [a if not isinstance(a, _Frame) else next(dfs2) for a in args]\n return args", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_require_require.return.divisions_parts": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_require_require.return.divisions_parts", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 170, "end_line": 212, "span_ids": ["require"], "tokens": 476}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def require(divisions, parts, required=None):\n \"\"\"Clear out divisions where required components are not present\n\n In left, right, or inner joins we exclude portions of the dataset if one\n side or the other is not present. We can achieve this at the partition\n level as well\n\n >>> divisions = [1, 3, 5, 7, 9]\n >>> parts = [(('a', 0), None),\n ... (('a', 1), ('b', 0)),\n ... (('a', 2), ('b', 1)),\n ... 
(None, ('b', 2))]\n\n >>> divisions2, parts2 = require(divisions, parts, required=[0])\n >>> divisions2\n (1, 3, 5, 7)\n >>> parts2 # doctest: +NORMALIZE_WHITESPACE\n ((('a', 0), None),\n (('a', 1), ('b', 0)),\n (('a', 2), ('b', 1)))\n\n >>> divisions2, parts2 = require(divisions, parts, required=[1])\n >>> divisions2\n (3, 5, 7, 9)\n >>> parts2 # doctest: +NORMALIZE_WHITESPACE\n ((('a', 1), ('b', 0)),\n (('a', 2), ('b', 1)),\n (None, ('b', 2)))\n\n >>> divisions2, parts2 = require(divisions, parts, required=[0, 1])\n >>> divisions2\n (3, 5, 7)\n >>> parts2 # doctest: +NORMALIZE_WHITESPACE\n ((('a', 1), ('b', 0)),\n (('a', 2), ('b', 1)))\n \"\"\"\n if not required:\n return divisions, parts\n for i in required:\n present = [j for j, p in enumerate(parts) if p[i] is not None]\n divisions = tuple(divisions[min(present) : max(present) + 2])\n parts = tuple(parts[min(present) : max(present) + 1])\n return divisions, parts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_1_merge_chunk.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_1_merge_chunk.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 218, "end_line": 282, "span_ids": ["require", "merge_chunk", "impl"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Join / Merge\n###############################################################\n\n\nrequired = {\n \"left\": [0],\n \"leftsemi\": [0],\n \"leftanti\": [0],\n \"right\": [1],\n \"inner\": [0, 1],\n \"outer\": [],\n}\nallowed_left = (\"inner\", \"left\", \"leftsemi\", \"leftanti\")\nallowed_right = (\"inner\", \"right\")\n\n\ndef merge_chunk(lhs, *args, empty_index_dtype=None, categorical_columns=None, **kwargs):\n\n rhs, *args = args\n left_index = kwargs.get(\"left_index\", False)\n right_index = kwargs.get(\"right_index\", False)\n\n if categorical_columns is not None:\n for col in categorical_columns:\n left = None\n right = None\n\n if col in lhs:\n left = lhs[col]\n elif col == kwargs.get(\"right_on\", None) and left_index:\n if is_categorical_dtype(lhs.index):\n left = lhs.index\n\n if col in rhs:\n right = rhs[col]\n elif col == kwargs.get(\"left_on\", None) and right_index:\n if is_categorical_dtype(rhs.index):\n right = rhs.index\n\n dtype = \"category\"\n if left is not None and right is not None:\n dtype = methods.union_categoricals(\n [left.astype(\"category\"), right.astype(\"category\")]\n ).dtype\n\n if left is not None:\n if isinstance(left, pd.Index):\n lhs.index = left.astype(dtype)\n else:\n lhs = lhs.assign(**{col: left.astype(dtype)})\n if right is not None:\n if isinstance(right, pd.Index):\n rhs.index = right.astype(dtype)\n else:\n rhs = rhs.assign(**{col: right.astype(dtype)})\n\n out = lhs.merge(rhs, *args, 
**kwargs)\n\n # Workaround pandas bug where if the output result of a merge operation is\n # an empty dataframe, the output index is `int64` in all cases, regardless\n # of input dtypes.\n if len(out) == 0 and empty_index_dtype is not None:\n out.index = out.index.astype(empty_index_dtype)\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_indexed_dataframes_shuffle_func.shuffle": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_indexed_dataframes_shuffle_func.shuffle", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 288, "end_line": 311, "span_ids": ["merge_indexed_dataframes", "impl:7"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_indexed_dataframes(lhs, rhs, left_index=True, right_index=True, **kwargs):\n \"\"\"Join two partitioned dataframes along their index\"\"\"\n how = kwargs.get(\"how\", \"left\")\n kwargs[\"left_index\"] = left_index\n kwargs[\"right_index\"] = right_index\n\n (lhs, rhs), divisions, parts = align_partitions(lhs, rhs)\n divisions, parts = require(divisions, parts, required[how])\n\n name = \"join-indexed-\" + tokenize(lhs, rhs, **kwargs)\n\n meta = lhs._meta_nonempty.merge(rhs._meta_nonempty, **kwargs)\n kwargs[\"empty_index_dtype\"] = meta.index.dtype\n kwargs[\"categorical_columns\"] = meta.select_dtypes(include=\"category\").columns\n\n dsk = dict()\n for i, (a, b) in enumerate(parts):\n dsk[(name, i)] = (apply, merge_chunk, [a, b], kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[lhs, rhs])\n return new_dd_object(graph, name, meta, divisions)\n\n\nshuffle_func = shuffle", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__name_sometimes_conflict_hash_join.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__name_sometimes_conflict_hash_join.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 308, "end_line": 386, "span_ids": ["impl:7", "hash_join"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": " # name sometimes conflicts with keyword 
argument\n\n\ndef hash_join(\n lhs,\n left_on,\n rhs,\n right_on,\n how=\"inner\",\n npartitions=None,\n suffixes=(\"_x\", \"_y\"),\n shuffle=None,\n indicator=False,\n max_branch=None,\n):\n \"\"\"Join two DataFrames on particular columns with hash join\n\n This shuffles both datasets on the joined column and then performs an\n embarrassingly parallel join partition-by-partition\n\n >>> hash_join(lhs, 'id', rhs, 'id', how='left', npartitions=10) # doctest: +SKIP\n \"\"\"\n if npartitions is None:\n npartitions = max(lhs.npartitions, rhs.npartitions)\n\n lhs2 = shuffle_func(\n lhs, left_on, npartitions=npartitions, shuffle=shuffle, max_branch=max_branch\n )\n rhs2 = shuffle_func(\n rhs, right_on, npartitions=npartitions, shuffle=shuffle, max_branch=max_branch\n )\n\n if isinstance(left_on, Index):\n left_on = None\n left_index = True\n else:\n left_index = False\n\n if isinstance(right_on, Index):\n right_on = None\n right_index = True\n else:\n right_index = False\n\n kwargs = dict(\n how=how,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n\n # dummy result\n # Avoid using dummy data for a collection if it is empty\n _lhs_meta = lhs._meta_nonempty if len(lhs.columns) else lhs._meta\n _rhs_meta = rhs._meta_nonempty if len(rhs.columns) else rhs._meta\n meta = _lhs_meta.merge(_rhs_meta, **kwargs)\n\n if isinstance(left_on, list):\n left_on = (list, tuple(left_on))\n if isinstance(right_on, list):\n right_on = (list, tuple(right_on))\n\n token = tokenize(lhs2, rhs2, npartitions, shuffle, **kwargs)\n name = \"hash-join-\" + token\n\n kwargs[\"empty_index_dtype\"] = meta.index.dtype\n kwargs[\"categorical_columns\"] = meta.select_dtypes(include=\"category\").columns\n\n dsk = {\n (name, i): (apply, merge_chunk, [(lhs2._name, i), (rhs2._name, i)], kwargs)\n for i in range(npartitions)\n }\n\n divisions = [None] * (npartitions + 1)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[lhs2, rhs2])\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_warn_dtype_mismatch_warn_dtype_mismatch.if_all_col_in_left_column.if_dtype_mism_.warnings_warn_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_warn_dtype_mismatch_warn_dtype_mismatch.if_all_col_in_left_column.if_dtype_mism_.warnings_warn_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 377, "end_line": 405, "span_ids": ["warn_dtype_mismatch"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def warn_dtype_mismatch(left, right, left_on, right_on):\n \"\"\"Checks for merge column dtype mismatches and throws a warning (#4574)\"\"\"\n\n if not isinstance(left_on, list):\n left_on = [left_on]\n if not isinstance(right_on, list):\n right_on = 
[right_on]\n\n if all(col in left.columns for col in left_on) and all(\n col in right.columns for col in right_on\n ):\n dtype_mism = [\n ((lo, ro), left.dtypes[lo], right.dtypes[ro])\n for lo, ro in zip(left_on, right_on)\n if not is_dtype_equal(left.dtypes[lo], right.dtypes[ro])\n ]\n\n if dtype_mism:\n col_tb = asciitable(\n (\"Merge columns\", \"left dtype\", \"right dtype\"), dtype_mism\n )\n\n warnings.warn(\n (\n \"Merging dataframes with merge column data \"\n \"type mismatches: \\n{}\\nCast dtypes explicitly to \"\n \"avoid unexpected results.\"\n ).format(col_tb)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_merge._Both_sides_indexed": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_merge._Both_sides_indexed", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 478, "end_line": 549, "span_ids": ["merge"], "tokens": 510}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.merge)\ndef merge(\n left,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n suffixes=(\"_x\", \"_y\"),\n indicator=False,\n npartitions=None,\n shuffle=None,\n max_branch=None,\n broadcast=None,\n):\n for o in [on, left_on, right_on]:\n if isinstance(o, _Frame):\n raise NotImplementedError(\n \"Dask collections not currently allowed in merge columns\"\n )\n if not on and not left_on and not right_on and not left_index and not right_index:\n on = [c for c in left.columns if c in right.columns]\n if not on:\n left_index = right_index = True\n\n if on and not left_on and not right_on:\n left_on = right_on = on\n on = None\n\n if isinstance(left, (pd.Series, pd.DataFrame)) and isinstance(\n right, (pd.Series, pd.DataFrame)\n ):\n return pd.merge(\n left,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n\n # Transform pandas objects into dask.dataframe objects\n if not is_dask_collection(left):\n if right_index and left_on: # change to join on index\n left = left.set_index(left[left_on])\n left_on = None\n left_index = True\n left = from_pandas(left, npartitions=1) # turn into DataFrame\n\n if not is_dask_collection(right):\n if left_index and right_on: # change to join on index\n right = right.set_index(right[right_on])\n right_on = None\n right_index = True\n right = from_pandas(right, npartitions=1) # turn into DataFrame\n\n # Both sides are now dd.DataFrame or dd.Series objects\n merge_indexed_left = (\n left_index or left._contains_index_name(left_on)\n ) and left.known_divisions\n\n merge_indexed_right = (\n right_index or right._contains_index_name(right_on)\n ) and right.known_divisions\n\n # Both sides indexed\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge.if_merge_indexed_left_and_merge.if_merge_indexed_left_and.else_.return.hash_join_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge.if_merge_indexed_left_and_merge.if_merge_indexed_left_and.else_.return.hash_join_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 539, "end_line": 683, "span_ids": ["merge"], "tokens": 1161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.merge)\ndef merge(\n left,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n suffixes=(\"_x\", \"_y\"),\n indicator=False,\n npartitions=None,\n shuffle=None,\n max_branch=None,\n broadcast=None,\n):\n # ... other code\n if merge_indexed_left and merge_indexed_right: # Do indexed join\n return merge_indexed_dataframes(\n left,\n right,\n how=how,\n suffixes=suffixes,\n indicator=indicator,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n )\n\n # Single partition on one side\n # Note that cudf supports \"leftsemi\" and \"leftanti\" joins\n elif (\n left.npartitions == 1\n and how in allowed_right\n or right.npartitions == 1\n and how in allowed_left\n ):\n return single_partition_join(\n left,\n right,\n how=how,\n right_on=right_on,\n left_on=left_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n\n # One side is indexed, the other not\n elif (\n left_index\n and left.known_divisions\n and not right_index\n or right_index\n and right.known_divisions\n and not left_index\n ):\n left_empty = left._meta_nonempty\n right_empty = right._meta_nonempty\n meta = left_empty.merge(\n right_empty,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n categorical_columns = meta.select_dtypes(include=\"category\").columns\n\n if merge_indexed_left and left.known_divisions:\n right = rearrange_by_divisions(\n right, right_on, left.divisions, max_branch, shuffle=shuffle\n )\n left = left.clear_divisions()\n elif merge_indexed_right and right.known_divisions:\n left = rearrange_by_divisions(\n left, left_on, right.divisions, max_branch, shuffle=shuffle\n )\n right = right.clear_divisions()\n return map_partitions(\n merge_chunk,\n left,\n right,\n meta=meta,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n empty_index_dtype=meta.index.dtype,\n categorical_columns=categorical_columns,\n )\n # Catch all hash join\n else:\n if left_on and right_on:\n warn_dtype_mismatch(left, right, left_on, right_on)\n\n # Check if we should use a broadcast_join\n # See 
note on `broadcast_bias` below.\n broadcast_bias = 0.5\n if isinstance(broadcast, float):\n broadcast_bias = float(broadcast)\n broadcast = None\n elif not isinstance(broadcast, bool) and broadcast is not None:\n # Let's be strict about the `broadcast` type to\n # avoid arbitrarily casting int to float or bool.\n raise ValueError(\n \"Optional `broadcast` argument must be float or bool. \"\n f\"Type={type(broadcast)} is not supported.\"\n )\n bcast_side = \"left\" if left.npartitions < right.npartitions else \"right\"\n n_small = min(left.npartitions, right.npartitions)\n n_big = max(left.npartitions, right.npartitions)\n if (\n shuffle == \"tasks\"\n and how in (\"inner\", \"left\", \"right\")\n and how != bcast_side\n and broadcast is not False\n ):\n # Note on `broadcast_bias`:\n # We can expect the broadcast merge to be competitive with\n # the shuffle merge when the number of partitions in the\n # smaller collection is less than the logarithm of the number\n # of partitions in the larger collection. By default, we add\n # a small preference for the shuffle-based merge by multiplying\n # the log result by a 0.5 scaling factor. We call this factor\n # the `broadcast_bias`, because a larger number will make Dask\n # more likely to select the `broadcast_join` code path. If\n # the user specifies a floating-point value for the `broadcast`\n # kwarg, that value will be used as the `broadcast_bias`.\n if broadcast or (n_small < math.log2(n_big) * broadcast_bias):\n return broadcast_join(\n left,\n left.index if left_index else left_on,\n right,\n right.index if right_index else right_on,\n how,\n npartitions,\n suffixes,\n indicator=indicator,\n )\n\n return hash_join(\n left,\n left.index if left_index else left_on,\n right,\n right.index if right_index else right_on,\n how,\n npartitions,\n suffixes,\n shuffle=shuffle,\n indicator=indicator,\n max_branch=max_branch,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_5_compute_heads.if_by_is_None_.else_.return.suffix_reduction_most_rec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_5_compute_heads.if_by_is_None_.else_.return.suffix_reduction_most_rec", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 577, "end_line": 625, "span_ids": ["most_recent_head_summary", "most_recent_tail", "compute_heads", "compute_tails", "merge", "most_recent_head", "most_recent_tail_summary"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# ASOF Join\n###############################################################\n\n\ndef most_recent_tail(left, right):\n if len(right.index) == 0:\n return left\n return right.tail(1)\n\n\ndef most_recent_tail_summary(left, right, by=None):\n return pd.concat([left, right]).drop_duplicates(subset=by, 
keep=\"last\")\n\n\ndef compute_tails(ddf, by=None):\n \"\"\"For each partition, returns the last row of the most recent nonempty\n partition.\n \"\"\"\n empty = ddf._meta.iloc[0:0]\n\n if by is None:\n return prefix_reduction(most_recent_tail, ddf, empty)\n else:\n kwargs = {\"by\": by}\n return prefix_reduction(most_recent_tail_summary, ddf, empty, **kwargs)\n\n\ndef most_recent_head(left, right):\n if len(left.index) == 0:\n return right\n return left.head(1)\n\n\ndef most_recent_head_summary(left, right, by=None):\n return pd.concat([left, right]).drop_duplicates(subset=by, keep=\"first\")\n\n\ndef compute_heads(ddf, by=None):\n \"\"\"For each partition, returns the first row of the next nonempty\n partition.\n \"\"\"\n empty = ddf._meta.iloc[0:0]\n\n if by is None:\n return suffix_reduction(most_recent_head, ddf, empty)\n else:\n kwargs = {\"by\": by}\n return suffix_reduction(most_recent_head_summary, ddf, empty, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_pair_partitions_pair_partitions.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_pair_partitions_pair_partitions.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 628, "end_line": 661, "span_ids": ["pair_partitions"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pair_partitions(L, R):\n \"\"\"Returns which partitions to pair for the merge_asof algorithm and the\n bounds on which to split them up\n \"\"\"\n result = []\n\n n, m = len(L) - 1, len(R) - 1\n i, j = 0, -1\n while j + 1 < m and R[j + 1] <= L[i]:\n j += 1\n J = []\n while i < n:\n partition = max(0, min(m - 1, j))\n lower = R[j] if j >= 0 and R[j] > L[i] else None\n upper = (\n R[j + 1]\n if j + 1 < m\n and (R[j + 1] < L[i + 1] or R[j + 1] == L[i + 1] and i == n - 1)\n else None\n )\n\n J.append((partition, lower, upper))\n\n i1 = i + 1 if j + 1 == m or (i + 1 < n and R[j + 1] >= L[i + 1]) else i\n j1 = j + 1 if i + 1 == n or (j + 1 < m and L[i + 1] >= R[j + 1]) else j\n if i1 > i:\n result.append(J)\n J = []\n elif i == n - 1 and R[j1] > L[n]:\n result.append(J)\n break\n i, j = i1, j1\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_indexed_merge_asof_indexed.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_indexed_merge_asof_indexed.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 806, "end_line": 854, "span_ids": ["merge_asof_indexed"], "tokens": 438}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_asof_indexed(left, right, **kwargs):\n dsk = dict()\n name = \"asof-join-indexed-\" + tokenize(left, right, **kwargs)\n meta = pd.merge_asof(left._meta_nonempty, right._meta_nonempty, **kwargs)\n\n if all(map(pd.isnull, left.divisions)):\n # results in an empty df that looks like ``meta``\n return from_pandas(meta.iloc[len(meta) :], npartitions=left.npartitions)\n\n if all(map(pd.isnull, right.divisions)):\n # results in an df that looks like ``left`` with nulls for\n # all ``right.columns``\n return map_partitions(\n pd.merge_asof,\n left,\n right=right,\n left_index=True,\n right_index=True,\n meta=meta,\n )\n\n dependencies = [left, right]\n tails = heads = None\n if kwargs[\"direction\"] in [\"backward\", \"nearest\"]:\n tails = compute_tails(right, by=kwargs[\"right_by\"])\n dependencies.append(tails)\n if kwargs[\"direction\"] in [\"forward\", \"nearest\"]:\n heads = compute_heads(right, by=kwargs[\"right_by\"])\n dependencies.append(heads)\n\n for i, J in enumerate(pair_partitions(left.divisions, right.divisions)):\n frames = []\n for j, lower, upper in J:\n slice = (methods.boundary_slice, (left._name, i), lower, upper, False)\n tail = (tails._name, j) if tails is not None else None\n head = (heads._name, j) if heads is not None else None\n frames.append(\n (\n apply,\n merge_asof_padded,\n [slice, (right._name, j), tail, head],\n kwargs,\n )\n )\n dsk[(name, i)] = (methods.concat, frames)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n result = new_dd_object(graph, name, meta, left.divisions)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_merge_asof.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_merge_asof.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 857, "end_line": 945, "span_ids": ["merge_asof"], "tokens": 662}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.merge_asof)\ndef merge_asof(\n left,\n right,\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n by=None,\n left_by=None,\n right_by=None,\n suffixes=(\"_x\", \"_y\"),\n tolerance=None,\n allow_exact_matches=True,\n direction=\"backward\",\n):\n if direction not in [\"backward\", \"forward\", \"nearest\"]:\n raise ValueError(\n \"Invalid merge_asof direction. 
Choose from 'backward',\"\n \" 'forward', or 'nearest'\"\n )\n\n kwargs = {\n \"on\": on,\n \"left_on\": left_on,\n \"right_on\": right_on,\n \"left_index\": left_index,\n \"right_index\": right_index,\n \"by\": by,\n \"left_by\": left_by,\n \"right_by\": right_by,\n \"suffixes\": suffixes,\n \"tolerance\": tolerance,\n \"allow_exact_matches\": allow_exact_matches,\n \"direction\": direction,\n }\n\n if left is None or right is None:\n raise ValueError(\"Cannot merge_asof on None\")\n\n # if is_dataframe_like(left) and is_dataframe_like(right):\n if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):\n return pd.merge_asof(left, right, **kwargs)\n\n if on is not None:\n left_on = right_on = on\n for o in [left_on, right_on]:\n if isinstance(o, _Frame):\n raise NotImplementedError(\n \"Dask collections not currently allowed in merge columns\"\n )\n\n if not is_dask_collection(left):\n left = from_pandas(left, npartitions=1)\n ixname = ixcol = divs = None\n if left_on is not None:\n if right_index:\n divs = left.divisions if left.known_divisions else None\n ixname = left.index.name\n left = left.reset_index()\n ixcol = left.columns[0]\n left = left.set_index(left_on, sorted=True)\n\n if not is_dask_collection(right):\n right = from_pandas(right, npartitions=1)\n if right_on is not None:\n right = right.set_index(right_on, sorted=True)\n\n if by is not None:\n kwargs[\"left_by\"] = kwargs[\"right_by\"] = by\n\n del kwargs[\"on\"], kwargs[\"left_on\"], kwargs[\"right_on\"], kwargs[\"by\"]\n kwargs[\"left_index\"] = kwargs[\"right_index\"] = True\n\n if not left.known_divisions or not right.known_divisions:\n raise ValueError(\"merge_asof input must be sorted!\")\n\n result = merge_asof_indexed(left, right, **kwargs)\n if left_on or right_on:\n result = result.reset_index()\n if ixcol is not None:\n if divs is not None:\n result = result.set_index(ixcol, sorted=True, divisions=divs)\n else:\n result = result.map_partitions(M.set_index, ixcol)\n result = result.map_partitions(M.rename_axis, ixname)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_8_concat_unindexed_dataframes.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_None_8_concat_unindexed_dataframes.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 956, "end_line": 978, "span_ids": ["merge_asof", "concat_and_check", "concat_unindexed_dataframes"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Concat\n###############################################################\n\n\ndef concat_and_check(dfs, ignore_order=False):\n if len(set(map(len, dfs))) != 1:\n raise ValueError(\"Concatenated DataFrames of different lengths\")\n 
return methods.concat(dfs, axis=1, ignore_order=ignore_order)\n\n\ndef concat_unindexed_dataframes(dfs, ignore_order=False, **kwargs):\n name = \"concat-\" + tokenize(*dfs)\n\n dsk = {\n (name, i): (concat_and_check, [(df._name, i) for df in dfs], ignore_order)\n for i in range(dfs[0].npartitions)\n }\n kwargs.update({\"ignore_order\": ignore_order})\n meta = methods.concat([df._meta for df in dfs], axis=1, **kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dfs)\n return new_dd_object(graph, name, meta, dfs[0].divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_indexed_dataframes_concat_indexed_dataframes.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_indexed_dataframes_concat_indexed_dataframes.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 985, "end_line": 1017, "span_ids": ["concat_indexed_dataframes"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat_indexed_dataframes(dfs, axis=0, join=\"outer\", ignore_order=False, **kwargs):\n \"\"\"Concatenate indexed dataframes together along the index\"\"\"\n warn = axis != 0\n kwargs.update({\"ignore_order\": ignore_order})\n meta = methods.concat(\n [df._meta for df in dfs],\n axis=axis,\n join=join,\n filter_warning=warn,\n **kwargs,\n )\n empties = [strip_unknown_categories(df._meta) for df in dfs]\n\n dfs2, divisions, parts = align_partitions(*dfs)\n\n name = \"concat-indexed-\" + tokenize(join, *dfs)\n\n parts2 = [\n [df if df is not None else empty for df, empty in zip(part, empties)]\n for part in parts\n ]\n\n filter_warning = True\n uniform = False\n\n dsk = {\n (name, i): (methods.concat, part, axis, join, uniform, filter_warning, kwargs)\n for i, part in enumerate(parts2)\n }\n for df in dfs2:\n dsk.update(df.dask)\n\n return new_dd_object(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_stack_partitions_stack_partitions.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_stack_partitions_stack_partitions.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1020, "end_line": 1089, "span_ids": ["stack_partitions"], "tokens": 541}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def stack_partitions(dfs, divisions, join=\"outer\", ignore_order=False, **kwargs):\n \"\"\"Concatenate partitions on axis=0 by doing a simple stack\"\"\"\n # Use _meta_nonempty as pandas.concat will incorrectly cast float to datetime\n # for empty data frames. See https://github.com/pandas-dev/pandas/issues/32934.\n\n kwargs.update({\"ignore_order\": ignore_order})\n\n meta = make_meta(\n methods.concat(\n [df._meta_nonempty for df in dfs],\n join=join,\n filter_warning=False,\n **kwargs,\n )\n )\n empty = strip_unknown_categories(meta)\n\n name = f\"concat-{tokenize(*dfs)}\"\n dsk = {}\n i = 0\n for df in dfs:\n # dtypes of all dfs need to be coherent\n # refer to https://github.com/dask/dask/issues/4685\n # and https://github.com/dask/dask/issues/5968.\n if is_dataframe_like(df):\n\n shared_columns = df.columns.intersection(meta.columns)\n needs_astype = [\n col\n for col in shared_columns\n if df[col].dtype != meta[col].dtype\n and not is_categorical_dtype(df[col].dtype)\n ]\n\n if needs_astype:\n # Copy to avoid mutating the caller inplace\n df = df.copy()\n df[needs_astype] = df[needs_astype].astype(meta[needs_astype].dtypes)\n\n if is_series_like(df) and is_series_like(meta):\n if not df.dtype == meta.dtype and not is_categorical_dtype(df.dtype):\n df = df.astype(meta.dtype)\n else:\n pass # TODO: there are other non-covered cases here\n dsk.update(df.dask)\n # An error will be raised if the schemas or categories don't match. In\n # this case we need to pass along the meta object to transform each\n # partition, so they're all equivalent.\n try:\n df._meta == meta\n match = True\n except (ValueError, TypeError):\n match = False\n\n filter_warning = True\n uniform = False\n\n for key in df.__dask_keys__():\n if match:\n dsk[(name, i)] = key\n else:\n dsk[(name, i)] = (\n apply,\n methods.concat,\n [[empty, key], 0, join, uniform, filter_warning],\n kwargs,\n )\n i += 1\n\n return new_dd_object(dsk, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_concat._Concatenate_DataFrames": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat_concat._Concatenate_DataFrames", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1092, "end_line": 1192, "span_ids": ["concat"], "tokens": 929}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat(\n dfs,\n axis=0,\n join=\"outer\",\n interleave_partitions=False,\n ignore_unknown_divisions=False,\n ignore_order=False,\n **kwargs,\n):\n \"\"\"Concatenate DataFrames along rows.\n\n - When axis=0 (default), concatenate DataFrames row-wise:\n\n - If 
all divisions are known and ordered, concatenate DataFrames keeping\n divisions. When divisions are not ordered, specifying\n interleave_partitions=True allows concatenating divisions one by one.\n\n - If any division is unknown, concatenate DataFrames resetting their\n divisions to unknown (None)\n\n - When axis=1, concatenate DataFrames column-wise:\n\n - Allowed if all divisions are known.\n\n - If any division is unknown, a ValueError is raised.\n\n Parameters\n ----------\n dfs : list\n List of dask.DataFrames to be concatenated\n axis : {0, 1, 'index', 'columns'}, default 0\n The axis to concatenate along\n join : {'inner', 'outer'}, default 'outer'\n How to handle indexes on other axis\n interleave_partitions : bool, default False\n Whether to concatenate DataFrames ignoring their order. If True, all\n divisions are concatenated one by one.\n ignore_unknown_divisions : bool, default False\n By default a warning is raised if any input has unknown divisions.\n Set to True to disable this warning.\n ignore_order : bool, default False\n Whether to ignore order when doing the union of categoricals.\n\n Notes\n -----\n This differs from ``pd.concat`` when concatenating Categoricals\n with different categories. Pandas currently coerces those to objects\n before concatenating. Coercing to objects is very expensive for large\n arrays, so dask preserves the Categoricals by taking the union of\n the categories.\n\n Examples\n --------\n If all divisions are known and ordered, divisions are kept.\n\n >>> import dask.dataframe as dd\n >>> a # doctest: +SKIP\n dd.DataFrame\n >>> b # doctest: +SKIP\n dd.DataFrame\n >>> dd.concat([a, b]) # doctest: +SKIP\n dd.DataFrame\n\n Unable to concatenate if divisions are not ordered.\n\n >>> a # doctest: +SKIP\n dd.DataFrame\n >>> b # doctest: +SKIP\n dd.DataFrame\n >>> dd.concat([a, b]) # doctest: +SKIP\n ValueError: All inputs have known divisions which cannot be concatenated\n in order. Specify interleave_partitions=True to ignore order\n\n Specify interleave_partitions=True to ignore the division order.\n\n >>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP\n dd.DataFrame\n\n If any division is unknown, the resulting divisions will be unknown\n\n >>> a # doctest: +SKIP\n dd.DataFrame\n >>> b # doctest: +SKIP\n dd.DataFrame\n >>> dd.concat([a, b]) # doctest: +SKIP\n dd.DataFrame\n\n By default concatenating with unknown divisions will raise a warning.\n Set ``ignore_unknown_divisions=True`` to disable this:\n\n >>> dd.concat([a, b], ignore_unknown_divisions=True) # doctest: +SKIP\n dd.DataFrame\n\n Different categoricals are unioned\n\n >>> dd.concat([\n ... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),\n ... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),\n ... ], interleave_partitions=True).dtype\n CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/numeric.py_pd_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/numeric.py_pd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/numeric.py", "file_name": "numeric.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 54, "span_ids": ["imports", "to_numeric"], "tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\nfrom pandas.api.types import is_scalar as pd_is_scalar\n\nfrom ..array import Array\nfrom ..delayed import delayed\nfrom ..utils import derived_from\nfrom .core import Series\n\n__all__ = (\"to_numeric\",)\n\n\n@derived_from(pd, ua_args=[\"downcast\"])\ndef to_numeric(arg, errors=\"raise\", meta=None):\n \"\"\"\n Return type depends on input. Delayed if scalar, otherwise same as input.\n For errors, only \"raise\" and \"coerce\" are allowed.\n \"\"\"\n if errors not in (\"raise\", \"coerce\"):\n raise ValueError(\"invalid error value specified\")\n\n is_series = isinstance(arg, Series)\n is_array = isinstance(arg, Array)\n is_scalar = pd_is_scalar(arg)\n\n if not any([is_series, is_array, is_scalar]):\n raise TypeError(\n \"arg must be a list, tuple, dask.array.Array, or dask.dataframe.Series\"\n )\n\n if meta is not None:\n if is_scalar:\n raise KeyError(\"``meta`` is not allowed when input is a scalar.\")\n else:\n if is_series or is_array:\n meta = pd.to_numeric(arg._meta)\n\n if is_series:\n return arg.map_partitions(\n pd.to_numeric,\n token=arg._name + \"-to_numeric\",\n meta=meta,\n enforce_metadata=False,\n errors=errors,\n )\n if is_array:\n return arg.map_blocks(\n pd.to_numeric,\n name=arg._name + \"-to_numeric\",\n meta=meta,\n errors=errors,\n )\n if is_scalar:\n return delayed(pd.to_numeric, pure=True)(arg, errors=errors)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py__Dataframe_optimizatio_optimize.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py__Dataframe_optimizatio_optimize.return.dsk", "embedding": null, "metadata": {"file_path": "dask/dataframe/optimize.py", "file_name": "optimize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 46, "span_ids": ["optimize", "docstring"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" 
Dataframe optimizations \"\"\"\nimport operator\n\nimport numpy as np\n\nfrom .. import config, core\nfrom ..blockwise import Blockwise, fuse_roots, optimize_blockwise\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..optimization import cull, fuse\nfrom ..utils import ensure_dict\n\n\ndef optimize(dsk, keys, **kwargs):\n if not isinstance(keys, (list, set)):\n keys = [keys]\n keys = list(core.flatten(keys))\n\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(id(dsk), dsk, dependencies=())\n else:\n # Perform Blockwise optimizations for HLG input\n dsk = optimize_dataframe_getitem(dsk, keys=keys)\n dsk = optimize_blockwise(dsk, keys=keys)\n dsk = fuse_roots(dsk, keys=keys)\n dsk = dsk.cull(set(keys))\n\n # Do not perform low-level fusion unless the user has\n # specified True explicitly. The configuration will\n # be None by default.\n if not config.get(\"optimization.fuse.active\"):\n return dsk\n\n dependencies = dsk.get_all_dependencies()\n dsk = ensure_dict(dsk)\n\n fuse_subgraphs = config.get(\"optimization.fuse.subgraphs\")\n if fuse_subgraphs is None:\n fuse_subgraphs = True\n dsk, _ = fuse(\n dsk,\n keys,\n dependencies=dependencies,\n fuse_subgraphs=fuse_subgraphs,\n )\n dsk, _ = cull(dsk, keys)\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py__Determine_new_partitio_math": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py__Determine_new_partitio_math", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 71, "span_ids": ["imports", "docstring"], "tokens": 943}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Determine new partition divisions using approximate percentiles.\n\nWe use a custom algorithm to calculate approximate, evenly-distributed\npercentiles of arbitrarily-ordered data for any dtype in a distributed\nfashion with one pass over the data. This is used to determine new\npartition divisions when changing the index of a dask.dataframe. We claim\nno statistical guarantees, but we use a variety of heuristics to try to\nprovide reliable, robust results that are \"good enough\" and can scale to\nlarge number of partitions.\n\nOur approach is similar to standard approaches such as t- and q-digest,\nGK, and sampling-based algorithms, which consist of three parts:\n\n1. **Summarize:** create summaries of subsets of data\n2. **Merge:** combine summaries to make a new summary\n3. **Compress:** periodically compress a summary into a smaller summary\n\nWe summarize the data in each partition by calculating several percentiles.\nThe value at each percentile is given a weight proportional to the length\nof the partition and the differences between the current percentile and\nthe adjacent percentiles. 
Merging summaries is simply a ``merge_sorted``\nof the values and their weights, which we do with a reduction tree.\n\nPercentiles is a good choice for our case, because we are given a numpy\narray of the partition's data, and percentiles is a relatively cheap\noperation. Moreover, percentiles are, by definition, much less\nsusceptible to the underlying distribution of the data, so the weights\ngiven to each value--even across partitions--should be comparable.\n\nLet us describe this to a child of five. We are given many small cubes\n(of equal size) with numbers on them. Split these into many piles. This\nis like the original data. Let's sort and stack the cubes from one of the\npiles. Next, we are given a bunch of unlabeled blocks of different sizes,\nand most are much larger than the original cubes. Stack these blocks\nuntil they're the same height as our first stack. Let's write a number on\neach block of the new stack. To do this, choose the number of the cube in\nthe first stack that is located in the middle of an unlabeled block. We\nare finished with this stack once all blocks have a number written on them.\nRepeat this for all the piles of cubes. Finished already? Great! Now\ntake all the stacks of the larger blocks you wrote on and throw them into\na single pile. We'll be sorting these blocks next, which may be easier if\nyou carefully move the blocks over and organize... ah, nevermind--too late.\nOkay, sort and stack all the blocks from that amazing, disorganized pile\nyou just made. This will be very tall, so we had better stack it sideways\non the floor like so. This will also make it easier for us to split the\nstack into groups of approximately equal size, which is our final task...\n\nThis, in a nutshell, is the algorithm we deploy. The main difference\nis that we don't always assign a block the number at its median (ours\nfluctuates around the median). The numbers at the edges of the final\ngroups are what we use as divisions for repartitioning. We also need\nthe overall min and max, so we take the 0th and 100th percentile of\neach partition, and another sample near each edge so we don't give\ndisproportionate weights to extreme values.\n\nChoosing appropriate percentiles to take in each partition is where things\nget interesting. The data is arbitrarily ordered, which means it may be\nsorted, random, or follow some pathological distribution--who knows. We\nhope all partitions are of similar length, but we ought to expect some\nvariation in lengths. The number of partitions may also be changing\nsignificantly, which could affect the optimal choice of percentiles. For\nimproved robustness, we use both evenly-distributed and random percentiles.\nIf the number of partitions isn't changing, then the total number of\npercentiles across all partitions scales as ``npartitions**1.5``. 
Although\nwe only have a simple compression operation (step 3 above) that combines\nweights of equal values, a more sophisticated one could be added if needed,\nsuch as for extremely large ``npartitions`` or if we find we need to\nincrease the sample size for each partition.\n\n\"\"\"\nimport math", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_np_sample_percentiles.return.qs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_np_sample_percentiles.return.qs", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 73, "end_line": 157, "span_ids": ["sample_percentiles", "imports"], "tokens": 895}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_datetime64tz_dtype\nfrom tlz import merge, merge_sorted, take\n\nfrom ..base import tokenize\nfrom ..utils import is_cupy_type, random_state_data\nfrom .core import Series\nfrom .utils import is_categorical_dtype\n\n\ndef sample_percentiles(num_old, num_new, chunk_length, upsample=1.0, random_state=None):\n \"\"\"Construct percentiles for a chunk for repartitioning.\n\n Adapt the number of total percentiles calculated based on the number\n of current and new partitions. Returned percentiles include equally\n spaced percentiles between [0, 100], and random percentiles. See\n detailed discussion below.\n\n Parameters\n ----------\n num_old: int\n Number of partitions of the current object\n num_new: int\n Number of partitions of the new object\n chunk_length: int\n Number of rows of the partition\n upsample : float\n Multiplicative factor to increase the number of samples\n\n Returns\n -------\n qs : numpy.ndarray of sorted percentiles between 0, 100\n\n Constructing ordered (i.e., not hashed) partitions is hard. Calculating\n approximate percentiles for generic objects in an out-of-core fashion is\n also hard. Fortunately, partition boundaries don't need to be perfect\n in order for partitioning to be effective, so we strive for a \"good enough\"\n method that can scale to many partitions and is reasonably well-behaved for\n a wide variety of scenarios.\n\n Two similar approaches come to mind: (1) take a subsample of every\n partition, then find the best new partitions for the combined subsamples;\n and (2) calculate equally-spaced percentiles on every partition (a\n relatively cheap operation), then merge the results. We do both, but\n instead of random samples, we use random percentiles.\n\n If the number of partitions isn't changing, then the ratio of fixed\n percentiles to random percentiles is 2 to 1. 
If repartitioning goes from\n    a very high number of partitions to a very low number of partitions, then\n    we use more random percentiles, because a stochastic approach will be more\n    stable to potential correlations in the data that may cause a few equally-\n    spaced partitions to under-sample the data.\n\n    The more partitions there are, the more total percentiles will get\n    calculated across all partitions. Squaring the number of partitions\n    approximately doubles the number of total percentiles calculated, so\n    num_total_percentiles ~ sqrt(num_partitions). We assume each partition\n    is approximately the same length. This should provide adequate resolution\n    and allow the number of partitions to scale.\n\n    For numeric data, one could instead use T-Digest for floats and Q-Digest\n    for ints to calculate approximate percentiles. Our current method works\n    for any dtype.\n    \"\"\"\n    # *waves hands*\n    random_percentage = 1 / (1 + (4 * num_new / num_old) ** 0.5)\n    num_percentiles = upsample * num_new * (num_old + 22) ** 0.55 / num_old\n    num_fixed = int(num_percentiles * (1 - random_percentage)) + 2\n    num_random = int(num_percentiles * random_percentage) + 2\n\n    if num_fixed + num_random + 5 >= chunk_length:\n        return np.linspace(0, 100, chunk_length + 1)\n\n    if not isinstance(random_state, np.random.RandomState):\n        random_state = np.random.RandomState(random_state)\n\n    q_fixed = np.linspace(0, 100, num_fixed)\n    q_random = random_state.rand(num_random) * 100\n    q_edges = [60 / (num_fixed - 1), 100 - 60 / (num_fixed - 1)]\n    qs = np.concatenate([q_fixed, q_random, q_edges, [0, 100]])\n    qs.sort()\n    # Make the divisions between percentiles a little more even\n    qs = 0.5 * (qs[:-1] + qs[1:])\n    return qs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_width_tree_width.if_to_binary_or_num_group.else_.return.num_groups": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_width_tree_width.if_to_binary_or_num_group.else_.return.num_groups", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 160, "end_line": 175, "span_ids": ["tree_width"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tree_width(N, to_binary=False):\n    \"\"\"Generate tree width suitable for ``merge_sorted`` given N inputs\n\n    The larger N is, the more tasks are reduced in a single task.\n\n    In theory, this is designed so all tasks are of comparable effort.\n    \"\"\"\n    if N < 32:\n        group_size = 2\n    else:\n        group_size = int(math.log(N))\n    num_groups = N // group_size\n    if to_binary or num_groups < 16:\n        return 2 ** int(math.log(N / group_size, 2))\n    else:\n        return num_groups", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_groups_tree_groups.return.rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_tree_groups_tree_groups.return.rv", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 178, "end_line": 197, "span_ids": ["tree_groups"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tree_groups(N, num_groups):\n \"\"\"Split an integer N into evenly sized and spaced groups.\n\n >>> tree_groups(16, 6)\n [3, 2, 3, 3, 2, 3]\n \"\"\"\n # Bresenham, you so smooth!\n group_size = N // num_groups\n dx = num_groups\n dy = N - group_size * num_groups\n D = 2 * dy - dx\n rv = []\n for _ in range(num_groups):\n if D < 0:\n rv.append(group_size)\n else:\n rv.append(group_size + 1)\n D -= 2 * dx\n D += 2 * dy\n return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_create_merge_tree_create_merge_tree.return.rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_create_merge_tree_create_merge_tree.return.rv", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 200, "end_line": 235, "span_ids": ["create_merge_tree"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def create_merge_tree(func, keys, token):\n \"\"\"Create a task tree that merges all the keys with a reduction function.\n\n Parameters\n ----------\n func: callable\n Reduction function that accepts a single list of values to reduce.\n keys: iterable\n Keys to reduce from the source dask graph.\n token: object\n Included in each key of the returned dict.\n\n This creates a k-ary tree where k depends on the current level and is\n greater the further away a node is from the root node. This reduces the\n total number of nodes (thereby reducing scheduler overhead), but still\n has beneficial properties of trees.\n\n For reasonable numbers of keys, N < 1e5, the total number of nodes in the\n tree is roughly ``N**0.78``. 
For 1e5 < N < 2e5, it is roughly ``N**0.8``.\n    \"\"\"\n    level = 0\n    prev_width = len(keys)\n    prev_keys = iter(keys)\n    rv = {}\n    while prev_width > 1:\n        width = tree_width(prev_width)\n        groups = tree_groups(prev_width, width)\n        keys = [(token, level, i) for i in range(width)]\n\n        for num, key in zip(groups, keys):\n            rv[key] = (func, list(take(num, prev_keys)))\n\n        prev_width = width\n        prev_keys = iter(keys)\n        level += 1\n    return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_to_weights_percentiles_to_weights.return.vals_tolist_weights_to": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_to_weights_percentiles_to_weights.return.vals_tolist_weights_to", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 238, "end_line": 263, "span_ids": ["percentiles_to_weights"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentiles_to_weights(qs, vals, length):\n    \"\"\"Weigh percentile values by length and the difference between percentiles\n\n    >>> percentiles = np.array([0., 25., 50., 90., 100.])\n    >>> values = np.array([2, 3, 5, 8, 13])\n    >>> length = 10\n    >>> percentiles_to_weights(percentiles, values, length)\n    ([2, 3, 5, 8, 13], [125.0, 250.0, 325.0, 250.0, 50.0])\n\n    The weight of the first element, ``2``, is determined by the difference\n    between the first and second percentiles, and then scaled by length:\n\n    >>> 0.5 * length * (percentiles[1] - percentiles[0])\n    125.0\n\n    The second weight uses the difference of percentiles on both sides, so\n    it will be twice the first weight if the percentiles are equally spaced:\n\n    >>> 0.5 * length * (percentiles[2] - percentiles[0])\n    250.0\n    \"\"\"\n    if length == 0:\n        return ()\n    diff = np.ediff1d(qs, 0.0, 0.0)\n    weights = 0.5 * length * (diff[1:] + diff[:-1])\n    return vals.tolist(), weights.tolist()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_merge_and_compress_summaries_merge_and_compress_summaries.return.vals_weights": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_merge_and_compress_summaries_merge_and_compress_summaries.return.vals_weights", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 266, "end_line": 293, "span_ids": ["merge_and_compress_summaries"], "tokens": 219}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_and_compress_summaries(vals_and_weights):\n \"\"\"Merge and sort percentile summaries that are already sorted.\n\n Each item is a tuple like ``(vals, weights)`` where vals and weights\n are lists. We sort both by vals.\n\n Equal values will be combined, their weights summed together.\n \"\"\"\n vals_and_weights = [x for x in vals_and_weights if x]\n if not vals_and_weights:\n return ()\n it = merge_sorted(*[zip(x, y) for x, y in vals_and_weights])\n vals = []\n weights = []\n vals_append = vals.append\n weights_append = weights.append\n val, weight = prev_val, prev_weight = next(it)\n for val, weight in it:\n if val == prev_val:\n prev_weight += weight\n else:\n vals_append(prev_val)\n weights_append(prev_weight)\n prev_val, prev_weight = val, weight\n if val == prev_val:\n vals_append(prev_val)\n weights_append(prev_weight)\n return vals, weights", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_process_val_weights_process_val_weights.return.rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_process_val_weights_process_val_weights.return.rv", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 296, "end_line": 383, "span_ids": ["process_val_weights"], "tokens": 888}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def process_val_weights(vals_and_weights, npartitions, dtype_info):\n \"\"\"Calculate final approximate percentiles given weighted vals\n\n ``vals_and_weights`` is assumed to be sorted. We take a cumulative\n sum of the weights, which makes them percentile-like (their scale is\n [0, N] instead of [0, 100]). Next we find the divisions to create\n partitions of approximately equal size.\n\n It is possible for adjacent values of the result to be the same. Since\n these determine the divisions of the new partitions, some partitions\n may be empty. This can happen if we under-sample the data, or if there\n aren't enough unique values in the column. Increasing ``upsample``\n keyword argument in ``df.set_index`` may help.\n \"\"\"\n dtype, info = dtype_info\n\n if not vals_and_weights:\n try:\n return np.array(None, dtype=dtype)\n except Exception:\n # dtype does not support None value so allow it to change\n return np.array(None, dtype=np.float_)\n\n vals, weights = vals_and_weights\n vals = np.array(vals)\n weights = np.array(weights)\n\n # We want to create exactly `npartition` number of groups of `vals` that\n # are approximately the same weight and non-empty if possible. 
We use a\n # simple approach (more accurate algorithms exist):\n # 1. Remove all the values with weights larger than the relative\n # percentile width from consideration (these are `jumbo`s)\n # 2. Calculate percentiles with \"interpolation=left\" of percentile-like\n # weights of the remaining values. These are guaranteed to be unique.\n # 3. Concatenate the values from (1) and (2), sort, and return.\n #\n # We assume that all values are unique, which happens in the previous\n # step `merge_and_compress_summaries`.\n\n if len(vals) == npartitions + 1:\n rv = vals\n elif len(vals) < npartitions + 1:\n # The data is under-sampled\n if np.issubdtype(vals.dtype, np.number) and not is_categorical_dtype(dtype):\n # Interpolate extra divisions\n q_weights = np.cumsum(weights)\n q_target = np.linspace(q_weights[0], q_weights[-1], npartitions + 1)\n rv = np.interp(q_target, q_weights, vals)\n else:\n # Distribute the empty partitions\n duplicated_index = np.linspace(\n 0, len(vals) - 1, npartitions - len(vals) + 1, dtype=int\n )\n duplicated_vals = vals[duplicated_index]\n rv = np.concatenate([vals, duplicated_vals])\n rv.sort()\n else:\n target_weight = weights.sum() / npartitions\n jumbo_mask = weights >= target_weight\n jumbo_vals = vals[jumbo_mask]\n\n trimmed_vals = vals[~jumbo_mask]\n trimmed_weights = weights[~jumbo_mask]\n trimmed_npartitions = npartitions - len(jumbo_vals)\n\n # percentile-like, but scaled by weights\n q_weights = np.cumsum(trimmed_weights)\n q_target = np.linspace(0, q_weights[-1], trimmed_npartitions + 1)\n\n left = np.searchsorted(q_weights, q_target, side=\"left\")\n right = np.searchsorted(q_weights, q_target, side=\"right\") - 1\n # stay inbounds\n np.maximum(right, 0, right)\n lower = np.minimum(left, right)\n trimmed = trimmed_vals[lower]\n\n rv = np.concatenate([trimmed, jumbo_vals])\n rv.sort()\n\n if is_categorical_dtype(dtype):\n rv = pd.Categorical.from_codes(rv, info[0], info[1])\n elif is_datetime64tz_dtype(dtype):\n rv = pd.DatetimeIndex(rv).tz_localize(dtype.tz)\n elif \"datetime64\" in str(dtype):\n rv = pd.DatetimeIndex(rv, dtype=dtype)\n elif rv.dtype != dtype:\n rv = rv.astype(dtype)\n return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_summary_dtype_info.return.df_dtype_info": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_percentiles_summary_dtype_info.return.df_dtype_info", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 386, "end_line": 440, "span_ids": ["percentiles_summary", "dtype_info"], "tokens": 415}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def percentiles_summary(df, num_old, num_new, upsample, state):\n \"\"\"Summarize data using percentiles and derived weights.\n\n These summaries can be merged, compressed, and converted back 
into\n approximate percentiles.\n\n Parameters\n ----------\n df: pandas.Series\n Data to summarize\n num_old: int\n Number of partitions of the current object\n num_new: int\n Number of partitions of the new object\n upsample: float\n Scale factor to increase the number of percentiles calculated in\n each partition. Use to improve accuracy.\n \"\"\"\n from dask.array.dispatch import percentile_lookup as _percentile\n\n length = len(df)\n if length == 0:\n return ()\n random_state = np.random.RandomState(state)\n qs = sample_percentiles(num_old, num_new, length, upsample, random_state)\n data = df\n interpolation = \"linear\"\n\n if is_categorical_dtype(data):\n data = data.cat.codes\n interpolation = \"nearest\"\n elif isinstance(data.dtype, pd.core.dtypes.dtypes.DatetimeTZDtype) or np.issubdtype(\n data.dtype, np.integer\n ):\n interpolation = \"nearest\"\n vals, n = _percentile(data, qs, interpolation=interpolation)\n if (\n is_cupy_type(data)\n and interpolation == \"linear\"\n and np.issubdtype(data.dtype, np.integer)\n ):\n vals = np.round(vals).astype(data.dtype)\n if qs[0] == 0:\n # Ensure the 0th quantile is the minimum value of the data\n vals[0] = data.min()\n vals_and_weights = percentiles_to_weights(qs, vals, length)\n return vals_and_weights\n\n\ndef dtype_info(df):\n info = None\n if is_categorical_dtype(df):\n data = df.values\n info = (data.categories, data.ordered)\n return df.dtype, info", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_partition_quantiles_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/partitionquantiles.py_partition_quantiles_", "embedding": null, "metadata": {"file_path": "dask/dataframe/partitionquantiles.py", "file_name": "partitionquantiles.py", "file_type": "text/x-python", "category": "implementation", "start_line": 431, "end_line": 484, "span_ids": ["partition_quantiles"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def partition_quantiles(df, npartitions, upsample=1.0, random_state=None):\n \"\"\"Approximate quantiles of Series used for repartitioning\"\"\"\n assert isinstance(df, Series)\n # currently, only Series has quantile method\n # Index.quantile(list-like) must be pd.Series, not pd.Index\n return_type = Series\n\n qs = np.linspace(0, 1, npartitions + 1)\n token = tokenize(df, qs, upsample)\n if random_state is None:\n random_state = int(token, 16) % np.iinfo(np.int32).max\n state_data = random_state_data(df.npartitions, random_state)\n\n df_keys = df.__dask_keys__()\n\n name0 = \"re-quantiles-0-\" + token\n dtype_dsk = {(name0, 0): (dtype_info, df_keys[0])}\n\n name1 = \"re-quantiles-1-\" + token\n val_dsk = {\n (name1, i): (\n percentiles_summary,\n key,\n df.npartitions,\n npartitions,\n upsample,\n state,\n )\n for i, (state, key) in enumerate(zip(state_data, df_keys))\n }\n\n name2 = \"re-quantiles-2-\" + token\n merge_dsk = create_merge_tree(merge_and_compress_summaries, sorted(val_dsk), 
name2)\n if not merge_dsk:\n # Compress the data even if we only have one partition\n merge_dsk = {(name2, 0, 0): (merge_and_compress_summaries, [list(val_dsk)[0]])}\n\n merged_key = max(merge_dsk)\n\n name3 = \"re-quantiles-3-\" + token\n last_dsk = {\n (name3, 0): (\n pd.Series, # TODO: Use `type(df._meta)` when cudf adds `tolist()`\n (process_val_weights, merged_key, npartitions, (name0, 0)),\n qs,\n None,\n df.name,\n )\n }\n\n dsk = merge(df.dask, dtype_dsk, val_dsk, merge_dsk, last_dsk)\n new_divisions = [0.0, 1.0]\n return return_type(dsk, name3, df._meta, new_divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_get_dummies.if_isinstance_data_pd_S_get_dummies.return.map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_get_dummies.if_isinstance_data_pd_S_get_dummies.return.map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 178, "span_ids": ["get_dummies"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_dummies(\n data,\n prefix=None,\n prefix_sep=\"_\",\n dummy_na=False,\n columns=None,\n sparse=False,\n drop_first=False,\n dtype=np.uint8,\n **kwargs,\n):\n if isinstance(data, (pd.Series, pd.DataFrame)):\n return pd.get_dummies(\n data,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n **kwargs,\n )\n\n not_cat_msg = (\n \"`get_dummies` with non-categorical dtypes is not \"\n \"supported. Please use `df.categorize()` beforehand to \"\n \"convert to categorical dtype.\"\n )\n\n unknown_cat_msg = (\n \"`get_dummies` with unknown categories is not \"\n \"supported. 
Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known \"\n \"categories\"\n )\n\n if isinstance(data, Series):\n if not methods.is_categorical_dtype(data):\n raise NotImplementedError(not_cat_msg)\n if not has_known_categories(data):\n raise NotImplementedError(unknown_cat_msg)\n elif isinstance(data, DataFrame):\n if columns is None:\n if (data.dtypes == \"object\").any():\n raise NotImplementedError(not_cat_msg)\n columns = data._meta.select_dtypes(include=[\"category\"]).columns\n else:\n if not all(methods.is_categorical_dtype(data[c]) for c in columns):\n raise NotImplementedError(not_cat_msg)\n\n if not all(has_known_categories(data[c]) for c in columns):\n raise NotImplementedError(unknown_cat_msg)\n\n # We explicitly create `meta` on `data._meta` (the empty version) to\n # work around https://github.com/pandas-dev/pandas/issues/21993\n package_name = data._meta.__class__.__module__.split(\".\")[0]\n dummies = sys.modules[package_name].get_dummies\n meta = dummies(\n data._meta,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n **kwargs,\n )\n\n return map_partitions(\n dummies,\n data,\n prefix=prefix,\n prefix_sep=prefix_sep,\n dummy_na=dummy_na,\n columns=columns,\n sparse=sparse,\n drop_first=drop_first,\n meta=meta,\n dtype=dtype,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_6_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_6_", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 281, "end_line": 339, "span_ids": ["pivot_table", "melt"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Melt\n###############################################################\n\n\ndef melt(\n frame,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n):\n \"\"\"\n Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one or more columns are identifier variables\n (``id_vars``), while all other columns, considered measured variables (``value_vars``), are \"unpivoted\" to the row\n axis, leaving just two non-identifier columns, 'variable' and 'value'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar\n Name to use for the 'variable' column. 
If None it uses\n ``frame.columns.name`` or 'variable'.\n value_name : scalar, default 'value'\n Name to use for the 'value' column.\n col_level : int or string, optional\n If columns are a MultiIndex then use this level to melt.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n See Also\n --------\n pandas.DataFrame.melt\n \"\"\"\n\n from dask.dataframe.core import no_default\n\n return frame.map_partitions(\n M.melt,\n meta=no_default,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n token=\"melt\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_datetime_overlap_chunk.return.out_iloc_before_after_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_datetime_overlap_chunk.return.out_iloc_before_after_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 54, "span_ids": ["imports", "overlap_chunk"], "tokens": 370}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import datetime\nimport inspect\nfrom numbers import Integral\n\nimport pandas as pd\nfrom pandas.api.types import is_datetime64_any_dtype\nfrom pandas.core.window import Rolling as pd_Rolling\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import M, derived_from, funcname, has_keyword\nfrom . import methods\nfrom .core import _emulate\nfrom .utils import make_meta\n\n\ndef overlap_chunk(\n func, prev_part, current_part, next_part, before, after, args, kwargs\n):\n\n msg = (\n \"Partition size is less than overlapping \"\n \"window size. 
Try using ``df.repartition`` \"\n \"to increase the partition size.\"\n )\n\n if prev_part is not None and isinstance(before, Integral):\n if prev_part.shape[0] != before:\n raise NotImplementedError(msg)\n\n if next_part is not None and isinstance(after, Integral):\n if next_part.shape[0] != after:\n raise NotImplementedError(msg)\n\n parts = [p for p in (prev_part, current_part, next_part) if p is not None]\n combined = methods.concat(parts)\n out = func(combined, *args, **kwargs)\n if prev_part is None:\n before = None\n if isinstance(before, datetime.timedelta):\n before = len(prev_part)\n\n expansion = None\n if combined.shape[0] != 0:\n expansion = out.shape[0] // combined.shape[0]\n if before and expansion:\n before *= expansion\n if next_part is None:\n return out.iloc[before:]\n if isinstance(after, datetime.timedelta):\n after = len(next_part)\n if after and expansion:\n after *= expansion\n return out.iloc[before:-after]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap_map_overlap.timedelta_partition_message._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap_map_overlap.timedelta_partition_message._", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 57, "end_line": 117, "span_ids": ["map_overlap"], "tokens": 490}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(func, df, before, after, *args, **kwargs):\n \"\"\"Apply a function to each partition, sharing rows with adjacent partitions.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n df : dd.DataFrame, dd.Series\n before : int or timedelta\n The rows to prepend to partition ``i`` from the end of\n partition ``i - 1``.\n after : int or timedelta\n The rows to append to partition ``i`` from the beginning\n of partition ``i + 1``.\n args, kwargs :\n Arguments and keywords to pass to the function. 
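The trimming logic in ``overlap_chunk`` above can be illustrated with a hand-rolled pandas sketch on toy data (this is the idea, not the dask task itself):

```python
import pandas as pd

prev_part = pd.Series([1, 2, 3])
current = pd.Series([4, 5, 6])
next_part = pd.Series([7, 8, 9])
before, after = 2, 1  # rows borrowed from each neighbor

# Glue on the borrowed rows, apply the windowed function, then trim them off.
combined = pd.concat([prev_part.tail(before), current, next_part.head(after)])
out = combined.rolling(3).sum()
print(out.iloc[before:-after].tolist())  # [9.0, 12.0, 15.0]
```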
The partition will\n be the first argument, and these will be passed *after*.\n\n See Also\n --------\n dd.DataFrame.map_overlap\n \"\"\"\n if isinstance(before, datetime.timedelta) or isinstance(after, datetime.timedelta):\n if not is_datetime64_any_dtype(df.index._meta_nonempty.inferred_type):\n raise TypeError(\n \"Must have a `DatetimeIndex` when using string offset \"\n \"for `before` and `after`\"\n )\n else:\n if not (\n isinstance(before, Integral)\n and before >= 0\n and isinstance(after, Integral)\n and after >= 0\n ):\n raise ValueError(\"before and after must be positive integers\")\n\n if \"token\" in kwargs:\n func_name = kwargs.pop(\"token\")\n token = tokenize(df, before, after, *args, **kwargs)\n else:\n func_name = \"overlap-\" + funcname(func)\n token = tokenize(func, df, before, after, *args, **kwargs)\n\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = _emulate(func, df, *args, **kwargs)\n meta = make_meta(meta, index=df._meta.index, parent_meta=df._meta)\n\n name = f\"{func_name}-{token}\"\n name_a = \"overlap-prepend-\" + tokenize(df, before)\n name_b = \"overlap-append-\" + tokenize(df, after)\n df_name = df._name\n\n dsk = {}\n\n timedelta_partition_message = (\n \"Partition size is less than specified window. \"\n \"Try using ``df.repartition`` to increase the partition size\"\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap.if_before_and_isinstance__map_overlap.return.df__constructor_graph_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_map_overlap.if_before_and_isinstance__map_overlap.return.df__constructor_graph_na", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 119, "end_line": 211, "span_ids": ["map_overlap"], "tokens": 778}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(func, df, before, after, *args, **kwargs):\n # ... other code\n\n if before and isinstance(before, Integral):\n\n prevs = [None]\n for i in range(df.npartitions - 1):\n key = (name_a, i)\n dsk[key] = (M.tail, (df_name, i), before)\n prevs.append(key)\n\n elif isinstance(before, datetime.timedelta):\n # Assumes monotonic (increasing?) 
index\n divs = pd.Series(df.divisions)\n deltas = divs.diff().iloc[1:-1]\n\n # In the first case window-size is larger than at least one partition, thus it is\n # necessary to calculate how many partitions must be used for each rolling task.\n # Otherwise, these calculations can be skipped (faster)\n\n if (before > deltas).any():\n pt_z = divs[0]\n prevs = [None]\n for i in range(df.npartitions - 1):\n # Select all indexes of relevant partitions between the current partition and\n # the partition with the highest division outside the rolling window (before)\n pt_i = divs[i + 1]\n\n # lower-bound the search to the first division\n lb = max(pt_i - before, pt_z)\n\n first, j = divs[i], i\n while first > lb and j > 0:\n first = first - deltas[j]\n j = j - 1\n\n key = (name_a, i)\n dsk[key] = (\n _tail_timedelta,\n [(df_name, k) for k in range(j, i + 1)],\n (df_name, i + 1),\n before,\n )\n prevs.append(key)\n\n else:\n prevs = [None]\n for i in range(df.npartitions - 1):\n key = (name_a, i)\n dsk[key] = (\n _tail_timedelta,\n [(df_name, i)],\n (df_name, i + 1),\n before,\n )\n prevs.append(key)\n else:\n prevs = [None] * df.npartitions\n\n if after and isinstance(after, Integral):\n nexts = []\n for i in range(1, df.npartitions):\n key = (name_b, i)\n dsk[key] = (M.head, (df_name, i), after)\n nexts.append(key)\n nexts.append(None)\n elif isinstance(after, datetime.timedelta):\n # TODO: Do we have a use-case for this? Pandas doesn't allow negative rolling windows\n deltas = pd.Series(df.divisions).diff().iloc[1:-1]\n if (after > deltas).any():\n raise ValueError(timedelta_partition_message)\n\n nexts = []\n for i in range(1, df.npartitions):\n key = (name_b, i)\n dsk[key] = (_head_timedelta, (df_name, i - 0), (df_name, i), after)\n nexts.append(key)\n nexts.append(None)\n else:\n nexts = [None] * df.npartitions\n\n for i, (prev, current, next) in enumerate(zip(prevs, df.__dask_keys__(), nexts)):\n dsk[(name, i)] = (\n overlap_chunk,\n func,\n prev,\n current,\n next,\n before,\n after,\n args,\n kwargs,\n )\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])\n return df._constructor(graph, name, meta, df.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling._call_method_Rolling._call_method.return.map_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling._call_method_Rolling._call_method.return.map_overlap_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 298, "end_line": 336, "span_ids": ["Rolling._call_method"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling:\n\n def _call_method(self, method_name, *args, **kwargs):\n rolling_kwargs = self._rolling_kwargs()\n meta = self.pandas_rolling_method(\n self.obj._meta_nonempty, rolling_kwargs, method_name, *args, **kwargs\n 
)\n\n        if self._has_single_partition:\n            # There's no overlap, just use map_partitions\n            return self.obj.map_partitions(\n                self.pandas_rolling_method,\n                rolling_kwargs,\n                method_name,\n                *args,\n                token=method_name,\n                meta=meta,\n                **kwargs,\n            )\n        # Convert window to overlap\n        if self.center:\n            before = self.window // 2\n            after = self.window - before - 1\n        elif self._win_type == \"freq\":\n            before = pd.Timedelta(self.window)\n            after = 0\n        else:\n            before = self.window - 1\n            after = 0\n        return map_overlap(\n            self.pandas_rolling_method,\n            self.obj,\n            before,\n            after,\n            rolling_kwargs,\n            method_name,\n            *args,\n            token=method_name,\n            meta=meta,\n            **kwargs,\n        )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.count_Rolling.quantile.return.self__call_method_quanti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.count_Rolling.quantile.return.self__call_method_quanti", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 341, "end_line": 387, "span_ids": ["Rolling.min", "Rolling.quantile", "Rolling.count", "Rolling.sum", "Rolling.median", "Rolling.skew", "Rolling.cov", "Rolling.var", "Rolling.max", "Rolling.mean", "Rolling.kurt", "Rolling.std"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling:\n\n    @derived_from(pd_Rolling)\n    def count(self):\n        return self._call_method(\"count\")\n\n    @derived_from(pd_Rolling)\n    def cov(self):\n        return self._call_method(\"cov\")\n\n    @derived_from(pd_Rolling)\n    def sum(self):\n        return self._call_method(\"sum\")\n\n    @derived_from(pd_Rolling)\n    def mean(self):\n        return self._call_method(\"mean\")\n\n    @derived_from(pd_Rolling)\n    def median(self):\n        return self._call_method(\"median\")\n\n    @derived_from(pd_Rolling)\n    def min(self):\n        return self._call_method(\"min\")\n\n    @derived_from(pd_Rolling)\n    def max(self):\n        return self._call_method(\"max\")\n\n    @derived_from(pd_Rolling)\n    def std(self, ddof=1):\n        return self._call_method(\"std\", ddof=ddof)\n\n    @derived_from(pd_Rolling)\n    def var(self, ddof=1):\n        return self._call_method(\"var\", ddof=ddof)\n\n    @derived_from(pd_Rolling)\n    def skew(self):\n        return self._call_method(\"skew\")\n\n    @derived_from(pd_Rolling)\n    def kurt(self):\n        return self._call_method(\"kurt\")\n\n    @derived_from(pd_Rolling)\n    def quantile(self, quantile):\n        return self._call_method(\"quantile\", quantile)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.apply_Rolling.apply.return.self__call_method_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.apply_Rolling.apply.return.self__call_method_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 386, "end_line": 409, "span_ids": ["Rolling.apply"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling:\n\n @derived_from(pd_Rolling)\n def apply(\n self,\n func,\n raw=None,\n engine=\"cython\",\n engine_kwargs=None,\n args=None,\n kwargs=None,\n ):\n compat_kwargs = {}\n kwargs = kwargs or {}\n args = args or ()\n meta = self.obj._meta.rolling(0)\n if has_keyword(meta.apply, \"engine\"):\n # PANDAS_GT_100\n compat_kwargs = dict(engine=engine, engine_kwargs=engine_kwargs)\n if raw is None:\n # PANDAS_GT_100: The default changed from None to False\n raw = inspect.signature(meta.apply).parameters[\"raw\"]\n\n return self._call_method(\n \"apply\", func, raw=raw, args=args, kwargs=kwargs, **compat_kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_contextlib_logger.logging_getLogger___name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_contextlib_logger.logging_getLogger___name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 23, "span_ids": ["imports"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nimport logging\nimport math\nimport shutil\nimport tempfile\nimport uuid\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport tlz as toolz\n\nfrom .. import base, config\nfrom ..base import compute, compute_as_if_collection, is_dask_collection, tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..layers import ShuffleLayer, SimpleShuffleLayer\nfrom ..sizeof import sizeof\nfrom ..utils import M, digit\nfrom . 
import methods\nfrom .core import DataFrame, Series, _Frame, map_partitions, new_dd_object\nfrom .dispatch import group_split_dispatch, hash_object_dispatch\n\nlogger = logging.getLogger(__name__)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_index_set_index.return.set_partition_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_index_set_index.return.set_partition_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 118, "end_line": 178, "span_ids": ["set_index"], "tokens": 455}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_index(\n df,\n index,\n npartitions=None,\n shuffle=None,\n compute=False,\n drop=True,\n upsample=1.0,\n divisions=None,\n partition_size=128e6,\n **kwargs,\n):\n \"\"\"See _Frame.set_index for docstring\"\"\"\n if isinstance(index, Series) and index._name == df.index._name:\n return df\n if isinstance(index, (DataFrame, tuple, list)):\n # Accept [\"a\"], but not [[\"a\"]]\n if (\n isinstance(index, list)\n and len(index) == 1\n and not isinstance(index[0], list) # if index = [[\"a\"]], leave it that way\n ):\n index = index[0]\n else:\n raise NotImplementedError(\n \"Dask dataframe does not yet support multi-indexes.\\n\"\n \"You tried to index with this index: %s\\n\"\n \"Indexes must be single columns only.\" % str(index)\n )\n\n if npartitions == \"auto\":\n repartition = True\n npartitions = max(100, df.npartitions)\n else:\n if npartitions is None:\n npartitions = df.npartitions\n repartition = False\n\n if not isinstance(index, Series):\n index2 = df[index]\n else:\n index2 = index\n\n if divisions is None:\n divisions, mins, maxes = _calculate_divisions(\n df, index2, repartition, npartitions, upsample, partition_size\n )\n\n if (\n mins == sorted(mins)\n and maxes == sorted(maxes)\n and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))\n and npartitions == df.npartitions\n ):\n divisions = mins + [maxes[-1]]\n result = set_sorted_index(df, index, drop=drop, divisions=divisions)\n return result.map_partitions(M.sort_index)\n\n return set_partition(\n df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_remove_nans_remove_nans.return.divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_remove_nans_remove_nans.return.divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 126, "end_line": 152, "span_ids": ["remove_nans"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def remove_nans(divisions):\n \"\"\"Remove nans from divisions\n\n These sometime pop up when we call min/max on an empty partition\n\n Examples\n --------\n >>> remove_nans((np.nan, 1, 2))\n [1, 1, 2]\n >>> remove_nans((1, np.nan, 2))\n [1, 2, 2]\n >>> remove_nans((1, 2, np.nan))\n [1, 2, 2]\n \"\"\"\n divisions = list(divisions)\n\n for i in range(len(divisions) - 2, -1, -1):\n if pd.isnull(divisions[i]):\n divisions[i] = divisions[i + 1]\n\n for i in range(len(divisions) - 1, -1, -1):\n if not pd.isnull(divisions[i]):\n for j in range(i + 1, len(divisions)):\n divisions[j] = divisions[i]\n break\n\n return divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partition_set_partition.return.df4_map_partitions_M_sort": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partition_set_partition.return.df4_map_partitions_M_sort", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 237, "end_line": 325, "span_ids": ["set_partition"], "tokens": 651}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_partition(\n df, index, divisions, max_branch=32, drop=True, shuffle=None, compute=None\n):\n \"\"\"Group DataFrame by index\n\n Sets a new index and partitions data along that index according to\n divisions. Divisions are often found by computing approximate quantiles.\n The function ``set_index`` will do both of these steps.\n\n Parameters\n ----------\n df: DataFrame/Series\n Data that we want to re-partition\n index: string or Series\n Column to become the new index\n divisions: list\n Values to form new divisions between partitions\n drop: bool, default True\n Whether to delete columns to be used as the new index\n shuffle: str (optional)\n Either 'disk' for an on-disk shuffle or 'tasks' to use the task\n scheduling framework. Use 'disk' if you are on a single machine\n and 'tasks' if you are on a distributed cluster.\n max_branch: int (optional)\n If using the task-based shuffle, the amount of splitting each\n partition undergoes. Increase this for fewer copies but more\n scheduler overhead.\n\n See Also\n --------\n set_index\n shuffle\n partd\n \"\"\"\n meta = df._meta._constructor_sliced([0])\n if isinstance(divisions, tuple):\n # pd.isna considers tuples to be scalars. 
Convert to a list.\n divisions = list(divisions)\n\n if np.isscalar(index):\n dtype = df[index].dtype\n else:\n dtype = index.dtype\n\n if pd.isna(divisions).any() and pd.api.types.is_integer_dtype(dtype):\n # Can't construct a Series[int64] when any / all of the divisions are NaN.\n divisions = df._meta._constructor_sliced(divisions)\n else:\n divisions = df._meta._constructor_sliced(divisions, dtype=dtype)\n\n if np.isscalar(index):\n partitions = df[index].map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions)\n else:\n partitions = index.map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions, _index=index)\n\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n compute=compute,\n ignore_index=True,\n )\n\n if np.isscalar(index):\n df4 = df3.map_partitions(\n set_index_post_scalar,\n index_name=index,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n else:\n df4 = df3.map_partitions(\n set_index_post_series,\n index_name=index.name,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n\n df4.divisions = tuple(methods.tolist(divisions))\n\n return df4.map_partitions(M.sort_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_shuffle.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_shuffle.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 403, "span_ids": ["shuffle"], "tokens": 542}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle(\n df,\n index,\n shuffle=None,\n npartitions=None,\n max_branch=32,\n ignore_index=False,\n compute=None,\n):\n \"\"\"Group DataFrame by index\n\n Hash grouping of elements. After this operation all elements that have\n the same index will be in the same partition. Note that this requires\n full dataset read, serialization and shuffle. This is expensive. If\n possible you should avoid shuffles.\n\n This does not preserve a meaningful index/partitioning scheme. 
This is not\n deterministic if done in parallel.\n\n See Also\n --------\n set_index\n set_partition\n shuffle_disk\n \"\"\"\n list_like = pd.api.types.is_list_like(index) and not is_dask_collection(index)\n if shuffle == \"tasks\" and (isinstance(index, str) or list_like):\n # Avoid creating the \"_partitions\" column if possible.\n # We currently do this if the user is passing in\n # specific column names (and shuffle == \"tasks\").\n if isinstance(index, str):\n index = [index]\n else:\n index = list(index)\n nset = set(index)\n if nset & set(df.columns) == nset:\n return rearrange_by_column(\n df,\n index,\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n ignore_index=ignore_index,\n compute=compute,\n )\n\n if not isinstance(index, _Frame):\n if list_like:\n # Make sure we don't try to select with pd.Series/pd.Index\n index = list(index)\n index = df._select_columns_or_index(index)\n elif hasattr(index, \"to_frame\"):\n # If this is an index, we should still convert to a\n # DataFrame. Otherwise, the hashed values of a column\n # selection will not match (important when merging).\n index = index.to_frame()\n\n partitions = index.map_partitions(\n partitioning_index,\n npartitions=npartitions or df.npartitions,\n meta=df._meta._constructor_sliced([0]),\n transform_divisions=False,\n )\n df2 = df.assign(_partitions=partitions)\n df2._meta.index.name = df._meta.index.name\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n compute=compute,\n ignore_index=ignore_index,\n )\n del df3[\"_partitions\"]\n return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_divisions_rearrange_by_divisions.return.df3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_divisions_rearrange_by_divisions.return.df3", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 406, "end_line": 441, "span_ids": ["rearrange_by_divisions"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_divisions(\n df,\n column,\n divisions,\n max_branch=None,\n shuffle=None,\n ascending=True,\n na_position=\"last\",\n duplicates=True,\n):\n \"\"\"Shuffle dataframe so that column separates along divisions\"\"\"\n divisions = df._meta._constructor_sliced(divisions)\n # duplicates need to be removed sometimes to properly sort null dataframes\n if not duplicates:\n divisions = divisions.drop_duplicates()\n meta = df._meta._constructor_sliced([0])\n # Assign target output partitions to every row\n partitions = df[column].map_partitions(\n set_partitions_pre,\n divisions=divisions,\n ascending=ascending,\n na_position=na_position,\n meta=meta,\n )\n df2 = df.assign(_partitions=partitions)\n\n # Perform shuffle\n df3 = rearrange_by_column(\n 
df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n )\n del df3[\"_partitions\"]\n return df3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_rearrange_by_column.if_shuffle_disk_.else_.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_rearrange_by_column.if_shuffle_disk_.else_.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 398, "end_line": 426, "span_ids": ["rearrange_by_column"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_column(\n df,\n col,\n npartitions=None,\n max_branch=None,\n shuffle=None,\n compute=None,\n ignore_index=False,\n):\n shuffle = shuffle or config.get(\"shuffle\", None) or \"disk\"\n\n # if the requested output partitions < input partitions\n # we repartition first as shuffling overhead is\n # proportionate to the number of input partitions\n\n if npartitions is not None and npartitions < df.npartitions:\n df = df.repartition(npartitions=npartitions)\n\n if shuffle == \"disk\":\n return rearrange_by_column_disk(df, col, npartitions, compute=compute)\n elif shuffle == \"tasks\":\n df2 = rearrange_by_column_tasks(\n df, col, max_branch, npartitions, ignore_index=ignore_index\n )\n if ignore_index:\n df2._meta = df2._meta.reset_index(drop=True)\n return df2\n else:\n raise NotImplementedError(\"Unknown shuffle method %s\" % shuffle)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd_maybe_buffered_partd.__reduce__.if_self_tempdir_.else_.return._maybe_buffered_partd_F": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd_maybe_buffered_partd.__reduce__.if_self_tempdir_.else_.return._maybe_buffered_partd_F", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 772, "end_line": 786, "span_ids": ["maybe_buffered_partd.__reduce__", "maybe_buffered_partd", "maybe_buffered_partd.__init__"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
maybe_buffered_partd:\n \"\"\"\n If serialized, will return a non-buffered partd. Otherwise returns a buffered partd.\n \"\"\"\n\n def __init__(self, buffer=True, tempdir=None):\n self.tempdir = tempdir or config.get(\"temporary_directory\", None)\n self.buffer = buffer\n self.compression = config.get(\"dataframe.shuffle-compression\", None)\n\n def __reduce__(self):\n if self.tempdir:\n return (maybe_buffered_partd, (False, self.tempdir))\n else:\n return (maybe_buffered_partd, (False,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd.__call___maybe_buffered_partd.__call__.if_self_buffer_.else_.return.partd_PandasBlocks_file_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_maybe_buffered_partd.__call___maybe_buffered_partd.__call__.if_self_buffer_.else_.return.partd_PandasBlocks_file_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 491, "end_line": 517, "span_ids": ["maybe_buffered_partd.__call__"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class maybe_buffered_partd:\n\n def __call__(self, *args, **kwargs):\n import partd\n\n path = tempfile.mkdtemp(suffix=\".partd\", dir=self.tempdir)\n\n try:\n partd_compression = (\n getattr(partd.compressed, self.compression)\n if self.compression\n else None\n )\n except AttributeError as e:\n raise ImportError(\n \"Not able to import and load {} as compression algorithm. \"\n \"Please check if the library is installed and supported by Partd.\".format(\n self.compression\n )\n ) from e\n file = partd.File(path)\n partd.file.cleanup_files.append(path)\n # Wrap the partd file with compression, if set and available\n if partd_compression:\n file = partd_compression(file)\n if self.buffer:\n return partd.PandasBlocks(partd.Buffer(partd.Dict(), file))\n else:\n return partd.PandasBlocks(file)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py___partitioning_index.return.hash_object_dispatch_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py___partitioning_index.return.hash_object_dispatch_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 720, "end_line": 742, "span_ids": ["rearrange_by_column_tasks", "partitioning_index"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "########################################################\n# Various convenience functions to be run by the above #\n########################################################\n\n\ndef partitioning_index(df, npartitions):\n \"\"\"\n Computes a deterministic index mapping each record to a partition.\n\n Identical rows are mapped to the same partition.\n\n Parameters\n ----------\n df : DataFrame/Series/Index\n npartitions : int\n The number of partitions to group into.\n\n Returns\n -------\n partitions : ndarray\n An array of int64 values mapping each record to a partition.\n \"\"\"\n return hash_object_dispatch(df, index=False) % int(npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_2_shuffle_group_get.if_i_in_g_.else_.return.head": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_2_shuffle_group_get.if_i_in_g_.else_.return.head", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 790, "end_line": 814, "span_ids": ["shuffle_group_get", "shuffle_group_2"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_group_2(df, cols, ignore_index, nparts):\n if not len(df):\n return {}, df\n\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]].astype(np.int32)\n else:\n ind = (\n hash_object_dispatch(df[cols] if cols else df, index=False) % int(nparts)\n ).astype(np.int32)\n\n n = ind.max() + 1\n result2 = group_split_dispatch(df, ind.values.view(), n, ignore_index=ignore_index)\n return result2, df.iloc[:0]\n\n\ndef shuffle_group_get(g_head, i):\n g, head = g_head\n if i in g:\n return g[i]\n else:\n return head", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_shuffle_group.return.group_split_dispatch_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_shuffle_group.return.group_split_dispatch_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 870, "end_line": 916, "span_ids": ["shuffle_group"], "tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):\n \"\"\"Splits dataframe into groups\n\n The group is determined by their final partition, and which stage we are in\n in the shuffle\n\n Parameters\n ----------\n df: DataFrame\n cols: str or list\n Column name(s) on which to split the dataframe. If ``cols`` is not\n \"_partitions\", hashing will be used to determine target partition\n stage: int\n We shuffle dataframes with many partitions we in a few stages to avoid\n a quadratic number of tasks. This number corresponds to which stage\n we're in, starting from zero up to some small integer\n k: int\n Desired number of splits from this dataframe\n npartition: int\n Total number of output partitions for the full dataframe\n nfinal: int\n Total number of output partitions after repartitioning\n\n Returns\n -------\n out: Dict[int, DataFrame]\n A dictionary mapping integers in {0..k} to dataframes such that the\n hash values of ``df[col]`` are well partitioned.\n \"\"\"\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]]\n else:\n ind = hash_object_dispatch(df[cols] if cols else df, index=False)\n if nfinal and nfinal != npartitions:\n ind = ind % int(nfinal)\n\n c = ind.values\n typ = np.min_scalar_type(npartitions * 2)\n\n c = np.mod(c, npartitions).astype(typ, copy=False)\n np.floor_divide(c, k**stage, out=c)\n np.mod(c, k, out=c)\n\n return group_split_dispatch(df, c, k, ignore_index=ignore_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ensure_cleanup_on_exception_ensure_cleanup_on_exception.try_.except_Exception_.raise": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_ensure_cleanup_on_exception_ensure_cleanup_on_exception.try_.except_Exception_.raise", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 866, "end_line": 883, "span_ids": ["ensure_cleanup_on_exception"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextlib.contextmanager\ndef ensure_cleanup_on_exception(p):\n \"\"\"Ensure a partd.File is cleaned up.\n\n We have several tasks referring to a `partd.File` instance. We want to\n ensure that the file is cleaned up if and only if there's an exception\n in the tasks using the `partd.File`.\n \"\"\"\n try:\n yield\n except Exception:\n # the function (e.g. 
shuffle_group_3) had an internal exception.\n # We'll clean up our temporary files and re-raise.\n try:\n p.drop()\n except Exception:\n logger.exception(\"ignoring exception in ensure_cleanup_on_exception\")\n raise", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_3_get_overlap.return.df_loc_index_if_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_shuffle_group_3_get_overlap.return.df_loc_index_if_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 886, "end_line": 911, "span_ids": ["shuffle_group_3", "get_overlap", "set_index_post_scalar", "set_index_post_series", "drop_overlap"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shuffle_group_3(df, col, npartitions, p):\n with ensure_cleanup_on_exception(p):\n g = df.groupby(col)\n d = {i: g.get_group(i) for i in g.groups}\n p.append(d, fsync=True)\n\n\ndef set_index_post_scalar(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(index_name, drop=drop)\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef set_index_post_series(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(\"_index\", drop=True)\n df2.index.name = index_name\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef drop_overlap(df, index):\n return df.drop(index) if index in df.index else df\n\n\ndef get_overlap(df, index):\n return df.loc[[index]] if index in df.index else df._constructor()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_fix_overlap_fix_overlap.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_fix_overlap_fix_overlap.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 967, "end_line": 1006, "span_ids": ["fix_overlap"], "tokens": 512}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fix_overlap(ddf, mins, maxes, lens):\n \"\"\"Ensures that the upper bound on each partition of ddf (except the last) is exclusive\n\n This is accomplished by first removing 
empty partitions, then altering existing\n partitions as needed to include all the values for a particular index value in\n one partition.\n \"\"\"\n name = \"fix-overlap-\" + tokenize(ddf, mins, maxes, lens)\n\n non_empties = [i for i, l in enumerate(lens) if l != 0]\n # drop empty partitions by mapping each partition in a new graph to a particular\n # partition on the old graph.\n dsk = {(name, i): (ddf._name, div) for i, div in enumerate(non_empties)}\n divisions = tuple(mins) + (maxes[-1],)\n\n overlap = [i for i in range(1, len(mins)) if mins[i] >= maxes[i - 1]]\n\n frames = []\n for i in overlap:\n\n # `frames` is a list of data from previous partitions that we may want to\n # move to partition i. Here, we add \"overlap\" from the previous partition\n # (i-1) to this list.\n frames.append((get_overlap, dsk[(name, i - 1)], divisions[i]))\n\n # Make sure that any data added from partition i-1 to `frames` is removed\n # from partition i-1.\n dsk[(name, i - 1)] = (drop_overlap, dsk[(name, i - 1)], divisions[i])\n\n # We do not want to move \"overlap\" from the previous partition (i-1) into\n # this partition (i) if the data from this partition will need to be moved\n # to the next partition (i+1) anyway. If we concatenate data too early,\n # we may lose rows (https://github.com/dask/dask/issues/6972).\n if i == len(mins) - 2 or divisions[i] != divisions[i + 1]:\n frames.append(dsk[(name, i)])\n dsk[(name, i)] = (methods.concat, frames)\n frames = []\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, ddf._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_sorted_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_sorted_index_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 949, "end_line": 973, "span_ids": ["set_sorted_index"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_sorted_index(df, index, drop=True, divisions=None, **kwargs):\n if not isinstance(index, Series):\n meta = df._meta.set_index(index, drop=drop)\n else:\n meta = df._meta.set_index(index._meta, drop=drop)\n\n result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)\n\n if not divisions:\n return compute_and_set_divisions(result, **kwargs)\n elif len(divisions) != len(df.divisions):\n msg = (\n \"When doing `df.set_index(col, sorted=True, divisions=...)`, \"\n \"divisions indicates known splits in the index column. 
In this \"\n \"case divisions must be the same length as the existing \"\n \"divisions in `df`\\n\\n\"\n \"If the intent is to repartition into new divisions after \"\n \"setting the index, you probably want:\\n\\n\"\n \"`df.set_index(col, sorted=True).repartition(divisions=divisions)`\"\n )\n raise ValueError(msg)\n\n result.divisions = tuple(divisions)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_contextlib_MyAccessor.method.return.self_item": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_contextlib_MyAccessor.method.return.self_item", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 35, "span_ids": ["MyAccessor", "imports", "MyAccessor.method", "MyAccessor.prop", "MyAccessor.__init__", "ensure_removed"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\n\nimport numpy as np\nimport pytest\n\npd = pytest.importorskip(\"pandas\")\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\n\n\n@contextlib.contextmanager\ndef ensure_removed(obj, attr):\n \"\"\"Ensure that an attribute added to 'obj' during the test is\n removed when we're done\"\"\"\n try:\n yield\n finally:\n try:\n delattr(obj, attr)\n except AttributeError:\n pass\n obj._accessors.discard(attr)\n\n\nclass MyAccessor:\n def __init__(self, obj):\n self.obj = obj\n self.item = \"item\"\n\n @property\n def prop(self):\n return self.item\n\n def method(self):\n return self.item", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_register_test_accessor_works.with_ensure_removed_dd_Se.assert_b_mine_method_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_register_test_accessor_works.with_ensure_removed_dd_Se.assert_b_mine_method_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 66, "span_ids": ["test_register", "test_accessor_works"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"obj, registrar\",\n [\n (dd.Series, 
dd.extensions.register_series_accessor),\n (dd.DataFrame, dd.extensions.register_dataframe_accessor),\n (dd.Index, dd.extensions.register_index_accessor),\n ],\n)\ndef test_register(obj, registrar):\n with ensure_removed(obj, \"mine\"):\n before = set(dir(obj))\n registrar(\"mine\")(MyAccessor)\n instance = dd.from_pandas(obj._partition_type([], dtype=float), 2)\n assert instance.mine.prop == \"item\"\n after = set(dir(obj))\n assert (before ^ after) == {\"mine\"}\n assert \"mine\" in obj._accessors\n\n\ndef test_accessor_works():\n with ensure_removed(dd.Series, \"mine\"):\n dd.extensions.register_series_accessor(\"mine\")(MyAccessor)\n\n a = pd.Series([1, 2])\n b = dd.from_pandas(a, 2)\n assert b.mine.obj is b\n\n assert b.mine.prop == \"item\"\n assert b.mine.method() == \"item\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_df_ddf_df_ddf.return.df_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_df_ddf_df_ddf.return.df_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 69, "end_line": 89, "span_ids": ["df_ddf"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef df_ddf():\n import numpy as np\n\n df = pd.DataFrame(\n {\n \"str_col\": [\"abc\", \"bcd\", \"cdef\", \"DEFG\"],\n \"int_col\": [1, 2, 3, 4],\n \"dt_col\": np.array(\n [int(1e9), int(1.1e9), int(1.2e9), None], dtype=\"M8[ns]\"\n ),\n },\n index=[\"E\", \"f\", \"g\", \"h\"],\n )\n\n df[\"string_col\"] = df[\"str_col\"].astype(\"string\")\n df.loc[\"E\", \"string_col\"] = pd.NA\n\n ddf = dd.from_pandas(df, 2)\n\n return df, ddf", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_dt_accessor_test_dt_accessor_not_available.assert_dt_accessor_in_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_dt_accessor_test_dt_accessor_not_available.assert_dt_accessor_in_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 120, "span_ids": ["test_dt_accessor_not_available", "test_dt_accessor"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_dt_accessor(df_ddf):\n df, ddf = df_ddf\n\n assert \"date\" in dir(ddf.dt_col.dt)\n\n # pandas loses Series.name via datetime accessor\n # see https://github.com/pydata/pandas/issues/10712\n assert_eq(ddf.dt_col.dt.date, df.dt_col.dt.date, check_names=False)\n\n # to_pydatetime returns a numpy array in pandas, but a Series in dask\n assert_eq(\n ddf.dt_col.dt.to_pydatetime(),\n pd.Series(df.dt_col.dt.to_pydatetime(), index=df.index, dtype=object),\n )\n\n assert set(ddf.dt_col.dt.date.dask) == set(ddf.dt_col.dt.date.dask)\n assert set(ddf.dt_col.dt.to_pydatetime().dask) == set(\n ddf.dt_col.dt.to_pydatetime().dask\n )\n\n\ndef test_dt_accessor_not_available(df_ddf):\n df, ddf = df_ddf\n\n # Not available on invalid dtypes\n with pytest.raises(AttributeError) as exc:\n ddf.str_col.dt\n assert \".dt accessor\" in str(exc.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_test_str_accessor.for_regex_in_True_False.assert_set_ddf_str_col_st": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_test_str_accessor.for_regex_in_True_False.assert_set_ddf_str_col_st", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 122, "end_line": 174, "span_ids": ["test_str_accessor"], "tokens": 556}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor(df_ddf):\n df, ddf = df_ddf\n\n # implemented methods are present in tab completion\n assert \"upper\" in dir(ddf.str_col.str)\n assert \"upper\" in dir(ddf.string_col.str)\n assert \"upper\" in dir(ddf.index.str)\n\n # not implemented methods don't show up\n assert \"get_dummies\" not in dir(ddf.str_col.str)\n assert not hasattr(ddf.str_col.str, \"get_dummies\")\n\n # Test simple method on both series and index\n assert_eq(ddf.str_col.str.upper(), df.str_col.str.upper())\n assert set(ddf.str_col.str.upper().dask) == set(ddf.str_col.str.upper().dask)\n\n assert_eq(ddf.string_col.str.upper(), df.string_col.str.upper())\n assert set(ddf.string_col.str.upper().dask) == set(ddf.string_col.str.upper().dask)\n\n assert_eq(ddf.index.str.upper(), df.index.str.upper())\n assert set(ddf.index.str.upper().dask) == set(ddf.index.str.upper().dask)\n\n # make sure to pass through args & kwargs\n assert_eq(ddf.str_col.str.contains(\"a\"), df.str_col.str.contains(\"a\"))\n assert_eq(ddf.string_col.str.contains(\"a\"), df.string_col.str.contains(\"a\"))\n assert set(ddf.str_col.str.contains(\"a\").dask) == set(\n ddf.str_col.str.contains(\"a\").dask\n )\n\n assert_eq(\n ddf.str_col.str.contains(\"d\", case=False),\n df.str_col.str.contains(\"d\", case=False),\n )\n assert set(ddf.str_col.str.contains(\"d\", case=False).dask) == set(\n ddf.str_col.str.contains(\"d\", case=False).dask\n )\n\n 
for na in [True, False]:\n assert_eq(\n ddf.str_col.str.contains(\"a\", na=na), df.str_col.str.contains(\"a\", na=na)\n )\n assert set(ddf.str_col.str.contains(\"a\", na=na).dask) == set(\n ddf.str_col.str.contains(\"a\", na=na).dask\n )\n\n for regex in [True, False]:\n assert_eq(\n ddf.str_col.str.contains(\"a\", regex=regex),\n df.str_col.str.contains(\"a\", regex=regex),\n )\n assert set(ddf.str_col.str.contains(\"a\", regex=regex).dask) == set(\n ddf.str_col.str.contains(\"a\", regex=regex).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_not_available_test_str_accessor_extractall.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_not_available_test_str_accessor_extractall.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 204, "span_ids": ["test_str_accessor_getitem", "test_str_accessor_not_available", "test_str_accessor_extractall"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor_not_available(df_ddf):\n df, ddf = df_ddf\n\n # Not available on invalid dtypes\n with pytest.raises(AttributeError) as exc:\n ddf.int_col.str\n assert \".str accessor\" in str(exc.value)\n\n assert \"str\" not in dir(ddf.int_col)\n\n\ndef test_str_accessor_getitem(df_ddf):\n df, ddf = df_ddf\n assert_eq(ddf.str_col.str[:2], df.str_col.str[:2])\n assert_eq(ddf.str_col.str[1], df.str_col.str[1])\n\n\ndef test_str_accessor_extractall(df_ddf):\n df, ddf = df_ddf\n assert_eq(\n ddf.str_col.str.extractall(\"(.*)b(.*)\"), df.str_col.str.extractall(\"(.*)b(.*)\")\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.cases_test_arithmetics.ddf8.dd_from_pandas_pdf8_4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.cases_test_arithmetics.ddf8.dd_from_pandas_pdf8_4_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 45, "end_line": 86, "span_ids": ["test_arithmetics"], "tokens": 728}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics():\n # ... other code\n cases = [\n (ddf1, ddf1, pdf1, pdf1),\n (ddf1, ddf1.repartition([0, 1, 3, 6, 9]), pdf1, pdf1),\n (ddf2, ddf3, pdf2, pdf3),\n (ddf2.repartition([0, 3, 6, 7]), ddf3.repartition([0, 7]), pdf2, pdf3),\n (ddf2.repartition([0, 7]), ddf3.repartition([0, 2, 4, 5, 7]), pdf2, pdf3),\n (ddf1, ddf4, pdf1, pdf4),\n (ddf1, ddf4.repartition([0, 9]), pdf1, pdf4),\n (ddf1.repartition([0, 3, 9]), ddf4.repartition([0, 5, 9]), pdf1, pdf4),\n # dask + pandas\n (ddf1, pdf4, pdf1, pdf4),\n (ddf2, pdf3, pdf2, pdf3),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b)\n check_frame_arithmetics(l, r, el, er)\n\n # different index, pandas raises ValueError in comparison ops\n\n pdf5 = pd.DataFrame(\n {\"a\": [3, 2, 1, 5, 2, 8, 1, 4, 10], \"b\": [7, 8, 9, 4, 2, 3, 1, 0, 5]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf5 = dd.from_pandas(pdf5, 2)\n\n pdf6 = pd.DataFrame(\n {\"a\": [3, 2, 1, 5, 2, 8, 1, 4, 10], \"b\": [7, 8, 9, 5, 7, 8, 4, 2, 5]},\n index=[0, 1, 2, 3, 4, 5, 6, 7, 9],\n )\n ddf6 = dd.from_pandas(pdf6, 4)\n\n pdf7 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]},\n index=list(\"aaabcdeh\"),\n )\n pdf8 = pd.DataFrame(\n {\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]},\n index=list(\"abcdefgh\"),\n )\n ddf7 = dd.from_pandas(pdf7, 3)\n ddf8 = dd.from_pandas(pdf8, 4)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.pdf9_test_arithmetics.None_1.check_frame_arithmetics_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics.pdf9_test_arithmetics.None_1.check_frame_arithmetics_l", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 88, "end_line": 130, "span_ids": ["test_arithmetics"], "tokens": 587}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics():\n # ... 
other code\n\n pdf9 = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"b\": [5, 6, 7, 8, 1, 2, 3, 4],\n \"c\": [5, 6, 7, 8, 1, 2, 3, 4],\n },\n index=list(\"aaabcdeh\"),\n )\n pdf10 = pd.DataFrame(\n {\n \"b\": [5, 6, 7, 8, 4, 3, 2, 1],\n \"c\": [2, 4, 5, 3, 4, 2, 1, 0],\n \"d\": [2, 4, 5, 3, 4, 2, 1, 0],\n },\n index=list(\"abcdefgh\"),\n )\n ddf9 = dd.from_pandas(pdf9, 3)\n ddf10 = dd.from_pandas(pdf10, 4)\n\n # Arithmetics with different index\n cases = [\n (ddf5, ddf6, pdf5, pdf6),\n (ddf5.repartition([0, 9]), ddf6, pdf5, pdf6),\n (ddf5.repartition([0, 5, 9]), ddf6.repartition([0, 7, 9]), pdf5, pdf6),\n (ddf7, ddf8, pdf7, pdf8),\n (ddf7.repartition([\"a\", \"c\", \"h\"]), ddf8.repartition([\"a\", \"h\"]), pdf7, pdf8),\n (\n ddf7.repartition([\"a\", \"b\", \"e\", \"h\"]),\n ddf8.repartition([\"a\", \"e\", \"h\"]),\n pdf7,\n pdf8,\n ),\n (ddf9, ddf10, pdf9, pdf10),\n (ddf9.repartition([\"a\", \"c\", \"h\"]), ddf10.repartition([\"a\", \"h\"]), pdf9, pdf10),\n # dask + pandas\n (ddf5, pdf6, pdf5, pdf6),\n (ddf7, pdf8, pdf7, pdf8),\n (ddf9, pdf10, pdf9, pdf10),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b, allow_comparison_ops=False)\n check_frame_arithmetics(l, r, el, er, allow_comparison_ops=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_arithmetic_names_test_deterministic_arithmetic_names.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_arithmetic_names_test_deterministic_arithmetic_names.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 140, "span_ids": ["test_deterministic_arithmetic_names"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deterministic_arithmetic_names():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n assert sorted((a.x + a.y**2).dask) == sorted((a.x + a.y**2).dask)\n assert sorted((a.x + a.y**2).dask) != sorted((a.x + a.y**3).dask)\n assert sorted((a.x + a.y**2).dask) != sorted((a.x - a.y**2).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index_test_arithmetics_different_index.ddf6.dd_from_pandas_pdf6_2_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index_test_arithmetics_different_index.ddf6.dd_from_pandas_pdf6_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 172, "span_ids": ["test_arithmetics_different_index"], "tokens": 507}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics_different_index():\n # index are different, but overwraps\n pdf1 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [3, 5, 2, 5, 7]}, index=[1, 2, 3, 4, 5]\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame(\n {\"a\": [3, 2, 6, 7, 8], \"b\": [9, 4, 2, 6, 2]}, index=[3, 4, 5, 6, 7]\n )\n ddf2 = dd.from_pandas(pdf2, 2)\n\n # index are not overwrapped\n pdf3 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [3, 5, 2, 5, 7]}, index=[1, 2, 3, 4, 5]\n )\n ddf3 = dd.from_pandas(pdf3, 2)\n pdf4 = pd.DataFrame(\n {\"a\": [3, 2, 6, 7, 8], \"b\": [9, 4, 2, 6, 2]}, index=[10, 11, 12, 13, 14]\n )\n ddf4 = dd.from_pandas(pdf4, 2)\n\n # index is included in another\n pdf5 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [3, 5, 2, 5, 7]}, index=[1, 3, 5, 7, 9]\n )\n ddf5 = dd.from_pandas(pdf5, 2)\n pdf6 = pd.DataFrame(\n {\"a\": [3, 2, 6, 7, 8], \"b\": [9, 4, 2, 6, 2]}, index=[2, 3, 4, 5, 6]\n )\n ddf6 = dd.from_pandas(pdf6, 2)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.cases_test_arithmetics_different_index.ddf8.dd_from_pandas_pdf8_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.cases_test_arithmetics_different_index.ddf8.dd_from_pandas_pdf8_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 174, "end_line": 214, "span_ids": ["test_arithmetics_different_index"], "tokens": 718}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics_different_index():\n # ... 
other code\n\n cases = [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf2, ddf1, pdf2, pdf1),\n (ddf1.repartition([1, 3, 5]), ddf2.repartition([3, 4, 7]), pdf1, pdf2),\n (ddf2.repartition([3, 4, 5, 7]), ddf1.repartition([1, 2, 4, 5]), pdf2, pdf1),\n (ddf3, ddf4, pdf3, pdf4),\n (ddf4, ddf3, pdf4, pdf3),\n (\n ddf3.repartition([1, 2, 3, 4, 5]),\n ddf4.repartition([10, 11, 12, 13, 14]),\n pdf3,\n pdf4,\n ),\n (ddf4.repartition([10, 14]), ddf3.repartition([1, 3, 4, 5]), pdf4, pdf3),\n (ddf5, ddf6, pdf5, pdf6),\n (ddf6, ddf5, pdf6, pdf5),\n (ddf5.repartition([1, 7, 8, 9]), ddf6.repartition([2, 3, 4, 6]), pdf5, pdf6),\n (ddf6.repartition([2, 6]), ddf5.repartition([1, 3, 7, 9]), pdf6, pdf5),\n # dask + pandas\n (ddf1, pdf2, pdf1, pdf2),\n (ddf2, pdf1, pdf2, pdf1),\n (ddf3, pdf4, pdf3, pdf4),\n (ddf4, pdf3, pdf4, pdf3),\n (ddf5, pdf6, pdf5, pdf6),\n (ddf6, pdf5, pdf6, pdf5),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b, allow_comparison_ops=False)\n check_frame_arithmetics(l, r, el, er, allow_comparison_ops=False)\n\n pdf7 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]},\n index=[0, 2, 4, 8, 9, 10, 11, 13],\n )\n pdf8 = pd.DataFrame(\n {\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]},\n index=[1, 3, 4, 8, 9, 11, 12, 13],\n )\n ddf7 = dd.from_pandas(pdf7, 3)\n ddf8 = dd.from_pandas(pdf8, 2)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.pdf9_test_arithmetics_different_index.None_1.check_frame_arithmetics_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_arithmetics_different_index.pdf9_test_arithmetics_different_index.None_1.check_frame_arithmetics_l", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 216, "end_line": 262, "span_ids": ["test_arithmetics_different_index"], "tokens": 618}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_arithmetics_different_index():\n # ... 
other code\n\n pdf9 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]},\n index=[0, 2, 4, 8, 9, 10, 11, 13],\n )\n pdf10 = pd.DataFrame(\n {\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]},\n index=[0, 3, 4, 8, 9, 11, 12, 13],\n )\n ddf9 = dd.from_pandas(pdf9, 3)\n ddf10 = dd.from_pandas(pdf10, 2)\n\n cases = [\n (ddf7, ddf8, pdf7, pdf8),\n (ddf8, ddf7, pdf8, pdf7),\n # (ddf7.repartition([0, 13]),\n # ddf8.repartition([0, 4, 11, 14], force=True),\n # pdf7, pdf8),\n (\n ddf8.repartition([-5, 10, 15], force=True),\n ddf7.repartition([-1, 4, 11, 14], force=True),\n pdf8,\n pdf7,\n ),\n (\n ddf7.repartition([0, 8, 12, 13]),\n ddf8.repartition([0, 2, 8, 12, 13], force=True),\n pdf7,\n pdf8,\n ),\n (\n ddf8.repartition([-5, 0, 10, 20], force=True),\n ddf7.repartition([-1, 4, 11, 13], force=True),\n pdf8,\n pdf7,\n ),\n (ddf9, ddf10, pdf9, pdf10),\n (ddf10, ddf9, pdf10, pdf9),\n # dask + pandas\n (ddf7, pdf8, pdf7, pdf8),\n (ddf8, pdf7, pdf8, pdf7),\n (ddf9, pdf10, pdf9, pdf10),\n (ddf10, pdf9, pdf10, pdf9),\n ]\n\n for (l, r, el, er) in cases:\n check_series_arithmetics(l.a, r.b, el.a, er.b, allow_comparison_ops=False)\n check_frame_arithmetics(l, r, el, er, allow_comparison_ops=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_series_arithmetics_check_series_arithmetics.None_1.assert_eq_l_r_el": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_series_arithmetics_check_series_arithmetics.None_1.assert_eq_l_r_el", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 266, "end_line": 348, "span_ids": ["check_series_arithmetics"], "tokens": 878}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_series_arithmetics(l, r, el, er, allow_comparison_ops=True):\n assert isinstance(l, dd.Series)\n assert isinstance(r, (dd.Series, pd.Series))\n assert isinstance(el, pd.Series)\n assert isinstance(er, pd.Series)\n\n # l, r may be repartitioned, test whether repartition keeps original data\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l + r, el + er)\n assert_eq(l * r, el * er)\n assert_eq(l - r, el - er)\n assert_eq(l / r, el / er)\n assert_eq(l // r, el // er)\n assert_eq(l**r, el**er)\n assert_eq(l % r, el % er)\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(l & r, el & er)\n assert_eq(l | r, el | er)\n assert_eq(l ^ r, el ^ er)\n assert_eq(l > r, el > er)\n assert_eq(l < r, el < er)\n assert_eq(l >= r, el >= er)\n assert_eq(l <= r, el <= er)\n assert_eq(l == r, el == er)\n assert_eq(l != r, el != er)\n assert_eq(l.lt(r), el.lt(er))\n assert_eq(l.gt(r), el.gt(er))\n assert_eq(l.le(r), el.le(er))\n assert_eq(l.ge(r), el.ge(er))\n assert_eq(l.ne(r), el.ne(er))\n 
assert_eq(l.eq(r), el.eq(er))\n\n assert_eq(l + 2, el + 2)\n assert_eq(l * 2, el * 2)\n assert_eq(l - 2, el - 2)\n assert_eq(l / 2, el / 2)\n assert_eq(l & True, el & True)\n assert_eq(l | True, el | True)\n assert_eq(l ^ True, el ^ True)\n assert_eq(l // 2, el // 2)\n assert_eq(l**2, el**2)\n assert_eq(l % 2, el % 2)\n assert_eq(l > 2, el > 2)\n assert_eq(l < 2, el < 2)\n assert_eq(l >= 2, el >= 2)\n assert_eq(l <= 2, el <= 2)\n assert_eq(l == 2, el == 2)\n assert_eq(l != 2, el != 2)\n\n assert_eq(2 + r, 2 + er)\n assert_eq(2 * r, 2 * er)\n assert_eq(2 - r, 2 - er)\n assert_eq(2 / r, 2 / er)\n assert_eq(True & r, True & er)\n assert_eq(True | r, True | er)\n assert_eq(True ^ r, True ^ er)\n assert_eq(2 // r, 2 // er)\n assert_eq(2**r, 2**er)\n assert_eq(2 % r, 2 % er)\n assert_eq(2 > r, 2 > er)\n assert_eq(2 < r, 2 < er)\n assert_eq(2 >= r, 2 >= er)\n assert_eq(2 <= r, 2 <= er)\n assert_eq(2 == r, 2 == er)\n assert_eq(2 != r, 2 != er)\n\n assert_eq(l.lt(2), el.lt(2))\n assert_eq(l.gt(2), el.gt(2))\n assert_eq(l.le(2), el.le(2))\n assert_eq(l.ge(2), el.ge(2))\n assert_eq(l.ne(2), el.ne(2))\n assert_eq(l.eq(2), el.eq(2))\n\n assert_eq(-l, -el)\n assert_eq(abs(l), abs(el))\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(~(l == r), ~(el == er))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_frame_arithmetics_check_frame_arithmetics.None_1.assert_eq_l_r_el": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_check_frame_arithmetics_check_frame_arithmetics.None_1.assert_eq_l_r_el", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 351, "end_line": 432, "span_ids": ["check_frame_arithmetics"], "tokens": 878}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_frame_arithmetics(l, r, el, er, allow_comparison_ops=True):\n assert isinstance(l, dd.DataFrame)\n assert isinstance(r, (dd.DataFrame, pd.DataFrame))\n assert isinstance(el, pd.DataFrame)\n assert isinstance(er, pd.DataFrame)\n # l, r may be repartitioned, test whether repartition keeps original data\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l + r, el + er)\n assert_eq(l * r, el * er)\n assert_eq(l - r, el - er)\n assert_eq(l / r, el / er)\n assert_eq(l // r, el // er)\n assert_eq(l**r, el**er)\n assert_eq(l % r, el % er)\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(l & r, el & er)\n assert_eq(l | r, el | er)\n assert_eq(l ^ r, el ^ er)\n assert_eq(l > r, el > er)\n assert_eq(l < r, el < er)\n assert_eq(l >= r, el >= er)\n assert_eq(l <= r, el <= er)\n assert_eq(l == r, el == er)\n assert_eq(l != r, el != er)\n assert_eq(l.lt(r), el.lt(er))\n assert_eq(l.gt(r), el.gt(er))\n assert_eq(l.le(r), el.le(er))\n 
assert_eq(l.ge(r), el.ge(er))\n assert_eq(l.ne(r), el.ne(er))\n assert_eq(l.eq(r), el.eq(er))\n\n assert_eq(l + 2, el + 2)\n assert_eq(l * 2, el * 2)\n assert_eq(l - 2, el - 2)\n assert_eq(l / 2, el / 2)\n assert_eq(l & True, el & True)\n assert_eq(l | True, el | True)\n assert_eq(l ^ True, el ^ True)\n assert_eq(l // 2, el // 2)\n assert_eq(l**2, el**2)\n assert_eq(l % 2, el % 2)\n assert_eq(l > 2, el > 2)\n assert_eq(l < 2, el < 2)\n assert_eq(l >= 2, el >= 2)\n assert_eq(l <= 2, el <= 2)\n assert_eq(l == 2, el == 2)\n assert_eq(l != 2, el != 2)\n\n assert_eq(2 + l, 2 + el)\n assert_eq(2 * l, 2 * el)\n assert_eq(2 - l, 2 - el)\n assert_eq(2 / l, 2 / el)\n assert_eq(True & l, True & el)\n assert_eq(True | l, True | el)\n assert_eq(True ^ l, True ^ el)\n assert_eq(2 // l, 2 // el)\n assert_eq(2**l, 2**el)\n assert_eq(2 % l, 2 % el)\n assert_eq(2 > l, 2 > el)\n assert_eq(2 < l, 2 < el)\n assert_eq(2 >= l, 2 >= el)\n assert_eq(2 <= l, 2 <= el)\n assert_eq(2 == l, 2 == el)\n assert_eq(2 != l, 2 != el)\n\n assert_eq(l.lt(2), el.lt(2))\n assert_eq(l.gt(2), el.gt(2))\n assert_eq(l.le(2), el.le(2))\n assert_eq(l.ge(2), el.ge(2))\n assert_eq(l.ne(2), el.ne(2))\n assert_eq(l.eq(2), el.eq(2))\n\n assert_eq(-l, -el)\n assert_eq(abs(l), abs(el))\n\n if allow_comparison_ops:\n # comparison is allowed if data have same index\n assert_eq(~(l == r), ~(el == er))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_test_scalar_arithmetics.assert_eq_l_r_el": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_test_scalar_arithmetics.assert_eq_l_r_el", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 502, "span_ids": ["test_scalar_arithmetics"], "tokens": 715}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_scalar_arithmetics():\n el = np.int64(10)\n er = np.int64(4)\n l = dd.core.Scalar({(\"l\", 0): el}, \"l\", \"i8\")\n r = dd.core.Scalar({(\"r\", 0): er}, \"r\", \"i8\")\n\n assert isinstance(l, dd.core.Scalar)\n assert isinstance(r, dd.core.Scalar)\n\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l + r, el + er)\n assert_eq(l * r, el * er)\n assert_eq(l - r, el - er)\n assert_eq(l / r, el / er)\n assert_eq(l // r, el // er)\n assert_eq(l**r, el**er)\n assert_eq(l % r, el % er)\n\n assert_eq(l & r, el & er)\n assert_eq(l | r, el | er)\n assert_eq(l ^ r, el ^ er)\n assert_eq(l > r, el > er)\n assert_eq(l < r, el < er)\n assert_eq(l >= r, el >= er)\n assert_eq(l <= r, el <= er)\n assert_eq(l == r, el == er)\n assert_eq(l != r, el != er)\n\n assert_eq(l + 2, el + 2)\n assert_eq(l * 2, el * 2)\n assert_eq(l - 2, el - 2)\n assert_eq(l / 2, el / 2)\n assert_eq(l & True, el & True)\n assert_eq(l | True, el | True)\n 
assert_eq(l ^ True, el ^ True)\n assert_eq(l // 2, el // 2)\n assert_eq(l**2, el**2)\n assert_eq(l % 2, el % 2)\n assert_eq(l > 2, el > 2)\n assert_eq(l < 2, el < 2)\n assert_eq(l >= 2, el >= 2)\n assert_eq(l <= 2, el <= 2)\n assert_eq(l == 2, el == 2)\n assert_eq(l != 2, el != 2)\n\n assert_eq(2 + r, 2 + er)\n assert_eq(2 * r, 2 * er)\n assert_eq(2 - r, 2 - er)\n assert_eq(2 / r, 2 / er)\n assert_eq(True & r, True & er)\n assert_eq(True | r, True | er)\n assert_eq(True ^ r, True ^ er)\n assert_eq(2 // r, 2 // er)\n assert_eq(2**r, 2**er)\n assert_eq(2 % r, 2 % er)\n assert_eq(2 > r, 2 > er)\n assert_eq(2 < r, 2 < er)\n assert_eq(2 >= r, 2 >= er)\n assert_eq(2 <= r, 2 <= er)\n assert_eq(2 == r, 2 == er)\n assert_eq(2 != r, 2 != er)\n\n assert_eq(-l, -el)\n assert_eq(abs(l), abs(el))\n\n assert_eq(~(l == r), ~(el == er))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_with_dask_instances_test_scalar_arithmetics_with_dask_instances.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_scalar_arithmetics_with_dask_instances_test_scalar_arithmetics_with_dask_instances.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 504, "end_line": 548, "span_ids": ["test_scalar_arithmetics_with_dask_instances"], "tokens": 428}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_scalar_arithmetics_with_dask_instances():\n s = dd.core.Scalar({(\"s\", 0): 10}, \"s\", \"i8\")\n e = 10\n\n pds = pd.Series([1, 2, 3, 4, 5, 6, 7])\n dds = dd.from_pandas(pds, 2)\n\n pdf = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n ddf = dd.from_pandas(pdf, 2)\n\n # pandas Series\n result = pds + s # this result pd.Series (automatically computed)\n assert isinstance(result, pd.Series)\n assert_eq(result, pds + e)\n\n result = s + pds # this result dd.Series\n assert isinstance(result, dd.Series)\n assert_eq(result, pds + e)\n\n # dask Series\n result = dds + s # this result dd.Series\n assert isinstance(result, dd.Series)\n assert_eq(result, pds + e)\n\n result = s + dds # this result dd.Series\n assert isinstance(result, dd.Series)\n assert_eq(result, pds + e)\n\n # pandas DataFrame\n result = pdf + s # this result pd.DataFrame (automatically computed)\n assert isinstance(result, pd.DataFrame)\n assert_eq(result, pdf + e)\n\n result = s + pdf # this result dd.DataFrame\n assert isinstance(result, dd.DataFrame)\n assert_eq(result, pdf + e)\n\n # dask DataFrame\n result = ddf + s # this result dd.DataFrame\n assert isinstance(result, dd.DataFrame)\n assert_eq(result, pdf + e)\n\n result = s + ddf # this result dd.DataFrame\n assert isinstance(result, dd.DataFrame)\n assert_eq(result, pdf + e)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods_test_frame_series_arithmetic_methods.s.dd_core_Scalar_s_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods_test_frame_series_arithmetic_methods.s.dd_core_Scalar_s_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 551, "end_line": 578, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n pdf1 = pd.DataFrame(\n {\n \"A\": np.arange(10),\n \"B\": [np.nan, 1, 2, 3, 4] * 2,\n \"C\": [np.nan] * 10,\n \"D\": np.arange(10),\n },\n index=list(\"abcdefghij\"),\n columns=list(\"ABCD\"),\n )\n pdf2 = pd.DataFrame(\n np.random.randn(10, 4), index=list(\"abcdefghjk\"), columns=list(\"ABCX\")\n )\n ps1 = pdf1.A\n ps2 = pdf2.A\n ps3 = pd.Series(np.random.randn(10), index=list(\"ABCDXabcde\"))\n\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 2)\n ds1 = ddf1.A\n ds2 = ddf2.A\n\n s = dd.core.Scalar({(\"s\", 0): 4}, \"s\", \"i8\")\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf.pytest_raises_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf_test_frame_series_arithmetic_methods.for_l_r_el_er_in_ddf.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 623, "end_line": 647, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 484}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n # ... other code\n\n for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:\n assert_eq(l, el)\n assert_eq(r, er)\n\n # must specify axis=0 to add Series to each column\n # axis=1 is not supported (add to each row)\n assert_eq(l.add(r, axis=0), el.add(er, axis=0))\n assert_eq(l.sub(r, axis=0), el.sub(er, axis=0))\n assert_eq(l.mul(r, axis=0), el.mul(er, axis=0))\n assert_eq(l.div(r, axis=0), el.div(er, axis=0))\n assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))\n assert_eq(l.truediv(r, axis=0), el.truediv(er, axis=0))\n assert_eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))\n assert_eq(l.mod(r, axis=0), el.mod(er, axis=0))\n assert_eq(l.pow(r, axis=0), el.pow(er, axis=0))\n\n assert_eq(l.radd(r, axis=0), el.radd(er, axis=0))\n assert_eq(l.rsub(r, axis=0), el.rsub(er, axis=0))\n assert_eq(l.rmul(r, axis=0), el.rmul(er, axis=0))\n assert_eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))\n assert_eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))\n assert_eq(l.rmod(r, axis=0), el.rmod(er, axis=0))\n assert_eq(l.rpow(r, axis=0), el.rpow(er, axis=0))\n\n pytest.raises(ValueError, lambda: l.add(r, axis=1))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.None_2_test_frame_series_arithmetic_methods.None_2.for_axis_in_0_1_index.assert_eq_l_rmul_r_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.None_2_test_frame_series_arithmetic_methods.None_2.for_axis_in_0_1_index.assert_eq_l_rmul_r_axis_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 641, "end_line": 661, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 449}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n # ... other code\n\n for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:\n assert_eq(l, el)\n assert_eq(r, er)\n\n for axis in [0, 1, \"index\", \"columns\"]:\n assert_eq(l.add(r, axis=axis), el.add(er, axis=axis))\n assert_eq(l.sub(r, axis=axis), el.sub(er, axis=axis))\n assert_eq(l.mul(r, axis=axis), el.mul(er, axis=axis))\n assert_eq(l.div(r, axis=axis), el.div(er, axis=axis))\n assert_eq(l.divide(r, axis=axis), el.divide(er, axis=axis))\n assert_eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))\n assert_eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))\n assert_eq(l.mod(r, axis=axis), el.mod(er, axis=axis))\n assert_eq(l.pow(r, axis=axis), el.pow(er, axis=axis))\n assert_eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))\n assert_eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))\n assert_eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))\n assert_eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))\n assert_eq(l.radd(r, axis=axis), el.radd(er, axis=axis))\n assert_eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))\n assert_eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_test_reductions.boolds.dd_from_pandas_bools_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_test_reductions.boolds.dd_from_pandas_bools_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 674, "end_line": 709, 
"span_ids": ["test_reductions"], "tokens": 447}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions(split_every):\n dsk = {\n (\"x\", 0): pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [True, True, False]}, index=[0, 1, 3]\n ),\n (\"x\", 1): pd.DataFrame(\n {\"a\": [4, 5, 6], \"b\": [3, 2, 1], \"c\": [False, False, False]},\n index=[5, 6, 8],\n ),\n (\"x\", 2): pd.DataFrame(\n {\n \"a\": [13094304034, 3489385935, 100006774],\n \"b\": [0, 0, 0],\n \"c\": [True, True, True],\n },\n index=[9, 9, 9],\n ),\n }\n meta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\", \"c\": \"bool\"},\n index=pd.Index([], \"i8\"),\n parent_meta=pd.DataFrame(),\n )\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 = ddf1.compute()\n\n nans1 = pd.Series([1] + [np.nan] * 4 + [2] + [np.nan] * 3)\n nands1 = dd.from_pandas(nans1, 2)\n nans2 = pd.Series([1] + [np.nan] * 8)\n nands2 = dd.from_pandas(nans2, 2)\n nans3 = pd.Series([np.nan] * 9)\n nands3 = dd.from_pandas(nans3, 2)\n\n bools = pd.Series([True, False, True, False, True], dtype=bool)\n boolds = dd.from_pandas(bools, 2)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.assert_dask_graph_ddf1_b__test_reductions.assert_eq_ddf1_index_coun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.assert_dask_graph_ddf1_b__test_reductions.assert_eq_ddf1_index_coun", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 781, "end_line": 799, "span_ids": ["test_reductions"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions(split_every):\n # ... 
other code\n\n assert_dask_graph(ddf1.b.sum(split_every=split_every), \"series-sum\")\n assert_dask_graph(ddf1.b.prod(split_every=split_every), \"series-prod\")\n assert_dask_graph(ddf1.b.min(split_every=split_every), \"series-min\")\n assert_dask_graph(ddf1.b.max(split_every=split_every), \"series-max\")\n assert_dask_graph(ddf1.b.count(split_every=split_every), \"series-count\")\n assert_dask_graph(ddf1.b.std(split_every=split_every), \"series-std\")\n assert_dask_graph(ddf1.b.var(split_every=split_every), \"series-var\")\n assert_dask_graph(ddf1.b.sem(split_every=split_every), \"series-sem\")\n assert_dask_graph(ddf1.b.std(ddof=0, split_every=split_every), \"series-std\")\n assert_dask_graph(ddf1.b.var(ddof=0, split_every=split_every), \"series-var\")\n assert_dask_graph(ddf1.b.sem(ddof=0, split_every=split_every), \"series-sem\")\n assert_dask_graph(ddf1.b.mean(split_every=split_every), \"series-mean\")\n # nunique is performed using drop-duplicates\n assert_dask_graph(ddf1.b.nunique(split_every=split_every), \"drop-duplicates\")\n\n # testing index\n assert_eq(ddf1.index.min(split_every=split_every), pdf1.index.min())\n assert_eq(ddf1.index.max(split_every=split_every), pdf1.index.max())\n assert_eq(ddf1.index.count(split_every=split_every), pd.notnull(pdf1.index).sum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_timedelta_test_reductions_timedelta.assert_eq_dds_count_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_timedelta_test_reductions_timedelta.assert_eq_dds_count_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 802, "end_line": 810, "span_ids": ["test_reductions_timedelta"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_timedelta(split_every):\n ds = pd.Series(pd.to_timedelta([2, 3, 4, np.nan, 5]))\n dds = dd.from_pandas(ds, 2)\n\n assert_eq(dds.sum(split_every=split_every), ds.sum())\n assert_eq(dds.min(split_every=split_every), ds.min())\n assert_eq(dds.max(split_every=split_every), ds.max())\n assert_eq(dds.count(split_every=split_every), ds.count())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_out_test_reductions_out.None_4": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_out_test_reductions_out.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 820, "end_line": 864, "span_ids": ["test_reductions_out"], "tokens": 470}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"frame,axis,out\",\n [\n (\n pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n 0,\n pd.Series([], dtype=\"float64\"),\n ),\n (\n pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n 1,\n pd.Series([], dtype=\"float64\"),\n ),\n (pd.Series([1, 2.5, 6]), None, None),\n ],\n)\n@pytest.mark.parametrize(\n \"redfunc\", [\"sum\", \"prod\", \"product\", \"min\", \"max\", \"mean\", \"var\", \"std\"]\n)\ndef test_reductions_out(frame, axis, out, redfunc):\n dsk_in = dd.from_pandas(frame, 3)\n dsk_out = dd.from_pandas(pd.Series([0]), 1).sum()\n\n if out is not None:\n dsk_out = dd.from_pandas(out, 3)\n\n np_redfunc = getattr(np, redfunc)\n pd_redfunc = getattr(frame.__class__, redfunc)\n dsk_redfunc = getattr(dsk_in.__class__, redfunc)\n\n if redfunc in [\"var\", \"std\"]:\n # numpy has default ddof value 0 while\n # dask and pandas have 1, so ddof should be passed\n # explicitly when calling np.var(dask)\n np_redfunc(dsk_in, axis=axis, ddof=1, out=dsk_out)\n else:\n np_redfunc(dsk_in, axis=axis, out=dsk_out)\n\n assert_eq(dsk_out, pd_redfunc(frame, axis=axis))\n\n dsk_redfunc(dsk_in, axis=axis, split_every=False, out=dsk_out)\n assert_eq(dsk_out, pd_redfunc(frame, axis=axis))\n\n dsk_redfunc(dsk_in, axis=axis, split_every=2, out=dsk_out)\n assert_eq(dsk_out, pd_redfunc(frame, axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_allany_test_allany.None_19": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_allany_test_allany.None_19", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 858, "end_line": 904, "span_ids": ["test_allany"], "tokens": 537}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_allany(split_every):\n df = pd.DataFrame(\n np.random.choice([True, False], size=(100, 4)), columns=[\"A\", \"B\", \"C\", \"D\"]\n )\n df[\"E\"] = 
list(\"abcde\") * 20\n ddf = dd.from_pandas(df, 10)\n\n assert_eq(ddf.all(split_every=split_every), df.all())\n assert_eq(ddf.all(axis=1, split_every=split_every), df.all(axis=1))\n assert_eq(ddf.all(axis=0, split_every=split_every), df.all(axis=0))\n\n assert_eq(ddf.any(split_every=split_every), df.any())\n assert_eq(ddf.any(axis=1, split_every=split_every), df.any(axis=1))\n assert_eq(ddf.any(axis=0, split_every=split_every), df.any(axis=0))\n\n assert_eq(ddf.A.all(split_every=split_every), df.A.all())\n assert_eq(ddf.A.any(split_every=split_every), df.A.any())\n\n # testing numpy functions with out param\n ddf_out_axis_default = dd.from_pandas(\n pd.Series([False, False, False, False, False], index=[\"A\", \"B\", \"C\", \"D\", \"E\"]),\n 10,\n )\n ddf_out_axis1 = dd.from_pandas(\n pd.Series(np.random.choice([True, False], size=(100,))), 10\n )\n\n # all\n ddf.all(split_every=split_every, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.all())\n\n ddf.all(axis=1, split_every=split_every, out=ddf_out_axis1)\n assert_eq(ddf_out_axis1, df.all(axis=1))\n\n ddf.all(split_every=split_every, axis=0, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.all(axis=0))\n\n # any\n ddf.any(split_every=split_every, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.any())\n\n ddf.any(axis=1, split_every=split_every, out=ddf_out_axis1)\n assert_eq(ddf_out_axis1, df.any(axis=1))\n\n ddf.any(split_every=split_every, axis=0, out=ddf_out_axis_default)\n assert_eq(ddf_out_axis_default, df.any(axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_reduction_names_test_deterministic_reduction_names.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_deterministic_reduction_names_test_deterministic_reduction_names.assert_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 916, "end_line": 960, "span_ids": ["test_deterministic_reduction_names"], "tokens": 387}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_deterministic_reduction_names(split_every):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for x in [ddf, ddf.x]:\n assert (\n x.sum(split_every=split_every)._name == x.sum(split_every=split_every)._name\n )\n assert (\n x.prod(split_every=split_every)._name\n == x.prod(split_every=split_every)._name\n )\n assert (\n x.product(split_every=split_every)._name\n == x.product(split_every=split_every)._name\n )\n assert (\n x.min(split_every=split_every)._name == x.min(split_every=split_every)._name\n )\n assert (\n x.max(split_every=split_every)._name == x.max(split_every=split_every)._name\n 
)\n assert (\n x.count(split_every=split_every)._name\n == x.count(split_every=split_every)._name\n )\n assert (\n x.std(split_every=split_every)._name == x.std(split_every=split_every)._name\n )\n assert (\n x.var(split_every=split_every)._name == x.var(split_every=split_every)._name\n )\n assert (\n x.sem(split_every=split_every)._name == x.sem(split_every=split_every)._name\n )\n assert (\n x.mean(split_every=split_every)._name\n == x.mean(split_every=split_every)._name\n )\n\n assert (\n ddf.x.nunique(split_every=split_every)._name\n == ddf.x.nunique(split_every=split_every)._name\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_test_reductions_frame.assert_eq_ddf1_mean_split": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_test_reductions_frame.assert_eq_ddf1_mean_split", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1063, "end_line": 1088, "span_ids": ["test_reductions_frame"], "tokens": 483}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame(split_every):\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n }\n meta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"), parent_meta=pd.DataFrame()\n )\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 = ddf1.compute()\n\n assert_eq(ddf1.sum(split_every=split_every), pdf1.sum())\n assert_eq(ddf1.prod(split_every=split_every), pdf1.prod())\n assert_eq(ddf1.product(split_every=split_every), pdf1.product())\n assert_eq(ddf1.min(split_every=split_every), pdf1.min())\n assert_eq(ddf1.max(split_every=split_every), pdf1.max())\n assert_eq(ddf1.count(split_every=split_every), pdf1.count())\n assert_eq(ddf1.std(split_every=split_every), pdf1.std())\n assert_eq(ddf1.var(split_every=split_every), pdf1.var())\n assert_eq(ddf1.sem(split_every=split_every), pdf1.sem())\n assert_eq(ddf1.std(ddof=0, split_every=split_every), pdf1.std(ddof=0))\n assert_eq(ddf1.var(ddof=0, split_every=split_every), pdf1.var(ddof=0))\n assert_eq(ddf1.sem(ddof=0, split_every=split_every), pdf1.sem(ddof=0))\n assert_eq(ddf1.mean(split_every=split_every), pdf1.mean())\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan_test_reductions_frame_nan.assert_eq_df_mean_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan_test_reductions_frame_nan.assert_eq_df_mean_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1225, "end_line": 1251, "span_ids": ["test_reductions_frame_nan"], "tokens": 383}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame_nan(split_every):\n df = pd.DataFrame(\n {\n \"a\": [1, 2, np.nan, 4, 5, 6, 7, 8],\n \"b\": [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],\n \"c\": [np.nan] * 8,\n }\n )\n ddf = dd.from_pandas(df, 3)\n assert_eq(df.sum(), ddf.sum(split_every=split_every))\n assert_eq(df.prod(), ddf.prod(split_every=split_every))\n assert_eq(df.product(), ddf.product(split_every=split_every))\n assert_eq(df.min(), ddf.min(split_every=split_every))\n assert_eq(df.max(), ddf.max(split_every=split_every))\n assert_eq(df.count(), ddf.count(split_every=split_every))\n with warnings.catch_warnings():\n # dask.dataframe should probably filter this, to match pandas, but\n # it seems quite difficult.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(df.std(), ddf.std(split_every=split_every))\n assert_eq(df.var(), ddf.var(split_every=split_every))\n assert_eq(df.sem(), ddf.sem(split_every=split_every))\n assert_eq(df.std(ddof=0), ddf.std(ddof=0, split_every=split_every))\n assert_eq(df.var(ddof=0), ddf.var(ddof=0, split_every=split_every))\n assert_eq(df.sem(ddof=0), ddf.sem(ddof=0, split_every=split_every))\n assert_eq(df.mean(), ddf.mean(split_every=split_every))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_comparison_nan_test_series_comparison_nan.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_comparison_nan_test_series_comparison_nan.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1300, "end_line": 1313, "span_ids": ["test_series_comparison_nan"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"comparison\", [\"lt\", \"gt\", \"le\", \"ge\", \"ne\", \"eq\"])\ndef test_series_comparison_nan(comparison):\n s = pd.Series([1, 2, 3, 4, 5, 6, 7])\n s_nan = pd.Series([1, -1, 8, np.nan, 5, 6, 2.4])\n ds = dd.from_pandas(s, 3)\n ds_nan = dd.from_pandas(s_nan, 3)\n\n fill_value = 7\n comparison_pd = getattr(s, comparison)\n comparison_dd = getattr(ds, comparison)\n assert_eq(\n comparison_dd(ds_nan, fill_value=fill_value),\n comparison_pd(s_nan, fill_value=fill_value),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_moment_test_empty_df_reductions.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_moment_test_empty_df_reductions.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1339, "end_line": 1364, "span_ids": ["test_empty_df_reductions", "test_moment"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not scipy\")\ndef test_moment():\n from dask.array import stats\n from dask.array.utils import assert_eq\n\n df = pd.Series(list(range(10)))\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(stats.moment(ddf, 2, 0), scipy.stats.moment(df, 2, 0))\n\n\n@pytest.mark.parametrize(\"func\", [\"sum\", \"count\", \"mean\", \"var\", \"sem\"])\ndef test_empty_df_reductions(func):\n pdf = pd.DataFrame()\n ddf = dd.from_pandas(pdf, npartitions=1)\n\n dsk_func = getattr(ddf.__class__, func)\n pd_func = getattr(pdf.__class__, func)\n\n 
assert_eq(dsk_func(ddf), pd_func(pdf))\n\n idx = pd.date_range(\"2000\", periods=4)\n pdf = pd.DataFrame(index=idx)\n ddf = dd.from_pandas(pdf, npartitions=1)\n\n assert_eq(dsk_func(ddf), pd_func(pdf))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_pd_test_meta.dd_utils_assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_pd_test_meta.dd_utils_assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_boolean.py", "file_name": "test_boolean.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 19, "span_ids": ["imports", "test_meta"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\n\nimport dask.dataframe as dd\n\n\ndef test_meta():\n values = pd.array([True, False, None], dtype=\"boolean\")\n ds = dd.from_pandas(pd.Series(values), 2)\n assert ds.dtype == pd.BooleanDtype()\n\n dd.utils.assert_eq(ds._meta_nonempty, pd.Series([True, pd.NA], dtype=\"boolean\"))\n\n ddf = dd.from_pandas(pd.DataFrame({\"A\": values}), 2)\n assert ddf.dtypes[\"A\"] == pd.BooleanDtype()\n\n dd.utils.assert_eq(\n ddf._meta_nonempty,\n pd.DataFrame({\"A\": pd.array([True, pd.NA], dtype=\"boolean\")}),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_test_ops_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_boolean.py_test_ops_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_boolean.py", "file_name": "test_boolean.py", "file_type": "text/x-python", "category": "test", "start_line": 28, "end_line": 38, "span_ids": ["test_ops"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ops():\n s1 = pd.Series(pd.array([True, False, None] * 3, dtype=\"boolean\"))\n s2 = pd.Series(pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype=\"boolean\"))\n\n ds1 = dd.from_pandas(s1, 2)\n ds2 = dd.from_pandas(s2, 2)\n\n dd.utils.assert_eq(ds1 | ds2, s1 | s2)\n dd.utils.assert_eq(ds1 & ds2, s1 & s2)\n dd.utils.assert_eq(ds1 ^ ds2, s1 ^ s2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_operator_frames6._i_set_index_i_y_i_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_operator_frames6._i_set_index_i_y_i_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 75, "span_ids": ["imports"], "tokens": 532}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe import _compat\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.core import _concat\nfrom dask.dataframe.utils import (\n assert_eq,\n clear_known_categories,\n is_categorical_dtype,\n make_meta,\n)\n\n# Generate a list of categorical series and indices\ncat_series = []\nfor ordered in [True, False]:\n s = pd.Series(pd.Categorical(list(\"bacbac\"), ordered=ordered))\n ds = dd.from_pandas(s, npartitions=2)\n cat_series.append((s, ds))\ns = pd.Series(range(6), index=pd.Categorical(list(\"bacbac\")))\nds = dd.from_pandas(s, npartitions=2)\ncat_series.append((ds.compute().index, ds.index))\n\n\na = pd.DataFrame(\n {\n \"v\": list(\"abcde\"),\n \"w\": list(\"xxxxx\"),\n \"x\": np.arange(5),\n \"y\": list(\"abcbc\"),\n \"z\": np.arange(5, dtype=\"f8\"),\n }\n)\n\nb = pd.DataFrame(\n {\n \"v\": list(\"fghij\"),\n \"w\": list(\"yyyyy\"),\n \"x\": np.arange(5, 10),\n \"y\": list(\"abbba\"),\n \"z\": np.arange(5, 10, dtype=\"f8\"),\n }\n)\n\nc = pd.DataFrame(\n {\n \"v\": list(\"klmno\"),\n \"w\": list(\"zzzzz\"),\n \"x\": np.arange(10, 15),\n \"y\": list(\"bcbcc\"),\n \"z\": np.arange(10, 15, dtype=\"f8\"),\n }\n)\n\nframes = [a, b, c]\nframes2 = []\nfor df in frames:\n df.w = df.w.astype(\"category\")\n df.y = df.y.astype(\"category\")\n frames2.append(\n df.assign(\n w=df.w.cat.set_categories(list(\"xyz\")),\n y=df.y.cat.set_categories(list(\"abc\")),\n )\n )\nframes3 = [i.set_index(i.y) for i in frames]\nframes4 = [i.set_index(i.y) for i in frames2]\nframes5 = [i.set_index([i.y, i.x]) for i in frames]\nframes6 = [i.set_index([i.y, i.x]) for i in frames2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_concat_unions_categoricals_test_concat_unions_categoricals.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_concat_unions_categoricals_test_concat_unions_categoricals.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 78, "end_line": 117, "span_ids": ["test_concat_unions_categoricals"], 
"tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_unions_categoricals():\n # Categorical DataFrame, regular index\n tm.assert_frame_equal(_concat(frames), pd.concat(frames2))\n\n # Categorical Series, regular index\n tm.assert_series_equal(\n _concat([i.y for i in frames]), pd.concat([i.y for i in frames2])\n )\n\n # Categorical Index\n tm.assert_index_equal(\n _concat([i.index for i in frames3]), pd.concat([i for i in frames4]).index\n )\n\n # Categorical DataFrame, Categorical Index\n tm.assert_frame_equal(_concat(frames3), pd.concat(frames4))\n\n # Non-categorical DataFrame, Categorical Index\n tm.assert_frame_equal(\n _concat([i[[\"x\", \"z\"]] for i in frames3]),\n pd.concat([i[[\"x\", \"z\"]] for i in frames4]),\n )\n\n # Categorical Series, Categorical Index\n tm.assert_series_equal(\n _concat([i.z for i in frames3]), pd.concat([i.z for i in frames4])\n )\n\n # Non-categorical Series, Categorical Index\n tm.assert_series_equal(\n _concat([i.x for i in frames3]), pd.concat([i.x for i in frames4])\n )\n\n # MultiIndex with Categorical Index\n tm.assert_index_equal(\n _concat([i.index for i in frames5]), pd.concat([i for i in frames6]).index\n )\n\n # DataFrame, MultiIndex with CategoricalIndex\n tm.assert_frame_equal(_concat(frames5), pd.concat(frames6))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_unknown_categoricals_test_is_categorical_dtype.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_unknown_categoricals_test_is_categorical_dtype.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 119, "end_line": 149, "span_ids": ["test_is_categorical_dtype", "test_unknown_categoricals"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unknown_categoricals(shuffle_method):\n ddf = dd.DataFrame(\n {(\"unknown\", i): df for (i, df) in enumerate(frames)},\n \"unknown\",\n make_meta(\n {\"v\": \"object\", \"w\": \"category\", \"x\": \"i8\", \"y\": \"category\", \"z\": \"f8\"},\n parent_meta=frames[0],\n ),\n [None] * 4,\n )\n # Compute\n df = ddf.compute()\n\n assert_eq(ddf.w.value_counts(), df.w.value_counts())\n assert_eq(ddf.w.nunique(), df.w.nunique())\n\n assert_eq(ddf.groupby(ddf.w).sum(), df.groupby(df.w).sum())\n assert_eq(ddf.groupby(ddf.w).y.nunique(), df.groupby(df.w).y.nunique())\n assert_eq(ddf.y.groupby(ddf.w).count(), df.y.groupby(df.w).count())\n\n\ndef test_is_categorical_dtype():\n df = pd.DataFrame({\"cat\": pd.Categorical([1, 2, 3, 
4]), \"x\": [1, 2, 3, 4]})\n\n assert is_categorical_dtype(df[\"cat\"])\n assert not is_categorical_dtype(df[\"x\"])\n\n ddf = dd.from_pandas(df, 2)\n\n assert is_categorical_dtype(ddf[\"cat\"])\n assert not is_categorical_dtype(ddf[\"x\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_test_categorize.None_2.ddf_categorize_split_ever": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_test_categorize.None_2.ddf_categorize_split_ever", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 212, "span_ids": ["test_categorize"], "tokens": 637}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorize():\n # rename y to y_ to avoid pandas future warning about ambiguous\n # levels\n meta = clear_known_categories(frames4[0]).rename(columns={\"y\": \"y_\"})\n ddf = dd.DataFrame(\n {(\"unknown\", i): df for (i, df) in enumerate(frames3)},\n \"unknown\",\n meta,\n [None] * 4,\n ).rename(columns={\"y\": \"y_\"})\n ddf = ddf.assign(w=ddf.w.cat.set_categories([\"x\", \"y\", \"z\"]))\n assert ddf.w.cat.known\n assert not ddf.y_.cat.known\n assert not ddf.index.cat.known\n df = ddf.compute()\n\n for index in [None, True, False]:\n known_index = index is not False\n # By default categorize object and unknown cat columns\n ddf2 = ddf.categorize(index=index)\n assert ddf2.y_.cat.known\n assert ddf2.v.cat.known\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df.astype({\"v\": \"category\"}), check_categorical=False)\n\n # Specifying split_every works\n ddf2 = ddf.categorize(index=index, split_every=2)\n assert ddf2.y_.cat.known\n assert ddf2.v.cat.known\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df.astype({\"v\": \"category\"}), check_categorical=False)\n\n # Specifying one column doesn't affect others\n ddf2 = ddf.categorize(\"v\", index=index)\n assert not ddf2.y_.cat.known\n assert ddf2.v.cat.known\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df.astype({\"v\": \"category\"}), check_categorical=False)\n\n ddf2 = ddf.categorize(\"y_\", index=index)\n assert ddf2.y_.cat.known\n assert ddf2.v.dtype == \"object\"\n assert ddf2.index.cat.known == known_index\n assert_eq(ddf2, df)\n\n ddf_known_index = ddf.categorize(columns=[], index=True)\n assert ddf_known_index.index.cat.known\n assert_eq(ddf_known_index, df)\n\n # Specifying known categorical or no columns is a no-op:\n assert ddf.categorize([\"w\"], index=False) is ddf\n assert ddf.categorize([], index=False) is ddf\n assert ddf_known_index.categorize([\"w\"]) is ddf_known_index\n assert ddf_known_index.categorize([]) is ddf_known_index\n\n # Bad split_every fails\n with pytest.raises(ValueError):\n ddf.categorize(split_every=1)\n\n with 
pytest.raises(ValueError):\n ddf.categorize(split_every=\"foo\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_dtype_test_categorical_dtype.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_dtype_test_categorical_dtype.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 215, "end_line": 230, "span_ids": ["test_categorical_dtype"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_dtype():\n cat_dtype = dd.categorical.categorical_dtype(\n meta=a, categories=[\"a\", \"b\", \"c\"], ordered=False\n )\n assert_eq(cat_dtype.categories, pd.Index([\"a\", \"b\", \"c\"]))\n assert_eq(cat_dtype.ordered, False)\n\n cat_dtype = dd.categorical.categorical_dtype(meta=a, categories=[\"a\", \"b\", \"c\"])\n assert_eq(cat_dtype.categories, pd.Index([\"a\", \"b\", \"c\"]))\n assert_eq(cat_dtype.ordered, False)\n\n cat_dtype = dd.categorical.categorical_dtype(\n meta=a, categories=[1, 100, 200], ordered=True\n )\n assert_eq(cat_dtype.categories, pd.Index([1, 100, 200]))\n assert_eq(cat_dtype.ordered, True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_index_test_categorize_index.assert_ddf_categorize_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_index_test_categorize_index.assert_ddf_categorize_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 233, "end_line": 262, "span_ids": ["test_categorize_index"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorize_index():\n # Object dtype\n ddf = dd.from_pandas(_compat.makeDataFrame(), npartitions=5)\n df = ddf.compute()\n\n ddf2 = ddf.categorize()\n assert ddf2.index.cat.known\n assert_eq(\n ddf2,\n df.set_index(pd.CategoricalIndex(df.index)),\n check_divisions=False,\n check_categorical=False,\n )\n\n assert ddf.categorize(index=False) is ddf\n\n # Non-object dtype\n ddf = dd.from_pandas(df.set_index(df.A.rename(\"idx\")), npartitions=5)\n 
df = ddf.compute()\n\n ddf2 = ddf.categorize(index=True)\n assert ddf2.index.cat.known\n assert_eq(\n ddf2,\n df.set_index(pd.CategoricalIndex(df.index)),\n check_divisions=False,\n check_categorical=False,\n )\n\n assert ddf.categorize() is ddf", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_test_categorical_set_index.with_dask_config_set_sche.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_test_categorical_set_index.with_dask_config_set_sche.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 284, "span_ids": ["test_categorical_set_index"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_set_index(shuffle_method):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [\"a\", \"b\", \"b\", \"c\"]})\n df[\"y\"] = pd.Categorical(df[\"y\"], categories=[\"a\", \"b\", \"c\"], ordered=True)\n a = dd.from_pandas(df, npartitions=2)\n\n with dask.config.set(scheduler=\"sync\"):\n b = a.set_index(\"y\", npartitions=a.npartitions)\n d1, d2 = b.get_partition(0), b.get_partition(1)\n assert list(d1.index.compute()) == [\"a\"]\n assert list(sorted(d2.index.compute())) == [\"b\", \"b\", \"c\"]\n\n b = a.set_index(a.y, npartitions=a.npartitions)\n d1, d2 = b.get_partition(0), b.get_partition(1)\n assert list(d1.index.compute()) == [\"a\"]\n assert list(sorted(d2.index.compute())) == [\"b\", \"b\", \"c\"]\n\n b = a.set_index(\"y\", divisions=[\"a\", \"b\", \"c\"], npartitions=a.npartitions)\n d1, d2 = b.get_partition(0), b.get_partition(1)\n assert list(d1.index.compute()) == [\"a\"]\n assert list(sorted(d2.index.compute())) == [\"b\", \"b\", \"c\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_npartitions_vs_ncategories_test_categorical_set_index_npartitions_vs_ncategories._Test_passes_if_this_wor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_set_index_npartitions_vs_ncategories_test_categorical_set_index_npartitions_vs_ncategories._Test_passes_if_this_wor", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 288, "end_line": 302, "span_ids": ["test_categorical_set_index_npartitions_vs_ncategories"], "tokens": 190}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ncategories\", [1, 3, 6])\n@pytest.mark.parametrize(\"npartitions\", [1, 3, 6])\ndef test_categorical_set_index_npartitions_vs_ncategories(npartitions, ncategories):\n \"\"\"https://github.com/dask/dask/issues/5343\"\"\"\n rows_per_category = 10\n n_rows = ncategories * rows_per_category\n\n categories = [\"CAT\" + str(i) for i in range(ncategories)]\n pdf = pd.DataFrame(\n {\"id\": categories * rows_per_category, \"value\": np.random.random(n_rows)}\n )\n ddf = dd.from_pandas(pdf, npartitions=npartitions)\n ddf[\"id\"] = ddf[\"id\"].astype(\"category\").cat.as_ordered()\n ddf = ddf.set_index(\"id\")\n # Test passes if this worked and didn't raise any warnings", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_repartition_on_categoricals_test_repartition_on_categoricals.assert_eq_df_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_repartition_on_categoricals_test_repartition_on_categoricals.assert_eq_df_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 305, "end_line": 315, "span_ids": ["test_repartition_on_categoricals"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 4])\ndef test_repartition_on_categoricals(npartitions):\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"abababcbcb\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf[\"y\"] = ddf[\"y\"].astype(\"category\")\n ddf2 = ddf.repartition(npartitions=npartitions)\n\n df = df.copy()\n df[\"y\"] = df[\"y\"].astype(\"category\")\n assert_eq(df, ddf)\n assert_eq(df, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_accessor_presence_test_categorical_accessor_presence.assert_not_hasattr_ddf_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorical_accessor_presence_test_categorical_accessor_presence.assert_not_hasattr_ddf_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 318, "end_line": 331, 
"span_ids": ["test_categorical_accessor_presence"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_accessor_presence():\n df = pd.DataFrame({\"x\": list(\"a\" * 5 + \"b\" * 5 + \"c\" * 5), \"y\": range(15)})\n df.x = df.x.astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert \"cat\" in dir(ddf.x)\n assert \"cat\" not in dir(ddf.y)\n assert hasattr(ddf.x, \"cat\")\n assert not hasattr(ddf.y, \"cat\")\n\n df2 = df.set_index(df.x)\n ddf2 = dd.from_pandas(df2, npartitions=2, sort=False)\n assert hasattr(ddf2.index, \"categories\")\n assert not hasattr(ddf.index, \"categories\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_nan_test_return_type_known_categories.assert_isinstance_ret_typ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_test_categorize_nan_test_return_type_known_categories.assert_isinstance_ret_typ", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 361, "span_ids": ["assert_array_index_eq", "get_cat", "test_return_type_known_categories", "test_categorize_nan"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorize_nan():\n df = dd.from_pandas(\n pd.DataFrame({\"A\": [\"a\", \"b\", \"a\", float(\"nan\")]}), npartitions=2\n )\n with warnings.catch_warnings(record=True) as record:\n df.categorize().compute()\n assert not record\n\n\ndef get_cat(x):\n return x if isinstance(x, pd.CategoricalIndex) else x.cat\n\n\ndef assert_array_index_eq(left, right, check_divisions=False):\n \"\"\"left and right are equal, treating index and array as equivalent\"\"\"\n assert_eq(\n left,\n pd.Index(right) if isinstance(right, np.ndarray) else right,\n check_divisions=check_divisions,\n )\n\n\ndef test_return_type_known_categories():\n df = pd.DataFrame({\"A\": [\"a\", \"b\", \"c\"]})\n df[\"A\"] = df[\"A\"].astype(\"category\")\n dask_df = dd.from_pandas(df, 2)\n ret_type = dask_df.A.cat.as_known()\n assert isinstance(ret_type, dd.core.Series)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor_TestCategoricalAccessor.test_callable.None_2": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor_TestCategoricalAccessor.test_callable.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 364, "end_line": 412, "span_ids": ["TestCategoricalAccessor", "TestCategoricalAccessor.test_properties", "TestCategoricalAccessor.test_callable"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCategoricalAccessor:\n @pytest.mark.parametrize(\"series\", cat_series)\n @pytest.mark.parametrize(\n \"prop, compare\",\n [\n (\"categories\", assert_array_index_eq),\n (\"ordered\", assert_eq),\n (\"codes\", assert_array_index_eq),\n ],\n )\n def test_properties(self, series, prop, compare):\n s, ds = series\n expected = getattr(get_cat(s), prop)\n result = getattr(get_cat(ds), prop)\n compare(result, expected, check_divisions=False)\n\n @pytest.mark.parametrize(\"series\", cat_series)\n @pytest.mark.parametrize(\n \"method, kwargs\",\n [\n (\"add_categories\", dict(new_categories=[\"d\", \"e\"])),\n (\"as_ordered\", {}),\n (\"as_unordered\", {}),\n (\"as_ordered\", {}),\n (\"remove_categories\", dict(removals=[\"a\"])),\n (\"rename_categories\", dict(new_categories=[\"d\", \"e\", \"f\"])),\n (\"reorder_categories\", dict(new_categories=[\"a\", \"b\", \"c\"])),\n (\"set_categories\", dict(new_categories=[\"a\", \"e\", \"b\"])),\n (\"remove_unused_categories\", {}),\n ],\n )\n def test_callable(self, series, method, kwargs):\n op = operator.methodcaller(method, **kwargs)\n\n # Series\n s, ds = series\n expected = op(get_cat(s))\n result = op(get_cat(ds))\n assert_eq(result, expected, check_divisions=False)\n assert_eq(\n get_cat(result._meta).categories,\n get_cat(expected).categories,\n check_divisions=False,\n )\n assert_eq(\n get_cat(result._meta).ordered,\n get_cat(expected).ordered,\n check_divisions=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor.test_categorical_empty_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_categorical.py_TestCategoricalAccessor.test_categorical_empty_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_categorical.py", "file_name": "test_categorical.py", "file_type": "text/x-python", "category": "test", "start_line": 414, "end_line": 462, "span_ids": ["TestCategoricalAccessor.test_categorical_empty", "TestCategoricalAccessor.test_categorical_non_string_raises", "TestCategoricalAccessor.test_unknown_categories", "TestCategoricalAccessor.test_categorical_string_ops"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCategoricalAccessor:\n\n def test_categorical_empty(self):\n # GH 1705\n\n def make_empty():\n return pd.DataFrame({\"A\": pd.Categorical([np.nan, np.nan])})\n\n def make_full():\n return pd.DataFrame({\"A\": pd.Categorical([\"a\", \"a\"])})\n\n a = dd.from_delayed([dask.delayed(make_empty)(), dask.delayed(make_full)()])\n # Used to raise an IndexError\n a.A.cat.categories\n\n @pytest.mark.parametrize(\"series\", cat_series)\n def test_unknown_categories(self, series):\n a, da = series\n assert da.cat.known\n da = da.cat.as_unknown()\n assert not da.cat.known\n\n with pytest.raises(NotImplementedError):\n da.cat.categories\n with pytest.raises(NotImplementedError):\n da.cat.codes\n\n db = da.cat.set_categories([\"a\", \"b\", \"c\"])\n assert db.cat.known\n tm.assert_index_equal(db.cat.categories, get_cat(a).categories)\n assert_array_index_eq(db.cat.codes, get_cat(a).codes)\n\n db = da.cat.as_known()\n assert db.cat.known\n res = db.compute()\n tm.assert_index_equal(db.cat.categories, get_cat(res).categories)\n assert_array_index_eq(db.cat.codes, get_cat(res).codes)\n\n def test_categorical_string_ops(self):\n a = pd.Series([\"a\", \"a\", \"b\"], dtype=\"category\")\n da = dd.from_pandas(a, 2)\n result = da.str.upper()\n expected = a.str.upper()\n assert_eq(result, expected)\n\n def test_categorical_non_string_raises(self):\n a = pd.Series([1, 2, 3], dtype=\"category\")\n da = dd.from_pandas(a, 2)\n with pytest.raises(AttributeError):\n da.str.upper()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_from_decimal_import_Decim_test_register_extension_type.assert_eq_df_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_from_decimal_import_Decim_test_register_extension_type.assert_eq_df_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_extensions.py", "file_name": "test_extensions.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 33, "span_ids": ["imports", "test_register_extension_type", "__1", "_"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from decimal import Decimal\n\nimport pytest\n\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\n\npd = pytest.importorskip(\"pandas\")\n\nfrom pandas.tests.extension.decimal.array import DecimalArray, DecimalDtype\n\nfrom dask.dataframe.extensions import make_array_nonempty, make_scalar\n\n\n@make_array_nonempty.register(DecimalDtype)\ndef _(dtype):\n return DecimalArray._from_sequence([Decimal(\"0\"), Decimal(\"NaN\")], dtype=dtype)\n\n\n@make_scalar.register(Decimal)\ndef _(x):\n return Decimal(\"1\")\n\n\ndef test_register_extension_type():\n arr = DecimalArray._from_sequence([Decimal(\"1.0\")] * 10)\n ser = pd.Series(arr)\n 
dser = dd.from_pandas(ser, 2)\n assert_eq(ser, dser)\n\n df = pd.DataFrame({\"A\": ser})\n ddf = dd.from_pandas(df, 2)\n assert_eq(df, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_test_reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_extensions.py_test_reduction_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_extensions.py", "file_name": "test_extensions.py", "file_type": "text/x-python", "category": "test", "start_line": 36, "end_line": 51, "span_ids": ["test_reduction", "test_scalar"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction():\n ser = pd.Series(DecimalArray._from_sequence([Decimal(\"0\"), Decimal(\"1\")]))\n dser = dd.from_pandas(ser, 2)\n assert_eq(ser.mean(skipna=False), dser.mean(skipna=False))\n\n # It's unclear whether this can be reliably provided, at least with the current\n # implementation, which uses pandas.DataFrame.sum(), returning a (homogenous)\n # series which has potentially cast values.\n\n # assert_eq(ser.to_frame().mean(skipna=False), dser.to_frame().mean(skipna=False))\n\n\ndef test_scalar():\n result = dd.utils.make_meta(Decimal(\"1.0\"), parent_meta=pd.DataFrame())\n assert result == Decimal(\"1.0\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_test_dataframe_format.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_test_dataframe_format.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 45, "end_line": 136, "span_ids": ["test_dataframe_format"], "tokens": 697}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format():\n pytest.importorskip(\"jinja2\")\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n }\n )\n ddf = dd.from_pandas(df, 3)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"0 int64 object category[known]\\n\"\n \"3 ... ... ...\\n\"\n \"6 ... ... ...\\n\"\n \"7 ... ... 
...\\n\"\n \"Dask Name: from_pandas, 3 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp = (\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"0 int64 object category[known]\\n\"\n \"3 ... ... ...\\n\"\n \"6 ... ... ...\\n\"\n \"7 ... ... ...\"\n )\n assert ddf.to_string() == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
[HTML table residue; recoverable content: header A | B | C; npartitions=3; row 0: int64 | object | category[known]; rows 3, 6, 7: ... | ... | ...]
\"\"\"\n\n exp = \"\"\"
Dask DataFrame Structure:
\n{exp_table}\n
Dask Name: from_pandas, 3 tasks
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = \"\"\"
Dask DataFrame Structure:
\n{style}{exp_table}\n
Dask Name: from_pandas, 3 tasks
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_with_index_test_dataframe_format_with_index.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_with_index_test_dataframe_format_with_index.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 139, "end_line": 221, "span_ids": ["test_dataframe_format_with_index"], "tokens": 619}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format_with_index():\n pytest.importorskip(\"jinja2\")\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n },\n index=list(\"ABCDEFGH\"),\n )\n ddf = dd.from_pandas(df, 3)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \"A int64 object category[known]\\n\"\n \"D ... ... ...\\n\"\n \"G ... ... ...\\n\"\n \"H ... ... ...\\n\"\n \"Dask Name: from_pandas, 3 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
[HTML table residue; recoverable content: header A | B | C; npartitions=3; row A: int64 | object | category[known]; rows D, G, H: ... | ... | ...]
\"\"\"\n\n exp = \"\"\"
Dask DataFrame Structure:
\n{exp_table}\n
Dask Name: from_pandas, 3 tasks
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = \"\"\"
Dask DataFrame Structure:
\n{style}{exp_table}\n
Dask Name: from_pandas, 3 tasks
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_unknown_divisions_test_dataframe_format_unknown_divisions.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_unknown_divisions_test_dataframe_format_unknown_divisions.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 318, "span_ids": ["test_dataframe_format_unknown_divisions"], "tokens": 704}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format_unknown_divisions():\n pytest.importorskip(\"jinja2\")\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"B\": list(\"ABCDEFGH\"),\n \"C\": pd.Categorical(list(\"AAABBBCC\")),\n }\n )\n ddf = dd.from_pandas(df, 3)\n ddf = ddf.clear_divisions()\n assert not ddf.known_divisions\n\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \" int64 object category[known]\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\\n\"\n \"Dask Name: from_pandas, 3 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp = (\n \" A B C\\n\"\n \"npartitions=3 \\n\"\n \" int64 object category[known]\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\\n\"\n \" ... ... ...\"\n )\n assert ddf.to_string() == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
[HTML table residue; recoverable content: header A | B | C; npartitions=3; index labels blank (unknown divisions); first row: int64 | object | category[known]; remaining rows: ... | ... | ...]
\"\"\"\n\n exp = \"\"\"
Dask DataFrame Structure:
\n{exp_table}\n
Dask Name: from_pandas, 3 tasks
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div and has style\n exp = \"\"\"
Dask DataFrame Structure:
\n{style}{exp_table}\n
Dask Name: from_pandas, 3 tasks
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_long_test_dataframe_format_long.assert_ddf__repr_html__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_dataframe_format_long_test_dataframe_format_long.assert_ddf__repr_html__", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 321, "end_line": 420, "span_ids": ["test_dataframe_format_long"], "tokens": 763}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_format_long():\n pytest.importorskip(\"jinja2\")\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8] * 10,\n \"B\": list(\"ABCDEFGH\") * 10,\n \"C\": pd.Categorical(list(\"AAABBBCC\") * 10),\n }\n )\n ddf = dd.from_pandas(df, 10)\n exp = (\n \"Dask DataFrame Structure:\\n\"\n \" A B C\\n\"\n \"npartitions=10 \\n\"\n \"0 int64 object category[known]\\n\"\n \"8 ... ... ...\\n\"\n \"... ... ... ...\\n\"\n \"72 ... ... ...\\n\"\n \"79 ... ... ...\\n\"\n \"Dask Name: from_pandas, 10 tasks\"\n )\n assert repr(ddf) == exp\n assert str(ddf) == exp\n\n exp = (\n \" A B C\\n\"\n \"npartitions=10 \\n\"\n \"0 int64 object category[known]\\n\"\n \"8 ... ... ...\\n\"\n \"... ... ... ...\\n\"\n \"72 ... ... ...\\n\"\n \"79 ... ... ...\"\n )\n assert ddf.to_string() == exp\n\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
[HTML table residue; recoverable content: header A | B | C; npartitions=10; row 0: int64 | object | category[known]; rows 8, ..., 72, 79: ... | ... | ...]
\"\"\"\n\n exp = \"\"\"
Dask DataFrame Structure:
\n{exp_table}\n
Dask Name: from_pandas, 10 tasks
\"\"\".format(\n exp_table=exp_table\n )\n assert ddf.to_html() == exp\n\n # table is boxed with div\n exp = \"\"\"
Dask DataFrame Structure:
\n{style}{exp_table}\n
Dask Name: from_pandas, 10 tasks
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_test_series_format.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_test_series_format.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 418, "end_line": 450, "span_ids": ["test_series_format"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_format():\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list(\"ABCDEFGH\"))\n ds = dd.from_pandas(s, 3)\n exp = \"\"\"Dask Series Structure:\nnpartitions=3\nA int64\nD ...\nG ...\nH ...\ndtype: int64\nDask Name: from_pandas, 3 tasks\"\"\"\n assert repr(ds) == exp\n assert str(ds) == exp\n\n exp = \"\"\"npartitions=3\nA int64\nD ...\nG ...\nH ...\"\"\"\n assert ds.to_string() == exp\n\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list(\"ABCDEFGH\"), name=\"XXX\")\n ds = dd.from_pandas(s, 3)\n exp = \"\"\"Dask Series Structure:\nnpartitions=3\nA int64\nD ...\nG ...\nH ...\nName: XXX, dtype: int64\nDask Name: from_pandas, 3 tasks\"\"\"\n assert repr(ds) == exp\n assert str(ds) == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_long_test_series_format_long.assert_ds_to_string_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_series_format_long_test_series_format_long.assert_ds_to_string_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 453, "end_line": 465, "span_ids": ["test_series_format_long"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_format_long():\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10] * 10, index=list(\"ABCDEFGHIJ\") * 10)\n ds = dd.from_pandas(s, 10)\n exp = (\n \"Dask Series Structure:\\nnpartitions=10\\nA int64\\nB ...\\n\"\n \" ... \\nJ ...\\nJ ...\\ndtype: int64\\n\"\n \"Dask Name: from_pandas, 10 tasks\"\n )\n assert repr(ds) == exp\n assert str(ds) == exp\n\n exp = \"npartitions=10\\nA int64\\nB ...\\n ... 
\\nJ ...\\nJ ...\"\n assert ds.to_string() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_index_format_test_index_format.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_index_format_test_index_format.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 468, "end_line": 499, "span_ids": ["test_index_format"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_format():\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list(\"ABCDEFGH\"))\n ds = dd.from_pandas(s, 3)\n exp = \"\"\"Dask Index Structure:\nnpartitions=3\nA object\nD ...\nG ...\nH ...\ndtype: object\nDask Name: from_pandas, 6 tasks\"\"\"\n assert repr(ds.index) == exp\n assert str(ds.index) == exp\n\n s = pd.Series(\n [1, 2, 3, 4, 5, 6, 7, 8],\n index=pd.CategoricalIndex([1, 2, 3, 4, 5, 6, 7, 8], name=\"YYY\"),\n )\n ds = dd.from_pandas(s, 3)\n exp = dedent(\n \"\"\"\\\n Dask Index Structure:\n npartitions=3\n 1 category[known]\n 4 ...\n 7 ...\n 8 ...\n Name: YYY, dtype: category\n Dask Name: from_pandas, 6 tasks\"\"\"\n )\n assert repr(ds.index) == exp\n assert str(ds.index) == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_categorical_format_test_duplicate_columns_repr.repr_frame_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_categorical_format_test_duplicate_columns_repr.repr_frame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 502, "end_line": 529, "span_ids": ["test_categorical_format", "test_duplicate_columns_repr"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_format():\n s = pd.Series([\"a\", \"b\", \"c\"]).astype(\"category\")\n known = dd.from_pandas(s, npartitions=1)\n unknown = known.cat.as_unknown()\n exp = (\n \"Dask Series Structure:\\n\"\n \"npartitions=1\\n\"\n \"0 category[known]\\n\"\n \"2 ...\\n\"\n \"dtype: category\\n\"\n \"Dask Name: from_pandas, 1 tasks\"\n )\n assert repr(known) == exp\n exp = (\n \"Dask Series Structure:\\n\"\n \"npartitions=1\\n\"\n 
\"0 category[unknown]\\n\"\n \"2 ...\\n\"\n \"dtype: category\\n\"\n \"Dask Name: from_pandas, 1 tasks\"\n )\n assert repr(unknown) == exp\n\n\ndef test_duplicate_columns_repr():\n arr = da.from_array(np.arange(10).reshape(5, 2), chunks=(5, 2))\n frame = dd.from_dask_array(arr, columns=[\"a\", \"a\"])\n repr(frame)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_empty_repr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_test_empty_repr_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 537, "end_line": 574, "span_ids": ["test_empty_repr"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_repr():\n pytest.importorskip(\"jinja2\")\n df = pd.DataFrame()\n ddf = dd.from_pandas(df, npartitions=1)\n exp = (\n \"Empty Dask DataFrame Structure:\\n\"\n \"Columns: []\\n\"\n \"Divisions: [, ]\\n\"\n \"Dask Name: from_pandas, 1 tasks\"\n )\n assert repr(ddf) == exp\n exp_table = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
[HTML table residue; recoverable content: npartitions=1 header row; the empty DataFrame has no columns]
\"\"\"\n exp = \"\"\"
Dask DataFrame Structure:
\n{style}{exp_table}\n
Dask Name: from_pandas, 1 tasks
\"\"\".format(\n style=style, exp_table=exp_table\n )\n assert ddf._repr_html_() == exp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_xfail_test_groupby_internal_repr_xfail.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_xfail_test_groupby_internal_repr_xfail.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 48, "end_line": 60, "span_ids": ["test_groupby_internal_repr_xfail"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"uncertain how to handle. See issue #3481.\")\ndef test_groupby_internal_repr_xfail():\n pdf = pd.DataFrame({\"x\": [0, 1, 2, 3, 4, 6, 7, 8, 9, 10], \"y\": list(\"abcbabbcda\")})\n ddf = dd.from_pandas(pdf, 3)\n\n gp = pdf.groupby(\"y\")[\"x\"]\n dp = ddf.groupby(\"y\")[\"x\"]\n assert isinstance(dp.obj, dd.Series)\n assert_eq(dp.obj, gp.obj)\n\n gp = pdf.groupby(pdf.y)[\"x\"]\n dp = ddf.groupby(ddf.y)[\"x\"]\n assert isinstance(dp.obj, dd.Series)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_test_groupby_internal_repr.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_internal_repr_test_groupby_internal_repr.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 63, "end_line": 98, "span_ids": ["test_groupby_internal_repr"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_internal_repr():\n pdf = pd.DataFrame({\"x\": [0, 1, 2, 3, 4, 6, 7, 8, 9, 10], \"y\": list(\"abcbabbcda\")})\n ddf = dd.from_pandas(pdf, 3)\n\n gp = pdf.groupby(\"y\")\n dp = ddf.groupby(\"y\")\n assert isinstance(dp, dd.groupby.DataFrameGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)\n assert isinstance(dp.obj, dd.DataFrame)\n assert_eq(dp.obj, gp.obj)\n\n gp = pdf.groupby(\"y\")[\"x\"]\n dp = ddf.groupby(\"y\")[\"x\"]\n assert isinstance(dp, dd.groupby.SeriesGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)\n\n gp = 
pdf.groupby(\"y\")[[\"x\"]]\n dp = ddf.groupby(\"y\")[[\"x\"]]\n assert isinstance(dp, dd.groupby.DataFrameGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)\n # slicing should not affect to internal\n assert isinstance(dp.obj, dd.DataFrame)\n assert_eq(dp.obj, gp.obj)\n\n gp = pdf.groupby(pdf.y)[\"x\"]\n dp = ddf.groupby(ddf.y)[\"x\"]\n assert isinstance(dp, dd.groupby.SeriesGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.SeriesGroupBy)\n\n gp = pdf.groupby(pdf.y)[[\"x\"]]\n dp = ddf.groupby(ddf.y)[[\"x\"]]\n assert isinstance(dp, dd.groupby.DataFrameGroupBy)\n assert isinstance(dp._meta, pd.core.groupby.DataFrameGroupBy)\n # slicing should not affect to internal\n assert isinstance(dp.obj, dd.DataFrame)\n assert_eq(dp.obj, gp.obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_multilevel_test_groupby_dir.assert_b_c_d_e_not_in_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_multilevel_test_groupby_dir.assert_b_c_d_e_not_in_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 215, "end_line": 257, "span_ids": ["test_groupby_dir", "test_full_groupby_multilevel"], "tokens": 422}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: [\"a\"],\n lambda df: [\"a\", \"b\"],\n lambda df: df[\"a\"],\n lambda df: [df[\"a\"], df[\"b\"]],\n pytest.param(\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n marks=pytest.mark.xfail(reason=\"not yet supported\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\"reverse\", [True, False])\ndef test_full_groupby_multilevel(grouper, reverse):\n index = [0, 1, 3, 5, 6, 8, 9, 9, 9]\n if reverse:\n index = index[::-1]\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"d\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0],\n },\n index=index,\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n def func(df):\n return df.assign(b=df.b - df.b.mean())\n\n with pytest.warns(UserWarning, match=\"`meta` is not specified\"):\n assert_eq(\n df.groupby(grouper(df)).apply(func), ddf.groupby(grouper(ddf)).apply(func)\n )\n\n\ndef test_groupby_dir():\n df = pd.DataFrame({\"a\": range(10), \"b c d e\": range(10)})\n ddf = dd.from_pandas(df, npartitions=2)\n g = ddf.groupby(\"a\")\n assert \"a\" in dir(g)\n assert \"b c d e\" not in dir(g)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_getitem_test_groupby_multilevel_getitem.if_agg_func_mean_.else_.assert_eq_a_b_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_getitem_test_groupby_multilevel_getitem.if_agg_func_mean_.else_.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 307, "end_line": 359, "span_ids": ["test_groupby_multilevel_getitem"], "tokens": 475}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: df.groupby(\"a\")[\"b\"],\n lambda df: df.groupby([\"a\", \"b\"]),\n lambda df: df.groupby([\"a\", \"b\"])[\"c\"],\n lambda df: df.groupby(df[\"a\"])[[\"b\", \"c\"]],\n lambda df: df.groupby(\"a\")[[\"b\", \"c\"]],\n lambda df: df.groupby(\"a\")[[\"b\"]],\n lambda df: df.groupby([\"a\", \"b\", \"c\"]),\n ],\n)\ndef test_groupby_multilevel_getitem(grouper, agg_func):\n # nunique is not implemented for DataFrameGroupBy\n if agg_func == \"nunique\":\n return\n\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 2, 3],\n \"b\": [1, 2, 1, 4, 2, 1],\n \"c\": [1, 3, 2, 1, 1, 2],\n \"d\": [1, 2, 1, 1, 2, 2],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n dask_group = grouper(ddf)\n pandas_group = grouper(df)\n\n # covariance/correlation only works with N+1 columns\n if isinstance(pandas_group, pd.core.groupby.SeriesGroupBy) and agg_func in (\n \"cov\",\n \"corr\",\n ):\n return\n\n dask_agg = getattr(dask_group, agg_func)\n pandas_agg = getattr(pandas_group, agg_func)\n\n assert isinstance(dask_group, dd.groupby._GroupBy)\n assert isinstance(pandas_group, pd.core.groupby.GroupBy)\n\n if agg_func == \"mean\":\n assert_eq(dask_agg(), pandas_agg().astype(float))\n else:\n a = dask_agg()\n with warnings.catch_warnings():\n # pandas does `.cov([[1], [1]])` which numpy warns on (all NaN).\n # Pandas does strange things with exceptions in groupby.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n b = pandas_agg()\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_agg_test_groupby_multilevel_agg.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multilevel_agg_test_groupby_multilevel_agg.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 362, "end_line": 383, "span_ids": ["test_groupby_multilevel_agg"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_groupby_multilevel_agg():\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 2, 3],\n \"b\": [1, 2, 1, 4, 2, 1],\n \"c\": [1, 3, 2, 1, 1, 2],\n \"d\": [1, 2, 1, 1, 2, 2],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n sol = df.groupby([\"a\"]).mean()\n res = ddf.groupby([\"a\"]).mean()\n assert_eq(res, sol)\n\n sol = df.groupby([\"a\", \"c\"]).mean()\n res = ddf.groupby([\"a\", \"c\"]).mean()\n assert_eq(res, sol)\n\n sol = df.groupby([df[\"a\"], df[\"c\"]]).mean()\n res = ddf.groupby([ddf[\"a\"], ddf[\"c\"]]).mean()\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_get_group_test_groupby_get_group.for_ddkey_pdkey_in_b_.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_get_group_test_groupby_get_group.for_ddkey_pdkey_in_b_.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 386, "end_line": 404, "span_ids": ["test_groupby_get_group"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_get_group():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 6], \"b\": [4, 2, 7]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 2, 6], \"b\": [3, 3, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [4, 3, 7], \"b\": [1, 1, 3]}, index=[9, 9, 9]),\n }\n meta = dsk[(\"x\", 0)]\n d = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n full = d.compute()\n\n for ddkey, pdkey in [(\"b\", \"b\"), (d.b, full.b), (d.b + 1, full.b + 1)]:\n ddgrouped = d.groupby(ddkey)\n pdgrouped = full.groupby(pdkey)\n # DataFrame\n assert_eq(ddgrouped.get_group(2), pdgrouped.get_group(2))\n assert_eq(ddgrouped.get_group(3), pdgrouped.get_group(3))\n # Series\n assert_eq(ddgrouped.a.get_group(3), pdgrouped.a.get_group(3))\n assert_eq(ddgrouped.a.get_group(2), pdgrouped.a.get_group(2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_groupby_nunique_test_series_groupby_propagates_names.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_groupby_nunique_test_series_groupby_propagates_names.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 407, "end_line": 432, "span_ids": ["test_dataframe_groupby_nunique_across_group_same_value", "test_dataframe_groupby_nunique", 
"test_series_groupby_propagates_names"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dataframe_groupby_nunique():\n strings = list(\"aaabbccccdddeee\")\n data = np.random.randn(len(strings))\n ps = pd.DataFrame(dict(strings=strings, data=data))\n s = dd.from_pandas(ps, npartitions=3)\n expected = ps.groupby(\"strings\")[\"data\"].nunique()\n assert_eq(s.groupby(\"strings\")[\"data\"].nunique(), expected)\n\n\ndef test_dataframe_groupby_nunique_across_group_same_value():\n strings = list(\"aaabbccccdddeee\")\n data = list(map(int, \"123111223323412\"))\n ps = pd.DataFrame(dict(strings=strings, data=data))\n s = dd.from_pandas(ps, npartitions=3)\n expected = ps.groupby(\"strings\")[\"data\"].nunique()\n assert_eq(s.groupby(\"strings\")[\"data\"].nunique(), expected)\n\n\ndef test_series_groupby_propagates_names():\n df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\n ddf = dd.from_pandas(df, 2)\n func = lambda df: df[\"y\"].sum()\n with pytest.warns(UserWarning): # meta inference\n result = ddf.groupby(\"x\").apply(func)\n expected = df.groupby(\"x\").apply(func)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_test_series_groupby.for_dg_pdg_in_dask_gro.assert_eq_dg_prod_pdg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_test_series_groupby.for_dg_pdg_in_dask_gro.assert_eq_dg_prod_pdg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 453, "span_ids": ["test_series_groupby"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby():\n s = pd.Series([1, 2, 2, 1, 1])\n pd_group = s.groupby(s)\n\n ss = dd.from_pandas(s, npartitions=2)\n dask_group = ss.groupby(ss)\n\n pd_group2 = s.groupby(s + 1)\n dask_group2 = ss.groupby(ss + 1)\n\n for dg, pdg in [(dask_group, pd_group), (pd_group2, dask_group2)]:\n assert_eq(dg.count(), pdg.count())\n assert_eq(dg.sum(), pdg.sum())\n assert_eq(dg.min(), pdg.min())\n assert_eq(dg.max(), pdg.max())\n assert_eq(dg.size(), pdg.size())\n assert_eq(dg.first(), pdg.first())\n assert_eq(dg.last(), pdg.last())\n assert_eq(dg.prod(), pdg.prod())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_errors_test_series_groupby_errors.None_4._dask_should_raise_the_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_errors_test_series_groupby_errors.None_4._dask_should_raise_the_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 456, "end_line": 476, "span_ids": ["test_series_groupby_errors"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby_errors():\n s = pd.Series([1, 2, 2, 1, 1])\n\n ss = dd.from_pandas(s, npartitions=2)\n\n msg = \"No group keys passed!\"\n with pytest.raises(ValueError) as err:\n s.groupby([]) # pandas\n assert msg in str(err.value)\n with pytest.raises(ValueError) as err:\n ss.groupby([]) # dask should raise the same error\n assert msg in str(err.value)\n\n sss = dd.from_pandas(s, npartitions=5)\n with pytest.raises(NotImplementedError):\n ss.groupby(sss)\n\n with pytest.raises(KeyError):\n s.groupby(\"x\") # pandas\n with pytest.raises(KeyError):\n ss.groupby(\"x\") # dask should raise the same error", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_index_array_test_groupby_set_index.pytest_raises_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_index_array_test_groupby_set_index.pytest_raises_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 479, "end_line": 501, "span_ids": ["test_groupby_set_index", "test_groupby_index_array"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_index_array():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df, npartitions=2)\n\n # first select column, then group\n assert_eq(\n df.A.groupby(df.index.month).nunique(),\n ddf.A.groupby(ddf.index.month).nunique(),\n check_names=False,\n )\n\n # first group, then select column\n assert_eq(\n df.groupby(df.index.month).A.nunique(),\n ddf.groupby(ddf.index.month).A.nunique(),\n check_names=False,\n )\n\n\ndef test_groupby_set_index():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df, npartitions=2)\n pytest.raises(TypeError, lambda: ddf.groupby(df.index.month, as_index=False))", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series_test_split_apply_combine_on_series.for_ddkey_pdkey_in_b_.None_1.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series_test_split_apply_combine_on_series.for_ddkey_pdkey_in_b_.None_1.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 502, "end_line": 552, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 754}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\ndef test_split_apply_combine_on_series(empty):\n if empty:\n pdf = pd.DataFrame({\"a\": [1.0], \"b\": [1.0]}, index=[0]).iloc[:0]\n # There's a bug in pandas where df.groupby(...).var(ddof=0) results in\n # no columns. Just skip these checks for now.\n ddofs = []\n else:\n ddofs = [0, 1, 2]\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7], \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n for ddkey, pdkey in [(\"b\", \"b\"), (ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:\n assert_eq(ddf.groupby(ddkey).a.min(), pdf.groupby(pdkey).a.min())\n assert_eq(ddf.groupby(ddkey).a.max(), pdf.groupby(pdkey).a.max())\n assert_eq(ddf.groupby(ddkey).a.count(), pdf.groupby(pdkey).a.count())\n assert_eq(ddf.groupby(ddkey).a.mean(), pdf.groupby(pdkey).a.mean())\n assert_eq(ddf.groupby(ddkey).a.nunique(), pdf.groupby(pdkey).a.nunique())\n assert_eq(ddf.groupby(ddkey).a.size(), pdf.groupby(pdkey).a.size())\n assert_eq(ddf.groupby(ddkey).a.first(), pdf.groupby(pdkey).a.first())\n assert_eq(ddf.groupby(ddkey).a.last(), pdf.groupby(pdkey).a.last())\n assert_eq(ddf.groupby(ddkey).a.tail(), pdf.groupby(pdkey).a.tail())\n assert_eq(ddf.groupby(ddkey).a.head(), pdf.groupby(pdkey).a.head())\n for ddof in ddofs:\n assert_eq(ddf.groupby(ddkey).a.var(ddof), pdf.groupby(pdkey).a.var(ddof))\n assert_eq(ddf.groupby(ddkey).a.std(ddof), pdf.groupby(pdkey).a.std(ddof))\n\n assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())\n assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())\n assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())\n assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())\n assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean())\n assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())\n assert_eq(ddf.groupby(ddkey).first(), pdf.groupby(pdkey).first())\n assert_eq(ddf.groupby(ddkey).last(), pdf.groupby(pdkey).last())\n assert_eq(ddf.groupby(ddkey).prod(), pdf.groupby(pdkey).prod())\n\n for ddof in ddofs:\n assert_eq(\n ddf.groupby(ddkey).var(ddof),\n pdf.groupby(pdkey).var(ddof),\n check_dtype=False,\n )\n assert_eq(\n ddf.groupby(ddkey).std(ddof),\n pdf.groupby(pdkey).std(ddof),\n check_dtype=False,\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf.for_ddof_in_ddofs_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf_test_split_apply_combine_on_series.for_ddkey_pdkey_in_ddf.for_ddof_in_ddofs_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 554, "end_line": 588, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\ndef test_split_apply_combine_on_series(empty):\n # ... other code\n\n for ddkey, pdkey in [(ddf.b, pdf.b), (ddf.b + 1, pdf.b + 1)]:\n assert_eq(\n ddf.a.groupby(ddkey).sum(), pdf.a.groupby(pdkey).sum(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).max(), pdf.a.groupby(pdkey).max(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).count(),\n pdf.a.groupby(pdkey).count(),\n check_names=False,\n )\n assert_eq(\n ddf.a.groupby(ddkey).mean(), pdf.a.groupby(pdkey).mean(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).nunique(),\n pdf.a.groupby(pdkey).nunique(),\n check_names=False,\n )\n assert_eq(\n ddf.a.groupby(ddkey).first(),\n pdf.a.groupby(pdkey).first(),\n check_names=False,\n )\n assert_eq(\n ddf.a.groupby(ddkey).last(), pdf.a.groupby(pdkey).last(), check_names=False\n )\n assert_eq(\n ddf.a.groupby(ddkey).prod(), pdf.a.groupby(pdkey).prod(), check_names=False\n )\n\n for ddof in ddofs:\n assert_eq(ddf.a.groupby(ddkey).var(ddof), pdf.a.groupby(pdkey).var(ddof))\n assert_eq(ddf.a.groupby(ddkey).std(ddof), pdf.a.groupby(pdkey).std(ddof))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_i_in_0_4_7__test_split_apply_combine_on_series.for_i_in_0_4_7_.for_ddof_in_ddofs_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_i_in_0_4_7__test_split_apply_combine_on_series.for_i_in_0_4_7_.for_ddof_in_ddofs_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 590, "end_line": 644, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 1109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\ndef test_split_apply_combine_on_series(empty):\n # ... other code\n\n for i in [0, 4, 7]:\n assert_eq(ddf.groupby(ddf.b > i).a.sum(), pdf.groupby(pdf.b > i).a.sum())\n assert_eq(ddf.groupby(ddf.b > i).a.min(), pdf.groupby(pdf.b > i).a.min())\n assert_eq(ddf.groupby(ddf.b > i).a.max(), pdf.groupby(pdf.b > i).a.max())\n assert_eq(ddf.groupby(ddf.b > i).a.count(), pdf.groupby(pdf.b > i).a.count())\n assert_eq(ddf.groupby(ddf.b > i).a.mean(), pdf.groupby(pdf.b > i).a.mean())\n assert_eq(\n ddf.groupby(ddf.b > i).a.nunique(), pdf.groupby(pdf.b > i).a.nunique()\n )\n assert_eq(ddf.groupby(ddf.b > i).a.size(), pdf.groupby(pdf.b > i).a.size())\n assert_eq(ddf.groupby(ddf.b > i).a.first(), pdf.groupby(pdf.b > i).a.first())\n assert_eq(ddf.groupby(ddf.b > i).a.last(), pdf.groupby(pdf.b > i).a.last())\n assert_eq(ddf.groupby(ddf.b > i).a.tail(), pdf.groupby(pdf.b > i).a.tail())\n assert_eq(ddf.groupby(ddf.b > i).a.head(), pdf.groupby(pdf.b > i).a.head())\n assert_eq(ddf.groupby(ddf.b > i).a.prod(), pdf.groupby(pdf.b > i).a.prod())\n\n assert_eq(ddf.groupby(ddf.a > i).b.sum(), pdf.groupby(pdf.a > i).b.sum())\n assert_eq(ddf.groupby(ddf.a > i).b.min(), pdf.groupby(pdf.a > i).b.min())\n assert_eq(ddf.groupby(ddf.a > i).b.max(), pdf.groupby(pdf.a > i).b.max())\n assert_eq(ddf.groupby(ddf.a > i).b.count(), pdf.groupby(pdf.a > i).b.count())\n assert_eq(ddf.groupby(ddf.a > i).b.mean(), pdf.groupby(pdf.a > i).b.mean())\n assert_eq(\n ddf.groupby(ddf.a > i).b.nunique(), pdf.groupby(pdf.a > i).b.nunique()\n )\n assert_eq(ddf.groupby(ddf.b > i).b.size(), pdf.groupby(pdf.b > i).b.size())\n assert_eq(ddf.groupby(ddf.b > i).b.first(), pdf.groupby(pdf.b > i).b.first())\n assert_eq(ddf.groupby(ddf.b > i).b.last(), pdf.groupby(pdf.b > i).b.last())\n assert_eq(ddf.groupby(ddf.b > i).b.tail(), pdf.groupby(pdf.b > i).b.tail())\n assert_eq(ddf.groupby(ddf.b > i).b.head(), pdf.groupby(pdf.b > i).b.head())\n assert_eq(ddf.groupby(ddf.b > i).b.prod(), pdf.groupby(pdf.b > i).b.prod())\n\n assert_eq(ddf.groupby(ddf.b > i).sum(), pdf.groupby(pdf.b > i).sum())\n assert_eq(ddf.groupby(ddf.b > i).min(), pdf.groupby(pdf.b > i).min())\n 
assert_eq(ddf.groupby(ddf.b > i).max(), pdf.groupby(pdf.b > i).max())\n assert_eq(ddf.groupby(ddf.b > i).count(), pdf.groupby(pdf.b > i).count())\n assert_eq(ddf.groupby(ddf.b > i).mean(), pdf.groupby(pdf.b > i).mean())\n assert_eq(ddf.groupby(ddf.b > i).size(), pdf.groupby(pdf.b > i).size())\n assert_eq(ddf.groupby(ddf.b > i).first(), pdf.groupby(pdf.b > i).first())\n assert_eq(ddf.groupby(ddf.b > i).last(), pdf.groupby(pdf.b > i).last())\n assert_eq(ddf.groupby(ddf.b > i).prod(), pdf.groupby(pdf.b > i).prod())\n\n assert_eq(ddf.groupby(ddf.a > i).sum(), pdf.groupby(pdf.a > i).sum())\n assert_eq(ddf.groupby(ddf.a > i).min(), pdf.groupby(pdf.a > i).min())\n assert_eq(ddf.groupby(ddf.a > i).max(), pdf.groupby(pdf.a > i).max())\n assert_eq(ddf.groupby(ddf.a > i).count(), pdf.groupby(pdf.a > i).count())\n assert_eq(ddf.groupby(ddf.a > i).mean(), pdf.groupby(pdf.a > i).mean())\n assert_eq(ddf.groupby(ddf.a > i).size(), pdf.groupby(pdf.a > i).size())\n assert_eq(ddf.groupby(ddf.a > i).first(), pdf.groupby(pdf.a > i).first())\n assert_eq(ddf.groupby(ddf.a > i).last(), pdf.groupby(pdf.a > i).last())\n assert_eq(ddf.groupby(ddf.a > i).prod(), pdf.groupby(pdf.a > i).prod())\n\n for ddof in ddofs:\n assert_eq(\n ddf.groupby(ddf.b > i).std(ddof), pdf.groupby(pdf.b > i).std(ddof)\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_reduction_split_test_groupby_reduction_split.assert_call_ddf_a_groupby": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_reduction_split_test_groupby_reduction_split.assert_call_ddf_a_groupby", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 724, "end_line": 781, "span_ids": ["test_groupby_reduction_split"], "tokens": 714}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"keyword\", [\"split_every\", \"split_out\"])\ndef test_groupby_reduction_split(keyword):\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 100, \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 100}\n )\n ddf = dd.from_pandas(pdf, npartitions=15)\n\n def call(g, m, **kwargs):\n return getattr(g, m)(**kwargs)\n\n # DataFrame\n for m in AGG_FUNCS:\n # nunique is not implemented for DataFrameGroupBy\n # covariance/correlation is not a series aggregation\n if m in (\"nunique\", \"cov\", \"corr\"):\n continue\n res = call(ddf.groupby(\"b\"), m, **{keyword: 2})\n sol = call(pdf.groupby(\"b\"), m)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\"), m)._name != res._name\n\n res = call(ddf.groupby(\"b\"), \"var\", ddof=2, **{keyword: 2})\n sol = call(pdf.groupby(\"b\"), \"var\", ddof=2)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\"), \"var\", ddof=2)._name != res._name\n\n # Series, post select\n for m in AGG_FUNCS:\n # covariance/correlation is not a series 
aggregation\n if m in (\"cov\", \"corr\"):\n continue\n res = call(ddf.groupby(\"b\").a, m, **{keyword: 2})\n sol = call(pdf.groupby(\"b\").a, m)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\").a, m)._name != res._name\n\n res = call(ddf.groupby(\"b\").a, \"var\", ddof=2, **{keyword: 2})\n sol = call(pdf.groupby(\"b\").a, \"var\", ddof=2)\n assert_eq(res, sol)\n assert call(ddf.groupby(\"b\").a, \"var\", ddof=2)._name != res._name\n\n # Series, pre select\n for m in AGG_FUNCS:\n # covariance/correlation is not a series aggregation\n if m in (\"cov\", \"corr\"):\n continue\n res = call(ddf.a.groupby(ddf.b), m, **{keyword: 2})\n sol = call(pdf.a.groupby(pdf.b), m)\n # There's a bug in pandas 0.18.0 with `pdf.a.groupby(pdf.b).count()`\n # not forwarding the series name. Skip name checks here for now.\n assert_eq(res, sol, check_names=False)\n assert call(ddf.a.groupby(ddf.b), m)._name != res._name\n\n res = call(ddf.a.groupby(ddf.b), \"var\", ddof=2, **{keyword: 2})\n sol = call(pdf.a.groupby(pdf.b), \"var\", ddof=2)\n\n assert_eq(res, sol)\n assert call(ddf.a.groupby(ddf.b), \"var\", ddof=2)._name != res._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_test_apply_or_transform_shuffle.with_pytest_warns_UserWar.assert_eq_func_grouped_pd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_test_apply_or_transform_shuffle.with_pytest_warns_UserWar.assert_eq_func_grouped_pd", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 781, "end_line": 820, "span_ids": ["test_apply_or_transform_shuffle"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouped\",\n [\n lambda df: df.groupby(\"A\"),\n lambda df: df.groupby(df[\"A\"]),\n lambda df: df.groupby(df[\"A\"] + 1),\n lambda df: df.groupby(\"A\")[\"B\"],\n # SeriesGroupBy:\n lambda df: df.groupby(\"A\")[\"B\"],\n lambda df: df.groupby(df[\"A\"])[\"B\"],\n lambda df: df.groupby(df[\"A\"] + 1)[\"B\"],\n # Series.groupby():\n lambda df: df.B.groupby(df[\"A\"]),\n lambda df: df.B.groupby(df[\"A\"] + 1),\n # DataFrameGroupBy with column slice:\n lambda df: df.groupby(\"A\")[[\"B\", \"C\"]],\n lambda df: df.groupby(df[\"A\"])[[\"B\", \"C\"]],\n lambda df: df.groupby(df[\"A\"] + 1)[[\"B\", \"C\"]],\n ],\n)\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda grp: grp.apply(lambda x: x.sum()),\n lambda grp: grp.transform(lambda x: x.sum()),\n ],\n)\ndef test_apply_or_transform_shuffle(grouped, func):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4] * 5,\n \"B\": np.random.randn(20),\n \"C\": np.random.randn(20),\n \"D\": np.random.randn(20),\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.warns(UserWarning): # meta inference\n assert_eq(func(grouped(pdf)), 
func(grouped(ddf)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_multilevel_test_apply_or_transform_shuffle_multilevel.with_pytest_warns_UserWar.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_apply_or_transform_shuffle_multilevel_test_apply_or_transform_shuffle_multilevel.with_pytest_warns_UserWar.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 823, "end_line": 869, "span_ids": ["test_apply_or_transform_shuffle_multilevel"], "tokens": 371}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: \"AA\",\n lambda df: [\"AA\", \"AB\"],\n lambda df: df[\"AA\"],\n lambda df: [df[\"AA\"], df[\"AB\"]],\n lambda df: df[\"AA\"] + 1,\n pytest.param(\n lambda df: [df[\"AA\"] + 1, df[\"AB\"] + 1],\n marks=pytest.mark.xfail(\"NotImplemented\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda grouped: grouped.apply(lambda x: x.sum()),\n lambda grouped: grouped.transform(lambda x: x.sum()),\n ],\n)\ndef test_apply_or_transform_shuffle_multilevel(grouper, func):\n pdf = pd.DataFrame(\n {\n \"AB\": [1, 2, 3, 4] * 5,\n \"AA\": [1, 2, 3, 4] * 5,\n \"B\": np.random.randn(20),\n \"C\": np.random.randn(20),\n \"D\": np.random.randn(20),\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.warns(UserWarning):\n # DataFrameGroupBy\n assert_eq(func(ddf.groupby(grouper(ddf))), func(pdf.groupby(grouper(pdf))))\n\n # SeriesGroupBy\n assert_eq(\n func(ddf.groupby(grouper(ddf))[\"B\"]), func(pdf.groupby(grouper(pdf))[\"B\"])\n )\n\n # DataFrameGroupBy with column slice\n assert_eq(\n func(ddf.groupby(grouper(ddf))[[\"B\", \"C\"]]),\n func(pdf.groupby(grouper(pdf))[[\"B\", \"C\"]]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_numeric_column_names_test_numeric_column_names.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_numeric_column_names_test_numeric_column_names.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 872, "end_line": 882, "span_ids": ["test_numeric_column_names"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_numeric_column_names():\n # df.groupby(0)[df.columns] fails if all columns are numbers (pandas bug)\n # This ensures that we cast all column iterables to list beforehand.\n df = pd.DataFrame({0: [0, 1, 0, 1], 1: [1, 2, 3, 4], 2: [0, 1, 0, 1]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.groupby(0).sum(), df.groupby(0).sum())\n assert_eq(ddf.groupby([0, 2]).sum(), df.groupby([0, 2]).sum())\n assert_eq(\n ddf.groupby(0).apply(lambda x: x, meta={0: int, 1: int, 2: int}),\n df.groupby(0).apply(lambda x: x),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multiprocessing_test_groupby_multiprocessing.with_dask_config_set_sche.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multiprocessing_test_groupby_multiprocessing.with_dask_config_set_sche.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 906, "end_line": 913, "span_ids": ["test_groupby_multiprocessing"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_multiprocessing():\n df = pd.DataFrame({\"A\": [1, 2, 3, 4, 5], \"B\": [\"1\", \"1\", \"a\", \"a\", \"a\"]})\n ddf = dd.from_pandas(df, npartitions=3)\n with dask.config.set(scheduler=\"processes\"):\n assert_eq(\n ddf.groupby(\"B\").apply(lambda x: x, meta={\"A\": int, \"B\": object}),\n df.groupby(\"B\").apply(lambda x: x),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__single_element_groups_test_aggregate__single_element_groups.assert_eq_expected_ddf_g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate__single_element_groups_test_aggregate__single_element_groups.assert_eq_expected_ddf_g", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1032, "end_line": 1051, "span_ids": ["test_aggregate__single_element_groups"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"def test_aggregate__single_element_groups(agg_func):\n spec = agg_func\n\n # nunique/cov is not supported in specs\n if spec in (\"nunique\", \"cov\", \"corr\"):\n return\n\n pdf = pd.DataFrame(\n {\"a\": [1, 1, 3, 3], \"b\": [4, 4, 16, 16], \"c\": [1, 1, 4, 4], \"d\": [1, 1, 3, 3]},\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = pdf.groupby([\"a\", \"d\"]).agg(spec)\n\n # NOTE: for std the result is not recast ot the original dtype\n if spec in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n assert_eq(expected, ddf.groupby([\"a\", \"d\"]).agg(spec))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_build_agg_args__reuse_of_intermediates_test_aggregate_build_agg_args__reuse_of_intermediates.assert_len_with_mean_fina": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_build_agg_args__reuse_of_intermediates_test_aggregate_build_agg_args__reuse_of_intermediates.assert_len_with_mean_fina", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1054, "end_line": 1078, "span_ids": ["test_aggregate_build_agg_args__reuse_of_intermediates"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aggregate_build_agg_args__reuse_of_intermediates():\n \"\"\"Aggregate reuses intermediates. 
For example, with sum, count, and mean\n the sums and counts are only calculated once across the graph and reused to\n compute the mean.\n \"\"\"\n from dask.dataframe.groupby import _build_agg_args\n\n no_mean_spec = [(\"foo\", \"sum\", \"input\"), (\"bar\", \"count\", \"input\")]\n\n with_mean_spec = [\n (\"foo\", \"sum\", \"input\"),\n (\"bar\", \"count\", \"input\"),\n (\"baz\", \"mean\", \"input\"),\n ]\n\n no_mean_chunks, no_mean_aggs, no_mean_finalizers = _build_agg_args(no_mean_spec)\n with_mean_chunks, with_mean_aggs, with_mean_finalizers = _build_agg_args(\n with_mean_spec\n )\n\n assert len(no_mean_chunks) == len(with_mean_chunks)\n assert len(no_mean_aggs) == len(with_mean_aggs)\n\n assert len(no_mean_finalizers) == len(no_mean_spec)\n assert len(with_mean_finalizers) == len(with_mean_spec)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_aggregations_multilevel_test_dataframe_aggregations_multilevel.if_agg_func_nunique_.if_agg_func_in_cov_c.else_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_dataframe_aggregations_multilevel_test_dataframe_aggregations_multilevel.if_agg_func_nunique_.if_agg_func_in_cov_c.else_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1149, "end_line": 1203, "span_ids": ["test_dataframe_aggregations_multilevel"], "tokens": 582}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: [\"a\"],\n lambda df: [\"a\", \"b\"],\n lambda df: df[\"a\"],\n lambda df: [df[\"a\"], df[\"b\"]],\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n ],\n)\ndef test_dataframe_aggregations_multilevel(grouper, agg_func):\n def call(g, m, **kwargs):\n return getattr(g, m)(**kwargs)\n\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"d\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n\n ddf = dd.from_pandas(pdf, npartitions=10)\n\n # covariance only works with N+1 columns\n if agg_func not in (\"cov\", \"corr\"):\n assert_eq(\n call(pdf.groupby(grouper(pdf))[\"c\"], agg_func),\n call(ddf.groupby(grouper(ddf))[\"c\"], agg_func, split_every=2),\n )\n\n # not supported by pandas\n if agg_func != \"nunique\":\n assert_eq(\n call(pdf.groupby(grouper(pdf))[[\"c\", \"d\"]], agg_func),\n call(ddf.groupby(grouper(ddf))[[\"c\", \"d\"]], agg_func, split_every=2),\n )\n\n if agg_func in (\"cov\", \"corr\"):\n # there are sorting issues between pandas and chunk cov w/dask\n df = call(pdf.groupby(grouper(pdf)), agg_func).sort_index()\n cols = sorted(list(df.columns))\n df = df[cols]\n dddf = call(ddf.groupby(grouper(ddf)), agg_func, split_every=2).compute()\n dddf = dddf.sort_index()\n 
cols = sorted(list(dddf.columns))\n        dddf = dddf[cols]\n        assert_eq(df, dddf)\n    else:\n        assert_eq(\n            call(pdf.groupby(grouper(pdf)), agg_func),\n            call(ddf.groupby(grouper(ddf)), agg_func, split_every=2),\n        )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregations_multilevel_test_series_aggregations_multilevel.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_aggregations_multilevel_test_series_aggregations_multilevel.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1206, "end_line": 1244, "span_ids": ["test_series_aggregations_multilevel"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n    \"grouper\",\n    [\n        lambda df: df[\"a\"],\n        lambda df: [df[\"a\"], df[\"b\"]],\n        lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n    ],\n)\ndef test_series_aggregations_multilevel(grouper, agg_func):\n    \"\"\"\n    similar to ``test_dataframe_aggregations_multilevel``, but series do not\n    support all groupby args.\n    \"\"\"\n\n    def call(g, m, **kwargs):\n        return getattr(g, m)(**kwargs)\n\n    # covariance/correlation is not a series aggregation\n    if agg_func in (\"cov\", \"corr\"):\n        return\n\n    pdf = pd.DataFrame(\n        {\n            \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n            \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n            \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n        },\n        columns=[\"c\", \"b\", \"a\"],\n    )\n\n    ddf = dd.from_pandas(pdf, npartitions=10)\n\n    assert_eq(\n        call(pdf[\"c\"].groupby(grouper(pdf)), agg_func),\n        call(ddf[\"c\"].groupby(grouper(ddf)), agg_func, split_every=2),\n        # for pandas ~ 0.18, the name is not properly propagated for\n        # the mean aggregation\n        check_names=(agg_func not in {\"mean\", \"nunique\"}),\n    )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_meta_content_test_groupby_meta_content.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_meta_content_test_groupby_meta_content.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1191, "end_line": 1232, "span_ids": ["test_groupby_meta_content"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name",
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: df[\"a\"],\n lambda df: df[\"a\"] > 2,\n lambda df: [df[\"a\"], df[\"b\"]],\n lambda df: [df[\"a\"] > 2],\n pytest.param(\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n marks=pytest.mark.xfail(\n not PANDAS_GT_150,\n reason=\"index dtype does not coincide: boolean != empty\",\n ),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"group_and_slice\",\n [\n lambda df, grouper: df.groupby(grouper(df)),\n lambda df, grouper: df[\"c\"].groupby(grouper(df)),\n lambda df, grouper: df.groupby(grouper(df))[\"c\"],\n ],\n)\ndef test_groupby_meta_content(group_and_slice, grouper):\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\"],\n )\n\n ddf = dd.from_pandas(pdf, npartitions=10)\n\n expected = group_and_slice(pdf, grouper).first().head(0)\n meta = group_and_slice(ddf, grouper)._meta.first()\n meta_nonempty = group_and_slice(ddf, grouper)._meta_nonempty.first().head(0)\n\n assert_eq(expected, meta)\n assert_eq(expected, meta_nonempty)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_non_aligned_index_test_groupy_non_aligned_index.None_4.ddf3_groupby_ddf7_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_non_aligned_index_test_groupy_non_aligned_index.None_4.ddf3_groupby_ddf7_a_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1290, "end_line": 1321, "span_ids": ["test_groupy_non_aligned_index"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupy_non_aligned_index():\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\"],\n )\n\n ddf3 = dd.from_pandas(pdf, npartitions=3)\n ddf7 = dd.from_pandas(pdf, npartitions=7)\n\n # working examples\n ddf3.groupby([\"a\", \"b\"])\n ddf3.groupby([ddf3[\"a\"], ddf3[\"b\"]])\n\n # misaligned divisions\n with pytest.raises(NotImplementedError):\n ddf3.groupby(ddf7[\"a\"])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf7[\"a\"], ddf7[\"b\"]])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf7[\"a\"], ddf3[\"b\"]])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf3[\"a\"], ddf7[\"b\"]])\n\n with pytest.raises(NotImplementedError):\n ddf3.groupby([ddf7[\"a\"], \"b\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_series_wrong_grouper_test_groupy_series_wrong_grouper.None_3.s_groupby_s_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupy_series_wrong_grouper_test_groupy_series_wrong_grouper.None_3.s_groupby_s_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1324, "end_line": 1352, "span_ids": ["test_groupy_series_wrong_grouper"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupy_series_wrong_grouper():\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 10,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 10,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 10,\n },\n columns=[\"c\", \"b\", \"a\"],\n )\n\n df = dd.from_pandas(df, npartitions=3)\n s = df[\"a\"]\n\n # working index values\n s.groupby(s)\n s.groupby([s, s])\n\n # non working index values\n with pytest.raises(KeyError):\n s.groupby(\"foo\")\n\n with pytest.raises(KeyError):\n s.groupby([s, \"foo\"])\n\n with pytest.raises(ValueError):\n s.groupby(df)\n\n with pytest.raises(ValueError):\n s.groupby([s, df])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_hash_groupby_aggregate_test_hash_groupby_aggregate.assert_eq_result_df_grou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_hash_groupby_aggregate_test_hash_groupby_aggregate.assert_eq_result_df_grou", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1355, "end_line": 1372, "span_ids": ["test_hash_groupby_aggregate"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 4, 20])\n@pytest.mark.parametrize(\"split_every\", [2, 5])\n@pytest.mark.parametrize(\"split_out\", [None, 1, 5, 20])\ndef test_hash_groupby_aggregate(npartitions, split_every, split_out):\n df = pd.DataFrame({\"x\": np.arange(100) % 10, \"y\": np.ones(100)})\n ddf = dd.from_pandas(df, npartitions)\n\n result = ddf.groupby(\"x\").y.var(split_every=split_every, split_out=split_out)\n\n dsk = result.__dask_optimize__(result.dask, result.__dask_keys__())\n from dask.core import get_deps\n\n dependencies, dependents = get_deps(dsk)\n\n assert result.npartitions == (split_out or 1)\n assert len([k for k, v 
in dependencies.items() if not v]) == npartitions\n\n assert_eq(result, df.groupby(\"x\").y.var())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_out_multi_column_groupby_test_split_out_multi_column_groupby.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_out_multi_column_groupby_test_split_out_multi_column_groupby.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1375, "end_line": 1385, "span_ids": ["test_split_out_multi_column_groupby"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_split_out_multi_column_groupby():\n df = pd.DataFrame(\n {\"x\": np.arange(100) % 10, \"y\": np.ones(100), \"z\": [1, 2, 3, 4, 5] * 20}\n )\n\n ddf = dd.from_pandas(df, npartitions=10)\n\n result = ddf.groupby([\"x\", \"y\"]).z.mean(split_out=4)\n expected = df.groupby([\"x\", \"y\"]).z.mean()\n\n assert_eq(result, expected, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_num_test_groupby_split_out_num.with_pytest_raises_TypeEr.ddf_groupby_A_split_ou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_num_test_groupby_split_out_num.with_pytest_raises_TypeEr.ddf_groupby_A_split_ou", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1293, "end_line": 1304, "span_ids": ["test_groupby_split_out_num"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_split_out_num():\n # GH 1841\n ddf = dd.from_pandas(\n pd.DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4]}), npartitions=2\n )\n assert ddf.groupby(\"A\").sum().npartitions == 1\n assert ddf.groupby(\"A\").sum(split_out=2).npartitions == 2\n assert ddf.groupby(\"A\").sum(split_out=3).npartitions == 3\n\n with pytest.raises(TypeError):\n # groupby doesn't accept split_out\n ddf.groupby(\"A\", split_out=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_not_supported_test_groupby_numeric_column.assert_eq_ddf_groupby_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_not_supported_test_groupby_numeric_column.assert_eq_ddf_groupby_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1402, "end_line": 1420, "span_ids": ["test_groupby_numeric_column", "test_groupby_not_supported"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_not_supported():\n ddf = dd.from_pandas(\n pd.DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4]}), npartitions=2\n )\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", axis=1)\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", level=1)\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", as_index=False)\n with pytest.raises(TypeError):\n ddf.groupby(\"A\", squeeze=True)\n\n\ndef test_groupby_numeric_column():\n df = pd.DataFrame({\"A\": [\"foo\", \"foo\", \"bar\"], 0: [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(ddf.groupby(ddf.A)[0].sum(), df.groupby(df.A)[0].sum())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_test_cumulative.assert_eq_getattr_g_func": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_test_cumulative.assert_eq_getattr_g_func", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1328, "end_line": 1345, "span_ids": ["test_cumulative"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"sel\", [\"c\", \"d\", [\"c\", \"d\"]])\n@pytest.mark.parametrize(\"key\", [\"a\", [\"a\", \"b\"]])\n@pytest.mark.parametrize(\"func\", [\"cumsum\", \"cumprod\", \"cumcount\"])\ndef test_cumulative(func, key, sel):\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 6,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 6,\n \"c\": np.random.randn(54),\n \"d\": np.random.randn(54),\n },\n columns=[\"a\", \"b\", \"c\", \"d\"],\n )\n df.iloc[[-18, -12, -6], -1] = np.nan\n ddf = dd.from_pandas(df, npartitions=10)\n\n g, dg = (d.groupby(key)[sel] for d in (df, ddf))\n assert_eq(getattr(g, func)(), 
getattr(dg, func)())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_axis1_test_cumulative_axis1.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_cumulative_axis1_test_cumulative_axis1.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1443, "end_line": 1456, "span_ids": ["test_cumulative_axis1"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"cumsum\", \"cumprod\"])\ndef test_cumulative_axis1(func):\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 6, 4, 4, 6, 4, 3, 7] * 2,\n \"b\": np.random.randn(18),\n \"c\": np.random.randn(18),\n }\n )\n df.iloc[-6, -1] = np.nan\n ddf = dd.from_pandas(df, npartitions=4)\n assert_eq(\n getattr(df.groupby(\"a\"), func)(axis=1), getattr(ddf.groupby(\"a\"), func)(axis=1)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unaligned_index_test_groupby_unaligned_index.for_res_sol_in_good_.assert_eq_res_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unaligned_index_test_groupby_unaligned_index.for_res_sol_in_good_.assert_eq_res_sol_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1459, "end_line": 1499, "span_ids": ["test_groupby_unaligned_index"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_unaligned_index():\n df = pd.DataFrame(\n {\n \"a\": np.random.randint(0, 10, 50),\n \"b\": np.random.randn(50),\n \"c\": np.random.randn(50),\n }\n )\n ddf = dd.from_pandas(df, npartitions=5)\n filtered = df[df.b < 0.5]\n dfiltered = ddf[ddf.b < 0.5]\n\n ddf_group = dfiltered.groupby(ddf.a)\n ds_group = dfiltered.b.groupby(ddf.a)\n\n bad = [\n ddf_group.mean(),\n ddf_group.var(),\n ddf_group.b.nunique(),\n ddf_group.get_group(0),\n ds_group.mean(),\n ds_group.var(),\n ds_group.nunique(),\n ds_group.get_group(0),\n ]\n\n for obj in bad:\n with pytest.raises(ValueError):\n obj.compute()\n\n def add1(x):\n return x + 1\n\n df_group = filtered.groupby(df.a)\n good = [\n 
(ddf_group.apply(add1, meta=ddf), df_group.apply(add1)),\n (ddf_group.b.apply(add1, meta=ddf.b), df_group.b.apply(add1)),\n ]\n\n for (res, sol) in good:\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_string_label_test_groupby_string_label.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_string_label_test_groupby_string_label.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1502, "end_line": 1512, "span_ids": ["test_groupby_string_label"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_string_label():\n df = pd.DataFrame({\"foo\": [1, 1, 4], \"B\": [2, 3, 4], \"C\": [5, 6, 7]})\n ddf = dd.from_pandas(pd.DataFrame(df), npartitions=1)\n ddf_group = ddf.groupby(\"foo\")\n result = ddf_group.get_group(1).compute()\n\n expected = pd.DataFrame(\n {\"foo\": [1, 1], \"B\": [2, 3], \"C\": [5, 6]}, index=pd.Index([0, 1])\n )\n\n tm.assert_frame_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dataframe_cum_caching_test_groupby_dataframe_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dataframe_cum_caching_test_groupby_dataframe_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1515, "end_line": 1540, "span_ids": ["test_groupby_dataframe_cum_caching"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_dataframe_cum_caching():\n \"\"\"Test caching behavior of cumulative operations on grouped dataframes.\n\n Relates to #3756.\n \"\"\"\n df = pd.DataFrame(\n dict(a=list(\"aabbcc\")), index=pd.date_range(start=\"20100101\", periods=6)\n )\n df[\"ones\"] = 1\n df[\"twos\"] = 2\n\n ddf = dd.from_pandas(df, npartitions=3)\n\n ops = [\"cumsum\", \"cumprod\"]\n\n for op in ops:\n ddf0 = getattr(ddf.groupby([\"a\"]), op)()\n ddf1 = 
ddf.rename(columns={\"ones\": \"foo\", \"twos\": \"bar\"})\n ddf1 = getattr(ddf1.groupby([\"a\"]), op)()\n\n # _a and _b dataframe should be equal\n res0_a, res1_a = dask.compute(ddf0, ddf1)\n res0_b, res1_b = ddf0.compute(), ddf1.compute()\n\n assert res0_a.equals(res0_b)\n assert res1_a.equals(res1_b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_series_cum_caching_test_groupby_series_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_series_cum_caching_test_groupby_series_cum_caching.for_op_in_ops_.assert_res1_a_equals_res1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1543, "end_line": 1565, "span_ids": ["test_groupby_series_cum_caching"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_series_cum_caching():\n \"\"\"Test caching behavior of cumulative operations on grouped Series\n\n Relates to #3755\n \"\"\"\n df = pd.DataFrame(\n dict(a=list(\"aabbcc\")), index=pd.date_range(start=\"20100101\", periods=6)\n )\n df[\"ones\"] = 1\n df[\"twos\"] = 2\n\n ops = [\"cumsum\", \"cumprod\"]\n for op in ops:\n ddf = dd.from_pandas(df, npartitions=3)\n dcum = ddf.groupby([\"a\"])\n res0_a, res1_a = dask.compute(\n getattr(dcum[\"ones\"], op)(), getattr(dcum[\"twos\"], op)()\n )\n cum = df.groupby([\"a\"])\n res0_b, res1_b = (getattr(cum[\"ones\"], op)(), getattr(cum[\"twos\"], op)())\n\n assert res0_a.equals(res0_b)\n assert res1_a.equals(res1_b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_slice_agg_reduces_test_groupby_agg_grouper_single.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_slice_agg_reduces_test_groupby_agg_grouper_single.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1568, "end_line": 1583, "span_ids": ["test_groupby_agg_grouper_single", "test_groupby_slice_agg_reduces"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_groupby_slice_agg_reduces():\n d = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [2, 3, 4, 5]})\n a = dd.from_pandas(d, npartitions=2)\n result = a.groupby(\"a\")[\"b\"].agg([\"min\", \"max\"])\n expected = d.groupby(\"a\")[\"b\"].agg([\"min\", \"max\"])\n assert_eq(result, expected)\n\n\ndef test_groupby_agg_grouper_single():\n # https://github.com/dask/dask/issues/2255\n d = pd.DataFrame({\"a\": [1, 2, 3, 4]})\n a = dd.from_pandas(d, npartitions=2)\n\n result = a.groupby(\"a\")[\"a\"].agg([\"min\", \"max\"])\n expected = d.groupby(\"a\")[\"a\"].agg([\"min\", \"max\"])\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_grouper_multiple_test_groupby_agg_grouper_multiple.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_grouper_multiple_test_groupby_agg_grouper_multiple.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1586, "end_line": 1594, "span_ids": ["test_groupby_agg_grouper_multiple"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"slice_\", [\"a\", [\"a\"], [\"a\", \"b\"], [\"b\"]])\ndef test_groupby_agg_grouper_multiple(slice_):\n # https://github.com/dask/dask/issues/2255\n d = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1, 2, 3, 4]})\n a = dd.from_pandas(d, npartitions=2)\n\n result = a.groupby(\"a\")[slice_].agg([\"min\", \"max\"])\n expected = d.groupby(\"a\")[slice_].agg([\"min\", \"max\"])\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_agg_funcs_test_groupby_column_and_index_agg_funcs.None_5.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_agg_funcs_test_groupby_column_and_index_agg_funcs.None_5.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1597, "end_line": 1687, "span_ids": ["test_groupby_column_and_index_agg_funcs"], "tokens": 682}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"agg_func\",\n [\n \"cumprod\",\n \"cumcount\",\n \"cumsum\",\n \"var\",\n \"sum\",\n \"mean\",\n \"count\",\n \"size\",\n \"std\",\n \"min\",\n \"max\",\n \"first\",\n \"last\",\n \"prod\",\n ],\n)\ndef test_groupby_column_and_index_agg_funcs(agg_func):\n def call(g, m, **kwargs):\n return getattr(g, m)(**kwargs)\n\n df = pd.DataFrame(\n {\n \"idx\": [1, 1, 1, 2, 2, 2],\n \"a\": [1, 2, 1, 2, 1, 2],\n \"b\": np.arange(6),\n \"c\": [1, 1, 1, 2, 2, 2],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(df, npartitions=df.index.nunique())\n ddf_no_divs = dd.from_pandas(df, npartitions=df.index.nunique(), sort=False)\n\n # Index and then column\n\n # Compute expected result\n expected = call(df.groupby([\"idx\", \"a\"]), agg_func)\n if agg_func in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n result = call(ddf.groupby([\"idx\", \"a\"]), agg_func)\n assert_eq(expected, result)\n\n result = call(ddf_no_divs.groupby([\"idx\", \"a\"]), agg_func)\n assert_eq(expected, result)\n\n # apply-combine-apply aggregation functions\n aca_agg = {\"sum\", \"mean\", \"var\", \"size\", \"std\", \"count\", \"first\", \"last\", \"prod\"}\n\n # Test aggregate strings\n if agg_func in aca_agg:\n result = ddf_no_divs.groupby([\"idx\", \"a\"]).agg(agg_func)\n assert_eq(expected, result)\n\n # Column and then index\n\n # Compute expected result\n expected = call(df.groupby([\"a\", \"idx\"]), agg_func)\n if agg_func in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n result = call(ddf.groupby([\"a\", \"idx\"]), agg_func)\n assert_eq(expected, result)\n\n result = call(ddf_no_divs.groupby([\"a\", \"idx\"]), agg_func)\n assert_eq(expected, result)\n\n # Test aggregate strings\n if agg_func in aca_agg:\n result = ddf_no_divs.groupby([\"a\", \"idx\"]).agg(agg_func)\n assert_eq(expected, result)\n\n # Index only\n\n # Compute expected result\n expected = call(df.groupby(\"idx\"), agg_func)\n if agg_func in {\"mean\", \"var\"}:\n expected = expected.astype(float)\n\n result = call(ddf.groupby(\"idx\"), agg_func)\n assert_eq(expected, result)\n\n result = call(ddf_no_divs.groupby(\"idx\"), agg_func)\n assert_eq(expected, result)\n\n # Test aggregate strings\n if agg_func in aca_agg:\n result = ddf_no_divs.groupby(\"idx\").agg(agg_func)\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_custom_mean_test_dataframe_groupby_agg_custom_sum.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_custom_mean_test_dataframe_groupby_agg_custom_sum.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1730, "end_line": 1756, "span_ids": ["impl:3", "test_dataframe_groupby_agg_custom_sum"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "custom_mean = dd.Aggregation(\n \"mean\",\n lambda s: (s.count(), s.sum()),\n lambda s0, s1: (s0.sum(), s1.sum()),\n lambda s0, s1: s1 / s0,\n)\n\ncustom_sum = dd.Aggregation(\"sum\", lambda s: s.sum(), lambda s0: s0.sum())\n\n\n@pytest.mark.parametrize(\n \"pandas_spec, dask_spec, check_dtype\",\n [\n ({\"b\": \"mean\"}, {\"b\": custom_mean}, False),\n ({\"b\": \"sum\"}, {\"b\": custom_sum}, True),\n ([\"mean\", \"sum\"], [custom_mean, custom_sum], False),\n ({\"b\": [\"mean\", \"sum\"]}, {\"b\": [custom_mean, custom_sum]}, False),\n ],\n)\ndef test_dataframe_groupby_agg_custom_sum(pandas_spec, dask_spec, check_dtype):\n df = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3})\n ddf = dd.from_pandas(df, npartitions=2)\n\n expected = df.groupby(\"g\").aggregate(pandas_spec)\n result = ddf.groupby(\"g\").aggregate(dask_spec)\n\n assert_eq(result, expected, check_dtype=check_dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_agg_custom_mean_test_series_groupby_agg_custom_mean.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_agg_custom_mean_test_series_groupby_agg_custom_mean.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1760, "end_line": 1775, "span_ids": ["test_series_groupby_agg_custom_mean"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas_spec, dask_spec\",\n [\n (\"mean\", custom_mean),\n ([\"mean\"], [custom_mean]),\n ([\"mean\", \"sum\"], [custom_mean, custom_sum]),\n ],\n)\ndef test_series_groupby_agg_custom_mean(pandas_spec, dask_spec):\n d = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3})\n a = dd.from_pandas(d, npartitions=2)\n\n expected = d[\"b\"].groupby(d[\"g\"]).aggregate(pandas_spec)\n result = a[\"b\"].groupby(a[\"g\"]).aggregate(dask_spec)\n\n assert_eq(result, expected, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_same_column_test_groupby_agg_custom__name_clash_with_internal_same_column.with_pytest_raises_ValueE.a_groupby_g_aggregate_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_same_column_test_groupby_agg_custom__name_clash_with_internal_same_column.with_pytest_raises_ValueE.a_groupby_g_aggregate_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1778, "end_line": 1786, "span_ids": ["test_groupby_agg_custom__name_clash_with_internal_same_column"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_agg_custom__name_clash_with_internal_same_column():\n \"\"\"for a single input column only unique names are allowed\"\"\"\n d = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3})\n a = dd.from_pandas(d, npartitions=2)\n\n agg_func = dd.Aggregation(\"sum\", lambda s: s.sum(), lambda s0: s0.sum())\n\n with pytest.raises(ValueError):\n a.groupby(\"g\").aggregate({\"b\": [agg_func, \"sum\"]})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_different_column_test_groupby_agg_custom__name_clash_with_internal_different_column.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__name_clash_with_internal_different_column_test_groupby_agg_custom__name_clash_with_internal_different_column.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1789, "end_line": 1807, "span_ids": ["test_groupby_agg_custom__name_clash_with_internal_different_column"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_agg_custom__name_clash_with_internal_different_column():\n \"\"\"custom aggregation functions can share the name of a builtin function\"\"\"\n d = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3, \"c\": [4, 5, 6] * 3})\n a = dd.from_pandas(d, npartitions=2)\n\n # NOTE: this function is purposefully misnamed\n agg_func = dd.Aggregation(\n \"sum\",\n lambda s: (s.count(), s.sum()),\n lambda s0, s1: (s0.sum(), s1.sum()),\n lambda s0, s1: s1 / s0,\n )\n\n # NOTE: the name of agg-func is suppressed in the output,\n # since only a single agg func per column was specified\n result = a.groupby(\"g\").aggregate({\"b\": agg_func, \"c\": \"sum\"})\n expected = d.groupby(\"g\").aggregate({\"b\": \"mean\", \"c\": \"sum\"})\n\n assert_eq(result, expected, 
check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__mode_test_groupby_agg_custom__mode.assert_eq_actual_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_agg_custom__mode_test_groupby_agg_custom__mode.assert_eq_actual_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1810, "end_line": 1846, "span_ids": ["test_groupby_agg_custom__mode"], "tokens": 345}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_agg_custom__mode():\n # mode function passing intermediates as pure python objects around. to protect\n # results from pandas in apply use return results as single-item lists\n def agg_mode(s):\n def impl(s):\n (res,) = s.iloc[0]\n\n for (i,) in s.iloc[1:]:\n res = res.add(i, fill_value=0)\n\n return [res]\n\n return s.apply(impl)\n\n agg_func = dd.Aggregation(\n \"custom_mode\",\n lambda s: s.apply(lambda s: [s.value_counts()]),\n agg_mode,\n lambda s: s.map(lambda i: i[0].idxmax()),\n )\n\n d = pd.DataFrame(\n {\n \"g0\": [0, 0, 0, 1, 1] * 3,\n \"g1\": [0, 0, 0, 1, 1] * 3,\n \"cc\": [4, 5, 4, 6, 6] * 3,\n }\n )\n a = dd.from_pandas(d, npartitions=5)\n\n actual = a[\"cc\"].groupby([a[\"g0\"], a[\"g1\"]]).agg(agg_func)\n\n # cheat to get the correct index\n expected = pd.DataFrame({\"g0\": [0, 1], \"g1\": [0, 1], \"cc\": [4, 6]})\n expected = expected[\"cc\"].groupby([expected[\"g0\"], expected[\"g1\"]]).agg(\"sum\")\n\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_select_column_agg_test_groupby_select_column_agg.assert_eq_actual_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_select_column_agg_test_groupby_select_column_agg.assert_eq_actual_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1849, "end_line": 1860, "span_ids": ["test_groupby_select_column_agg"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"var\", list])\ndef 
test_groupby_select_column_agg(func):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 1, 2, 3, 1, 2, 4],\n \"B\": [-0.776, -0.4, -0.873, 0.054, 1.419, -0.948, -0.967, -1.714, -0.666],\n }\n )\n ddf = dd.from_pandas(pdf, npartitions=4)\n actual = ddf.groupby(\"A\")[\"B\"].agg(func)\n expected = pdf.groupby(\"A\")[\"B\"].agg(func)\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_object_dtype_test_std_object_dtype.assert_eq_func_df_func_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_object_dtype_test_std_object_dtype.assert_eq_func_df_func_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1769, "end_line": 1784, "span_ids": ["test_std_object_dtype"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: x.std(numeric_only=True),\n lambda x: x.groupby(\"x\").std(),\n lambda x: x.groupby(\"x\").var(),\n lambda x: x.groupby(\"x\").mean(),\n lambda x: x.groupby(\"x\").sum(),\n lambda x: x.groupby(\"x\").z.std(),\n ],\n)\ndef test_std_object_dtype(func):\n df = pd.DataFrame({\"x\": [1, 2, 1], \"y\": [\"a\", \"b\", \"c\"], \"z\": [11.0, 22.0, 33.0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(func(df), func(ddf))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_columns_int_test_timeseries.assert_eq_df_groupby_nam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_std_columns_int_test_timeseries.assert_eq_df_groupby_nam", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1880, "end_line": 1892, "span_ids": ["test_timeseries", "test_std_columns_int"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_std_columns_int():\n # Make sure std() works when index_by is a df with integer column names\n # Non regression test for issue #3560\n\n df = pd.DataFrame({0: [5], 1: [5]})\n ddf = dd.from_pandas(df, npartitions=2)\n by = dask.array.from_array([0, 1]).to_dask_dataframe()\n ddf.groupby(by).std()\n\n\ndef 
test_timeseries():\n df = dask.datasets.timeseries().partitions[:2]\n assert_eq(df.groupby(\"name\").std(), df.groupby(\"name\").std())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_with_min_count_test_with_min_count.for_df_ddf_in_zip_dfs_d.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1897, "end_line": 1927, "span_ids": ["test_with_min_count"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"min_count\", [0, 1, 2, 3])\ndef test_with_min_count(min_count):\n dfs = [\n pd.DataFrame(\n {\n \"group\": [\"A\", \"A\", \"B\"],\n \"val1\": [np.nan, 2, 3],\n \"val2\": [np.nan, 5, 6],\n \"val3\": [5, 4, 9],\n }\n ),\n pd.DataFrame(\n {\n \"group\": [\"A\", \"A\", \"B\"],\n \"val1\": [2, np.nan, np.nan],\n \"val2\": [np.nan, 5, 6],\n \"val3\": [5, 4, 9],\n }\n ),\n ]\n ddfs = [dd.from_pandas(df, npartitions=4) for df in dfs]\n\n for df, ddf in zip(dfs, ddfs):\n assert_eq(\n df.groupby(\"group\").sum(min_count=min_count),\n ddf.groupby(\"group\").sum(min_count=min_count),\n )\n assert_eq(\n df.groupby(\"group\").prod(min_count=min_count),\n ddf.groupby(\"group\").prod(min_count=min_count),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_group_keys_test_groupby_group_keys.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_group_keys_test_groupby_group_keys.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1933, "end_line": 1943, "span_ids": ["test_groupby_group_keys"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_group_keys():\n df = pd.DataFrame({\"a\": [1, 2, 2, 3], \"b\": [2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"a\")\n pdf = df.set_index(\"a\")\n\n func = lambda g: g.copy()\n expected = pdf.groupby(\"a\").apply(func)\n assert_eq(expected, ddf.groupby(\"a\").apply(func, 
meta=expected))\n\n expected = pdf.groupby(\"a\", group_keys=False).apply(func)\n assert_eq(expected, ddf.groupby(\"a\", group_keys=False).apply(func, meta=expected))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_cov_test_groupby_cov.if_isinstance_columns_np.else_.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_cov_test_groupby_cov.if_isinstance_columns_np.else_.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1947, "end_line": 1971, "span_ids": ["test_groupby_cov"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"columns\",\n [[\"a\", \"b\", \"c\"], np.array([1.0, 2.0, 3.0]), [\"1\", \"2\", \"3\"], [\"\", \"a\", \"b\"]],\n)\ndef test_groupby_cov(columns):\n rows = 20\n cols = 3\n data = np.random.randn(rows, cols)\n df = pd.DataFrame(data, columns=columns)\n df[\"key\"] = [0] * 10 + [1] * 5 + [2] * 5\n ddf = dd.from_pandas(df, npartitions=3)\n\n expected = df.groupby(\"key\").cov()\n result = ddf.groupby(\"key\").cov()\n # when using numerical values for columns\n # the column mapping and stacking leads to a float typed\n # MultiIndex. 
Pandas will normally create an object typed\n # MultiIndex\n if isinstance(columns, np.ndarray):\n result = result.compute()\n # don't bother checking index -- MultiIndex levels are in a frozenlist\n result.columns = result.columns.astype(np.dtype(\"O\"))\n assert_eq(expected, result, check_index=False)\n else:\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_test_df_groupby_idxmin.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_test_df_groupby_idxmin.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1973, "end_line": 1986, "span_ids": ["test_df_groupby_idxmin"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_df_groupby_idxmin():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = pd.DataFrame({\"group\": [1, 2], \"value\": [0, 3]}).set_index(\"group\")\n\n result_pd = pdf.groupby(\"group\").idxmin()\n result_dd = ddf.groupby(\"group\").idxmin()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_skipna_test_df_groupby_idxmin_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmin_skipna_test_df_groupby_idxmin_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1989, "end_line": 2004, "span_ids": ["test_df_groupby_idxmin_skipna"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_df_groupby_idxmin_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = 
pdf.groupby(\"group\").idxmin(skipna=skipna)\n result_dd = ddf.groupby(\"group\").idxmin(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_test_df_groupby_idxmax.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_test_df_groupby_idxmax.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2007, "end_line": 2020, "span_ids": ["test_df_groupby_idxmax"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_df_groupby_idxmax():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = pd.DataFrame({\"group\": [1, 2], \"value\": [1, 2]}).set_index(\"group\")\n\n result_pd = pdf.groupby(\"group\").idxmax()\n result_dd = ddf.groupby(\"group\").idxmax()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_skipna_test_df_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_df_groupby_idxmax_skipna_test_df_groupby_idxmax_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2023, "end_line": 2038, "span_ids": ["test_df_groupby_idxmax_skipna"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_df_groupby_idxmax_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = pdf.groupby(\"group\").idxmax(skipna=skipna)\n result_dd = ddf.groupby(\"group\").idxmax(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_test_series_groupby_idxmin.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_test_series_groupby_idxmin.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2041, "end_line": 2056, "span_ids": ["test_series_groupby_idxmin"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby_idxmin():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = (\n pd.DataFrame({\"group\": [1, 2], \"value\": [0, 3]}).set_index(\"group\").squeeze()\n )\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmin()\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmin()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_skipna_test_series_groupby_idxmin_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmin_skipna_test_series_groupby_idxmin_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2059, "end_line": 2074, "span_ids": ["test_series_groupby_idxmin_skipna"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_series_groupby_idxmin_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmin(skipna=skipna)\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmin(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_test_series_groupby_idxmax.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_test_series_groupby_idxmax.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2077, "end_line": 2092, "span_ids": ["test_series_groupby_idxmax"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_groupby_idxmax():\n pdf = pd.DataFrame(\n {\"idx\": list(range(4)), \"group\": [1, 1, 2, 2], \"value\": [10, 20, 20, 10]}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n expected = (\n pd.DataFrame({\"group\": [1, 2], \"value\": [1, 2]}).set_index(\"group\").squeeze()\n )\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmax()\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmax()\n\n assert_eq(result_pd, result_dd)\n assert_eq(expected, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_skipna_test_series_groupby_idxmax_skipna.assert_eq_result_pd_resu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_idxmax_skipna_test_series_groupby_idxmax_skipna.assert_eq_result_pd_resu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2095, "end_line": 2110, "span_ids": ["test_series_groupby_idxmax_skipna"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\ndef test_series_groupby_idxmax_skipna(skipna):\n pdf = pd.DataFrame(\n {\n \"idx\": list(range(4)),\n \"group\": [1, 1, 2, 2],\n \"value\": [np.nan, 20.1, np.nan, 10.1],\n }\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n result_pd = pdf.groupby(\"group\")[\"value\"].idxmax(skipna=skipna)\n result_dd = ddf.groupby(\"group\")[\"value\"].idxmax(skipna=skipna)\n\n assert_eq(result_pd, result_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unique_test_groupby_value_counts.assert_eq_dd_gb_pd_gb_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_unique_test_groupby_value_counts.assert_eq_dd_gb_pd_gb_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2110, "end_line": 2133, "span_ids": ["test_groupby_value_counts", "test_groupby_unique"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_unique():\n rng = np.random.RandomState(42)\n df = pd.DataFrame(\n {\"foo\": rng.randint(3, size=100), \"bar\": rng.randint(10, size=100)}\n )\n ddf = dd.from_pandas(df, npartitions=10)\n\n pd_gb = df.groupby(\"foo\")[\"bar\"].unique()\n dd_gb = ddf.groupby(\"foo\")[\"bar\"].unique()\n\n # Use explode because each DataFrame row is a list; equality fails\n assert_eq(dd_gb.explode(), pd_gb.explode())\n\n\ndef test_groupby_value_counts():\n rng = np.random.RandomState(42)\n df = pd.DataFrame(\n {\"foo\": rng.randint(3, size=100), \"bar\": rng.randint(4, size=100)}\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n pd_gb = df.groupby(\"foo\")[\"bar\"].value_counts()\n dd_gb = ddf.groupby(\"foo\")[\"bar\"].value_counts()\n assert_eq(dd_gb, pd_gb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_funcs_test_groupby_transform_funcs.with_pytest_warns_UserWar.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_funcs_test_groupby_transform_funcs.with_pytest_warns_UserWar.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2143, "end_line": 2168, "span_ids": ["test_groupby_transform_funcs"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"transformation\", [lambda x: x.sum(), np.sum, \"sum\", pd.Series.rank]\n)\ndef test_groupby_transform_funcs(transformation):\n pdf = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4] * 5,\n \"B\": np.random.randn(20),\n \"C\": np.random.randn(20),\n \"D\": np.random.randn(20),\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.warns(UserWarning):\n # DataFrame\n assert_eq(\n pdf.groupby(\"A\").transform(transformation),\n ddf.groupby(\"A\").transform(transformation),\n )\n\n # Series\n 
assert_eq(\n pdf.groupby(\"A\")[\"B\"].transform(transformation),\n ddf.groupby(\"A\")[\"B\"].transform(transformation),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_ufunc_partitioning_test_groupby_transform_ufunc_partitioning.with_pytest_warns_UserWar.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_transform_ufunc_partitioning_test_groupby_transform_ufunc_partitioning.with_pytest_warns_UserWar.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2171, "end_line": 2196, "span_ids": ["test_groupby_transform_ufunc_partitioning"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", list(range(1, 10)))\n@pytest.mark.parametrize(\"indexed\", [True, False])\ndef test_groupby_transform_ufunc_partitioning(npartitions, indexed):\n pdf = pd.DataFrame({\"group\": [1, 2, 3, 4, 5] * 20, \"value\": np.random.randn(100)})\n\n if indexed:\n pdf = pdf.set_index(\"group\")\n\n ddf = dd.from_pandas(pdf, npartitions)\n\n with pytest.warns(UserWarning):\n # DataFrame\n assert_eq(\n pdf.groupby(\"group\").transform(lambda series: series - series.mean()),\n ddf.groupby(\"group\").transform(lambda series: series - series.mean()),\n )\n\n # Series\n assert_eq(\n pdf.groupby(\"group\")[\"value\"].transform(\n lambda series: series - series.mean()\n ),\n ddf.groupby(\"group\")[\"value\"].transform(\n lambda series: series - series.mean()\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categoricals_test_groupby_aggregate_categoricals.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categoricals_test_groupby_aggregate_categoricals.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2178, "end_line": 2210, "span_ids": ["test_groupby_aggregate_categoricals"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"grouping,agg\",\n [\n (\n lambda df: 
df.drop(columns=\"category_2\").groupby(\"category_1\"),\n lambda grp: grp.mean(),\n ),\n (\n lambda df: df.drop(columns=\"category_2\").groupby(\"category_1\"),\n lambda grp: grp.agg(\"mean\"),\n ),\n (lambda df: df.groupby([\"category_1\", \"category_2\"]), lambda grp: grp.mean()),\n (\n lambda df: df.groupby([\"category_1\", \"category_2\"]),\n lambda grp: grp.agg(\"mean\"),\n ),\n ],\n)\ndef test_groupby_aggregate_categoricals(grouping, agg):\n pdf = pd.DataFrame(\n {\n \"category_1\": pd.Categorical(list(\"AABBCC\")),\n \"category_2\": pd.Categorical(list(\"ABCABC\")),\n \"value\": np.random.uniform(size=6),\n }\n )\n ddf = dd.from_pandas(pdf, 2)\n\n # DataFrameGroupBy\n assert_eq(agg(grouping(pdf)), agg(grouping(ddf)))\n\n # SeriesGroupBy\n assert_eq(agg(grouping(pdf)[\"value\"]), agg(grouping(ddf)[\"value\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_pandas_test_groupby_dropna_pandas.assert_eq_dask_result_pd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_pandas_test_groupby_dropna_pandas.assert_eq_dask_result_pd", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2234, "end_line": 2247, "span_ids": ["test_groupby_dropna_pandas"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n not dask.dataframe.utils.PANDAS_GT_110,\n reason=\"dropna kwarg not supported in pandas < 1.1.0.\",\n)\n@pytest.mark.parametrize(\"dropna\", [False, True])\ndef test_groupby_dropna_pandas(dropna):\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, None, None, 7, 8], \"e\": [4, 5, 6, 3, 2, 1, 0, 0]}\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n dask_result = ddf.groupby(\"a\", dropna=dropna).e.sum()\n pd_result = df.groupby(\"a\", dropna=dropna).e.sum()\n assert_eq(dask_result, pd_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_cudf_test_groupby_dropna_cudf.assert_eq_dask_result_cu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_cudf_test_groupby_dropna_cudf.assert_eq_dask_result_cu", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2229, "end_line": 2262, "span_ids": ["test_groupby_dropna_cudf"], "tokens": 394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.gpu\n@pytest.mark.parametrize(\"dropna\", [False, True, None])\n@pytest.mark.parametrize(\"by\", [\"a\", \"c\", \"d\", [\"a\", \"b\"], [\"a\", \"c\"], [\"a\", \"d\"]])\ndef test_groupby_dropna_cudf(dropna, by):\n\n # NOTE: This test requires cudf/dask_cudf, and will\n # be skipped by non-GPU CI\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n df = cudf.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, None, 7, 8],\n \"b\": [1, 0] * 4,\n \"c\": [\"a\", \"b\", None, None, \"e\", \"f\", \"g\", \"h\"],\n \"e\": [4, 5, 6, 3, 2, 1, 0, 0],\n }\n )\n df[\"d\"] = df[\"c\"].astype(\"category\")\n ddf = dask_cudf.from_cudf(df, npartitions=3)\n\n if dropna is None:\n dask_result = ddf.groupby(by).e.sum()\n cudf_result = df.groupby(by).e.sum()\n else:\n dask_result = ddf.groupby(by, dropna=dropna).e.sum()\n cudf_result = df.groupby(by, dropna=dropna).e.sum()\n if by in [\"c\", \"d\"]:\n # Lose string/category index name in cudf...\n dask_result = dask_result.compute()\n dask_result.index.name = cudf_result.index.name\n\n assert_eq(dask_result, cudf_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_large_ints_exception_test_groupby_large_ints_exception.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_large_ints_exception_test_groupby_large_ints_exception.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2392, "end_line": 2416, "span_ids": ["test_groupby_large_ints_exception"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"backend\",\n [\n \"pandas\",\n pytest.param(\"cudf\", marks=pytest.mark.gpu),\n ],\n)\ndef test_groupby_large_ints_exception(backend):\n data_source = pytest.importorskip(backend)\n if backend == \"cudf\":\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n data_frame = dask_cudf.from_cudf\n else:\n data_frame = dd.from_pandas\n max = np.iinfo(np.uint64).max\n sqrt = max**0.5\n series = data_source.Series(\n np.concatenate([sqrt * np.arange(5), np.arange(35)])\n ).astype(\"int64\")\n df = data_source.DataFrame({\"x\": series, \"z\": np.arange(40), \"y\": np.arange(40)})\n ddf = data_frame(df, npartitions=1)\n assert_eq(\n df.groupby(\"x\").std(),\n ddf.groupby(\"x\").std().compute(scheduler=\"single-threaded\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_test_groupby_sort_argument.if_agg_mean_.else_.assert_eq_result_3_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_test_groupby_sort_argument.if_agg_mean_.else_.assert_eq_result_3_resul", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2354, "end_line": 2391, "span_ids": ["test_groupby_sort_argument"], "tokens": 404}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"by\", [\"a\", \"b\", \"c\", [\"a\", \"b\"], [\"a\", \"c\"]])\n@pytest.mark.parametrize(\"agg\", [\"count\", \"mean\", \"std\"])\n@pytest.mark.parametrize(\"sort\", [True, False])\ndef test_groupby_sort_argument(by, agg, sort):\n\n df = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, None, None, 7, 8],\n \"b\": [1, 0] * 4,\n \"c\": [\"a\", \"b\", None, None, \"e\", \"f\", \"g\", \"h\"],\n \"e\": [4, 5, 6, 3, 2, 1, 0, 0],\n }\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n gb = ddf.groupby(by, sort=sort)\n gb_pd = df.groupby(by, sort=sort)\n\n # Basic groupby aggregation\n result_1 = getattr(gb, agg)\n result_1_pd = getattr(gb_pd, agg)\n\n # Choose single column\n result_2 = getattr(gb.e, agg)\n result_2_pd = getattr(gb_pd.e, agg)\n\n # Use `agg()` api\n result_3 = gb.agg({\"e\": agg})\n result_3_pd = gb_pd.agg({\"e\": agg})\n\n if agg == \"mean\":\n assert_eq(result_1(), result_1_pd().astype(\"float\"))\n assert_eq(result_2(), result_2_pd().astype(\"float\"))\n assert_eq(result_3, result_3_pd.astype(\"float\"))\n else:\n assert_eq(result_1(), result_1_pd())\n assert_eq(result_2(), result_2_pd())\n assert_eq(result_3, result_3_pd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_agg_test_groupby_sort_argument_agg.if_sort_.assert_eq_result_index_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_argument_agg_test_groupby_sort_argument_agg.if_sort_.assert_eq_result_index_r", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2395, "end_line": 2408, "span_ids": ["test_groupby_sort_argument_agg"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"agg\", [M.sum, M.prod, M.max, 
M.min])\n@pytest.mark.parametrize(\"sort\", [True, False])\ndef test_groupby_sort_argument_agg(agg, sort):\n df = pd.DataFrame({\"x\": [4, 2, 1, 2, 3, 1], \"y\": [1, 2, 3, 4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n result = agg(ddf.groupby(\"x\", sort=sort))\n result_pd = agg(df.groupby(\"x\", sort=sort))\n\n assert_eq(result, result_pd)\n if sort:\n # Check order of index if sort==True\n # (no guarantee that order will match otherwise)\n assert_eq(result.index, result_pd.index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_np_test_hash_pandas_object.if_isinstance_a_np_ndarr.else_.assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_np_test_hash_pandas_object.if_isinstance_a_np_ndarr.else_.assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 36, "span_ids": ["imports", "test_hash_pandas_object"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.util import hash_pandas_object\n\nimport dask.dataframe as dd\nfrom dask.dataframe import _compat\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.utils import assert_eq\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n pd.Series([1, 2, 3]),\n pd.Series([1.0, 1.5, 3.2]),\n pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n pd.Series([\"a\", \"b\", \"c\"]),\n pd.Series([True, False, True]),\n pd.Index([1, 2, 3]),\n pd.Index([True, False, True]),\n pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3]}),\n _compat.makeMissingDataframe(),\n _compat.makeMixedDataFrame(),\n _compat.makeTimeDataFrame(),\n _compat.makeTimeSeries(),\n _compat.makeTimedeltaIndex(),\n ],\n)\ndef test_hash_pandas_object(obj):\n a = hash_pandas_object(obj)\n b = hash_pandas_object(obj)\n if isinstance(a, np.ndarray):\n np.testing.assert_equal(a, b)\n else:\n assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_categorical_consistency_test_categorical_consistency.for_s1_in_.for_categorize_in_True_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_categorical_consistency_test_categorical_consistency.for_s1_in_.for_categorize_in_True_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 56, "span_ids": 
["test_categorical_consistency"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_consistency():\n # Check that categoricals hash consistent with their values, not codes\n # This should work for categoricals of any dtype\n for s1 in [\n pd.Series([\"a\", \"b\", \"c\", \"d\"]),\n pd.Series([1000, 2000, 3000, 4000]),\n pd.Series(pd.date_range(0, periods=4)),\n ]:\n s2 = s1.astype(\"category\").cat.set_categories(s1)\n s3 = s2.cat.set_categories(list(reversed(s1)))\n for categorize in [True, False]:\n # These should all hash identically\n h1 = hash_pandas_object(s1, categorize=categorize)\n h2 = hash_pandas_object(s2, categorize=categorize)\n h3 = hash_pandas_object(s3, categorize=categorize)\n tm.assert_series_equal(h1, h2)\n tm.assert_series_equal(h1, h3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_object_missing_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hashing.py_test_object_missing_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 58, "end_line": 83, "span_ids": ["test_hash_object_dispatch", "test_object_missing_values"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_object_missing_values():\n # Check that the presence of missing values doesn't change how object dtype\n # is hashed.\n s = pd.Series([\"a\", \"b\", \"c\", None])\n h1 = hash_pandas_object(s).iloc[:3]\n h2 = hash_pandas_object(s.iloc[:3])\n tm.assert_series_equal(h1, h2)\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n pd.Index([1, 2, 3]),\n pd.Index([True, False, True]),\n pd.Series([1, 2, 3]),\n pd.Series([1.0, 1.5, 3.2]),\n pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3]}),\n pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3]}, index=[\"a\", \"z\", \"x\"]),\n ],\n)\ndef test_hash_object_dispatch(obj):\n result = dd.dispatch.hash_object_dispatch(obj)\n expected = pd.util.hash_pandas_object(obj)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_test_split_every_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_test_split_every_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hyperloglog.py", "file_name": "test_hyperloglog.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 101, "span_ids": ["test_split_every", "test_larger_data"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [None, 2, 10])\n@pytest.mark.parametrize(\"npartitions\", [2, 20])\ndef test_split_every(split_every, npartitions):\n df = pd.Series([1, 2, 3] * 1000)\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n approx = ddf.nunique_approx(split_every=split_every).compute(scheduler=\"sync\")\n exact = len(df.drop_duplicates())\n assert abs(approx - exact) <= 2 or abs(approx - exact) / exact < 0.05\n\n\ndef test_larger_data():\n df = dd.demo.make_timeseries(\n \"2000-01-01\",\n \"2000-04-01\",\n {\"value\": float, \"id\": int},\n freq=\"10s\",\n partition_freq=\"1D\",\n seed=1,\n )\n assert df.nunique_approx().compute() > 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_test_loc.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_test_loc.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 51, "span_ids": ["test_loc"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc():\n assert d.loc[3:8].divisions[0] == 3\n assert d.loc[3:8].divisions[-1] == 8\n\n assert d.loc[5].divisions == (5, 5)\n\n assert_eq(d.loc[5], full.loc[5:5])\n assert_eq(d.loc[3:8], full.loc[3:8])\n assert_eq(d.loc[:8], full.loc[:8])\n assert_eq(d.loc[3:], full.loc[3:])\n assert_eq(d.loc[[5]], full.loc[[5]])\n\n assert_eq(d.a.loc[5], full.a.loc[5:5])\n assert_eq(d.a.loc[3:8], full.a.loc[3:8])\n assert_eq(d.a.loc[:8], full.a.loc[:8])\n assert_eq(d.a.loc[3:], full.a.loc[3:])\n assert_eq(d.a.loc[[5]], full.a.loc[[5]])\n assert_eq(d.a.loc[[]], full.a.loc[[]])\n assert_eq(d.a.loc[np.array([])], full.a.loc[np.array([])])\n\n pytest.raises(KeyError, lambda: d.loc[1000])\n assert_eq(d.loc[1000:], full.loc[1000:])\n assert_eq(d.loc[-2000:-1000], full.loc[-2000:-1000])\n\n assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)\n assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_non_informative_index_test_loc_non_informative_index.assert_eq_ddf_loc_20_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_non_informative_index_test_loc_non_informative_index.assert_eq_ddf_loc_20_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 88, "span_ids": ["test_loc_non_informative_index"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_non_informative_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]}, index=[10, 20, 30, 40])\n ddf = dd.from_pandas(df, npartitions=2, sort=True)\n ddf.divisions = (None,) * 3\n assert not ddf.known_divisions\n\n ddf.loc[20:30].compute(scheduler=\"sync\")\n\n assert_eq(ddf.loc[20:30], df.loc[20:30])\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]}, index=[10, 20, 20, 40])\n ddf = dd.from_pandas(df, npartitions=2, sort=True)\n assert_eq(ddf.loc[20], df.loc[20:20])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_text_dates_test_loc_with_text_dates.assert_len_s_loc_2000_01": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_text_dates_test_loc_with_text_dates.assert_len_s_loc_2000_01", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 91, "end_line": 103, "span_ids": ["test_loc_with_text_dates"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_text_dates():\n A = dd._compat.makeTimeSeries().iloc[:5]\n B = dd._compat.makeTimeSeries().iloc[5:]\n s = dd.Series(\n {(\"df\", 0): A, (\"df\", 1): B},\n \"df\",\n A,\n [A.index.min(), B.index.min(), B.index.max()],\n )\n\n assert s.loc[\"2000\":\"2010\"].divisions == s.divisions\n assert_eq(s.loc[\"2000\":\"2010\"], s)\n assert len(s.loc[\"2000-01-03\":\"2000-01-05\"].compute()) == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_different_partition_test_loc_with_series_different_partition.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_different_partition_test_loc_with_series_different_partition.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 153, "span_ids": ["test_loc_with_series_different_partition"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_series_different_partition():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[ddf.A > 0], df.loc[df.A > 0])\n assert_eq(\n ddf.loc[(ddf.A > 0).repartition([\"a\", \"g\", \"k\", \"o\", \"t\"])], df.loc[df.A > 0]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_test_loc2d.None_3.d_a_loc_d_a_2_0_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_test_loc2d.None_3.d_a_loc_d_a_2_0_3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 156, "end_line": 184, "span_ids": ["test_loc2d"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc2d():\n # index indexer is always regarded as slice for duplicated values\n assert_eq(d.loc[5, \"a\"], full.loc[5:5, \"a\"])\n # assert_eq(d.loc[[5], 'a'], full.loc[[5], 'a'])\n assert_eq(d.loc[5, [\"a\"]], full.loc[5:5, [\"a\"]])\n # assert_eq(d.loc[[5], ['a']], full.loc[[5], ['a']])\n\n assert_eq(d.loc[3:8, \"a\"], full.loc[3:8, \"a\"])\n assert_eq(d.loc[:8, \"a\"], full.loc[:8, \"a\"])\n assert_eq(d.loc[3:, \"a\"], full.loc[3:, \"a\"])\n assert_eq(d.loc[[8], \"a\"], full.loc[[8], \"a\"])\n\n assert_eq(d.loc[3:8, [\"a\"]], full.loc[3:8, [\"a\"]])\n assert_eq(d.loc[:8, [\"a\"]], full.loc[:8, [\"a\"]])\n assert_eq(d.loc[3:, [\"a\"]], full.loc[3:, [\"a\"]])\n\n # 3d\n with pytest.raises(pd.core.indexing.IndexingError):\n d.loc[3, 3, 3]\n\n # Series should raise\n with pytest.raises(pd.core.indexing.IndexingError):\n d.a.loc[3, 3]\n\n with pytest.raises(pd.core.indexing.IndexingError):\n d.a.loc[3:, 3]\n\n with pytest.raises(pd.core.indexing.IndexingError):\n 
d.a.loc[d.a % 2 == 0, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_unknown_divisions_test_loc2d_with_unknown_divisions.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_unknown_divisions_test_loc2d_with_unknown_divisions.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 228, "span_ids": ["test_loc2d_with_unknown_divisions"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc2d_with_unknown_divisions():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n ddf.divisions = (None,) * len(ddf.divisions)\n assert ddf.known_divisions is False\n\n assert_eq(ddf.loc[\"a\", \"A\"], df.loc[[\"a\"], \"A\"])\n assert_eq(ddf.loc[\"a\", [\"A\"]], df.loc[[\"a\"], [\"A\"]])\n assert_eq(ddf.loc[\"a\":\"o\", \"A\"], df.loc[\"a\":\"o\", \"A\"])\n assert_eq(ddf.loc[\"a\":\"o\", [\"A\"]], df.loc[\"a\":\"o\", [\"A\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_duplicated_columns_test_loc2d_duplicated_columns.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_duplicated_columns_test_loc2d_duplicated_columns.None_13", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 255, "span_ids": ["test_loc2d_duplicated_columns"], "tokens": 417}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc2d_duplicated_columns():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"AABCD\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[\"a\", \"A\"], df.loc[[\"a\"], \"A\"])\n assert_eq(ddf.loc[\"a\", [\"A\"]], df.loc[[\"a\"], [\"A\"]])\n assert_eq(ddf.loc[\"j\", \"B\"], df.loc[[\"j\"], \"B\"])\n assert_eq(ddf.loc[\"j\", [\"B\"]], df.loc[[\"j\"], [\"B\"]])\n\n assert_eq(ddf.loc[\"a\":\"o\", \"A\"], df.loc[\"a\":\"o\", \"A\"])\n assert_eq(ddf.loc[\"a\":\"o\", [\"A\"]], 
df.loc[\"a\":\"o\", [\"A\"]])\n assert_eq(ddf.loc[\"j\":\"q\", \"B\"], df.loc[\"j\":\"q\", \"B\"])\n assert_eq(ddf.loc[\"j\":\"q\", [\"B\"]], df.loc[\"j\":\"q\", [\"B\"]])\n\n assert_eq(ddf.loc[\"a\":\"o\", \"B\":\"D\"], df.loc[\"a\":\"o\", \"B\":\"D\"])\n assert_eq(ddf.loc[\"a\":\"o\", \"B\":\"D\"], df.loc[\"a\":\"o\", \"B\":\"D\"])\n assert_eq(ddf.loc[\"j\":\"q\", \"B\":\"A\"], df.loc[\"j\":\"q\", \"B\":\"A\"])\n assert_eq(ddf.loc[\"j\":\"q\", \"B\":\"A\"], df.loc[\"j\":\"q\", \"B\":\"A\"])\n\n assert_eq(ddf.loc[ddf.B > 0, \"B\"], df.loc[df.B > 0, \"B\"])\n assert_eq(ddf.loc[ddf.B > 0, [\"A\", \"C\"]], df.loc[df.B > 0, [\"A\", \"C\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_test_getitem.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_test_getitem.None_13", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 258, "end_line": 291, "span_ids": ["test_getitem"], "tokens": 390}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"B\": [9, 8, 7, 6, 5, 4, 3, 2, 1],\n \"C\": [True, False, True] * 3,\n },\n columns=list(\"ABC\"),\n )\n ddf = dd.from_pandas(df, 2)\n assert_eq(ddf[\"A\"], df[\"A\"])\n # check cache consistency\n tm.assert_series_equal(ddf[\"A\"]._meta, ddf._meta[\"A\"])\n\n assert_eq(ddf[[\"A\", \"B\"]], df[[\"A\", \"B\"]])\n tm.assert_frame_equal(ddf[[\"A\", \"B\"]]._meta, ddf._meta[[\"A\", \"B\"]])\n\n assert_eq(ddf[ddf.C], df[df.C])\n tm.assert_series_equal(ddf.C._meta, ddf._meta.C)\n\n assert_eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])\n\n pytest.raises(KeyError, lambda: df[\"X\"])\n pytest.raises(KeyError, lambda: df[[\"A\", \"X\"]])\n pytest.raises(AttributeError, lambda: df.X)\n\n # not str/unicode\n df = pd.DataFrame(np.random.randn(10, 5))\n ddf = dd.from_pandas(df, 2)\n assert_eq(ddf[0], df[0])\n assert_eq(ddf[[1, 2]], df[[1, 2]])\n\n pytest.raises(KeyError, lambda: df[8])\n pytest.raises(KeyError, lambda: df[[1, 8]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_slice_test_getitem_slice.assert_eq_ddf_f_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_slice_test_getitem_slice.assert_eq_ddf_f_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", 
"start_line": 294, "end_line": 306, "span_ids": ["test_getitem_slice"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_slice():\n df = pd.DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"B\": [9, 8, 7, 6, 5, 4, 3, 2, 1],\n \"C\": [True, False, True] * 3,\n },\n index=list(\"abcdefghi\"),\n )\n ddf = dd.from_pandas(df, 3)\n assert_eq(ddf[\"a\":\"e\"], df[\"a\":\"e\"])\n assert_eq(ddf[\"a\":\"b\"], df[\"a\":\"b\"])\n assert_eq(ddf[\"f\":], df[\"f\":])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_integer_slice_test_getitem_integer_slice.assert_eq_ddf_8_df_8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_integer_slice_test_getitem_integer_slice.assert_eq_ddf_8_df_8_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 309, "end_line": 321, "span_ids": ["test_getitem_integer_slice"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_integer_slice():\n df = pd.DataFrame({\"A\": range(6)})\n ddf = dd.from_pandas(df, 2)\n # integer slicing is iloc based\n with pytest.raises(NotImplementedError):\n ddf[1:3]\n\n df = pd.DataFrame({\"A\": range(6)}, index=[1.0, 2.0, 3.0, 5.0, 10.0, 11.0])\n ddf = dd.from_pandas(df, 2)\n # except for float dtype indexes\n assert_eq(ddf[2:8], df[2:8])\n assert_eq(ddf[2:], df[2:])\n assert_eq(ddf[:8], df[:8])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_on_numpy_datetimes_test_loc_on_pandas_datetimes.assert_eq_a_loc_2014_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_on_numpy_datetimes_test_loc_on_pandas_datetimes.assert_eq_a_loc_2014_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 333, "end_line": 350, "span_ids": ["test_loc_on_numpy_datetimes", "test_loc_on_pandas_datetimes"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_on_numpy_datetimes():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3]}, index=list(map(np.datetime64, [\"2014\", \"2015\", \"2016\"]))\n )\n a = dd.from_pandas(df, 2)\n a.divisions = tuple(map(np.datetime64, a.divisions))\n\n assert_eq(a.loc[\"2014\":\"2015\"], a.loc[\"2014\":\"2015\"])\n\n\ndef test_loc_on_pandas_datetimes():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3]}, index=list(map(pd.Timestamp, [\"2014\", \"2015\", \"2016\"]))\n )\n a = dd.from_pandas(df, 2)\n a.divisions = tuple(map(pd.Timestamp, a.divisions))\n\n assert_eq(a.loc[\"2014\":\"2015\"], a.loc[\"2014\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_datetime_no_freq_test_coerce_loc_index.for_t_in_pd_Timestamp_n.assert_isinstance__coerce": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_datetime_no_freq_test_coerce_loc_index.for_t_in_pd_Timestamp_n.assert_isinstance__coerce", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 344, "end_line": 360, "span_ids": ["test_coerce_loc_index", "test_loc_datetime_no_freq"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_datetime_no_freq():\n # https://github.com/dask/dask/issues/2389\n\n datetime_index = pd.date_range(\"2016-01-01\", \"2016-01-31\", freq=\"12h\")\n datetime_index.freq = None # FORGET FREQUENCY\n df = pd.DataFrame({\"num\": range(len(datetime_index))}, index=datetime_index)\n\n ddf = dd.from_pandas(df, npartitions=1)\n slice_ = slice(\"2016-01-03\", \"2016-01-05\")\n result = ddf.loc[slice_, :]\n expected = df.loc[slice_, :]\n assert_eq(result, expected)\n\n\ndef test_coerce_loc_index():\n for t in [pd.Timestamp, np.datetime64]:\n assert isinstance(_coerce_loc_index([t(\"2014\")], \"2014\"), t)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_timestamp_str_test_loc_timestamp_str.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_timestamp_str_test_loc_timestamp_str.None_15", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 372, "end_line": 431, "span_ids": ["test_loc_timestamp_str"], "tokens": 707}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_timestamp_str():\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n # partial string slice\n assert_eq(df.loc[\"2011-01-02\"], ddf.loc[\"2011-01-02\"])\n assert_eq(df.loc[\"2011-01-02\":\"2011-01-10\"], ddf.loc[\"2011-01-02\":\"2011-01-10\"])\n # same reso, dask result is always DataFrame\n assert_eq(\n df.loc[\"2011-01-02 10:00\"].to_frame().T,\n ddf.loc[\"2011-01-02 10:00\"],\n **CHECK_FREQ,\n )\n\n # series\n assert_eq(df.A.loc[\"2011-01-02\"], ddf.A.loc[\"2011-01-02\"], **CHECK_FREQ)\n assert_eq(\n df.A.loc[\"2011-01-02\":\"2011-01-10\"],\n ddf.A.loc[\"2011-01-02\":\"2011-01-10\"],\n **CHECK_FREQ,\n )\n\n # slice with timestamp (dask result must be DataFrame)\n assert_eq(\n df.loc[pd.Timestamp(\"2011-01-02\")].to_frame().T,\n ddf.loc[pd.Timestamp(\"2011-01-02\")],\n **CHECK_FREQ,\n )\n assert_eq(\n df.loc[pd.Timestamp(\"2011-01-02\") : pd.Timestamp(\"2011-01-10\")],\n ddf.loc[pd.Timestamp(\"2011-01-02\") : pd.Timestamp(\"2011-01-10\")],\n **CHECK_FREQ,\n )\n assert_eq(\n df.loc[pd.Timestamp(\"2011-01-02 10:00\")].to_frame().T,\n ddf.loc[pd.Timestamp(\"2011-01-02 10:00\")],\n **CHECK_FREQ,\n )\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"M\", periods=100),\n )\n ddf = dd.from_pandas(df, 50)\n assert_eq(df.loc[\"2011-01\"], ddf.loc[\"2011-01\"])\n assert_eq(df.loc[\"2011\"], ddf.loc[\"2011\"])\n\n assert_eq(df.loc[\"2011-01\":\"2012-05\"], ddf.loc[\"2011-01\":\"2012-05\"])\n assert_eq(df.loc[\"2011\":\"2015\"], ddf.loc[\"2011\":\"2015\"])\n\n # series\n assert_eq(df.B.loc[\"2011-01\"], ddf.B.loc[\"2011-01\"])\n assert_eq(df.B.loc[\"2011\"], ddf.B.loc[\"2011\"])\n\n assert_eq(df.B.loc[\"2011-01\":\"2012-05\"], ddf.B.loc[\"2011-01\":\"2012-05\"])\n assert_eq(df.B.loc[\"2011\":\"2015\"], ddf.B.loc[\"2011\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_period_str_test_getitem_period_str.assert_eq_df_2011_2015": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_period_str_test_getitem_period_str.assert_eq_df_2011_2015", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 483, "end_line": 525, "span_ids": ["test_getitem_period_str"], "tokens": 457}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_period_str():\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": 
np.random.randn(100)},\n index=pd.period_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n # partial string slice\n if PANDAS_GT_120:\n with pytest.warns(\n FutureWarning, match=\"Indexing a DataFrame with a datetimelike\"\n ):\n assert_eq(df.loc[\"2011-01-02\"], ddf[\"2011-01-02\"])\n else:\n assert_eq(df[\"2011-01-02\"], ddf[\"2011-01-02\"])\n assert_eq(df[\"2011-01-02\":\"2011-01-10\"], ddf[\"2011-01-02\":\"2011-01-10\"])\n # same reso, dask result is always DataFrame\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.period_range(\"2011-01-01\", freq=\"D\", periods=100),\n )\n ddf = dd.from_pandas(df, 50)\n\n if PANDAS_GT_120:\n with pytest.warns(\n FutureWarning, match=\"Indexing a DataFrame with a datetimelike\"\n ):\n assert_eq(df.loc[\"2011-01\"], ddf[\"2011-01\"])\n else:\n assert_eq(df[\"2011-01\"], ddf[\"2011-01\"])\n\n if PANDAS_GT_120:\n with pytest.warns(\n FutureWarning, match=\"Indexing a DataFrame with a datetimelike\"\n ):\n assert_eq(df.loc[\"2011\"], ddf[\"2011\"])\n else:\n assert_eq(df[\"2011\"], ddf[\"2011\"])\n\n assert_eq(df[\"2011-01\":\"2012-05\"], ddf[\"2011-01\":\"2012-05\"])\n assert_eq(df[\"2011\":\"2015\"], ddf[\"2011\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_test_iloc_series.with_pytest_raises_Attrib.ds_iloc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_test_iloc_series.with_pytest_raises_Attrib.ds_iloc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 534, "end_line": 549, "span_ids": ["test_iloc_series", "test_iloc"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"indexer\", [0, [0], [0, 1], [1, 0], [False, True, True]])\ndef test_iloc(indexer):\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.iloc[:, indexer]\n expected = df.iloc[:, indexer]\n\n assert_eq(result, expected)\n\n\ndef test_iloc_series():\n s = pd.Series([1, 2, 3])\n ds = dd.from_pandas(s, 2)\n with pytest.raises(AttributeError):\n ds.iloc[:]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_raises_test_iloc_raises.with_pytest_raises_IndexE.ddf_iloc_5_6_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_raises_test_iloc_raises.with_pytest_raises_IndexE.ddf_iloc_5_6_", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 552, "end_line": 566, "span_ids": ["test_iloc_raises"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_raises():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n\n with pytest.raises(NotImplementedError):\n ddf.iloc[[0, 1], :]\n\n with pytest.raises(NotImplementedError):\n ddf.iloc[[0, 1], [0, 1]]\n\n with pytest.raises(ValueError):\n ddf.iloc[[0, 1], [0, 1], [1, 2]]\n\n with pytest.raises(IndexError):\n ddf.iloc[:, [5, 6]]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_duplicate_columns_test_iloc_duplicate_columns.assert_eq_select_negative": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_duplicate_columns_test_iloc_duplicate_columns.assert_eq_select_negative", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 569, "end_line": 589, "span_ids": ["test_iloc_duplicate_columns"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_duplicate_columns():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n df.columns = [\"A\", \"A\", \"C\"]\n ddf.columns = [\"A\", \"A\", \"C\"]\n\n selection = ddf.iloc[:, 2]\n # Check that `iloc` is called instead of getitem\n assert any([key.startswith(\"iloc\") for key in selection.dask.layers.keys()])\n\n select_first = ddf.iloc[:, 1]\n assert_eq(select_first, df.iloc[:, 1])\n\n select_zeroth = ddf.iloc[:, 0]\n assert_eq(select_zeroth, df.iloc[:, 0])\n\n select_list_cols = ddf.iloc[:, [0, 2]]\n assert_eq(select_list_cols, df.iloc[:, [0, 2]])\n\n select_negative = ddf.iloc[:, -1:-3:-1]\n assert_eq(select_negative, df.iloc[:, -1:-3:-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_dispatch_to_getitem_test_iloc_dispatch_to_getitem.assert_eq_select_negative": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_dispatch_to_getitem_test_iloc_dispatch_to_getitem.assert_eq_select_negative", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 592, "end_line": 611, "span_ids": ["test_iloc_dispatch_to_getitem"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_dispatch_to_getitem():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n\n selection = ddf.iloc[:, 2]\n\n assert all([not key.startswith(\"iloc\") for key in selection.dask.layers.keys()])\n assert any([key.startswith(\"getitem\") for key in selection.dask.layers.keys()])\n\n select_first = ddf.iloc[:, 1]\n assert_eq(select_first, df.iloc[:, 1])\n\n select_zeroth = ddf.iloc[:, 0]\n assert_eq(select_zeroth, df.iloc[:, 0])\n\n select_list_cols = ddf.iloc[:, [0, 2]]\n assert_eq(select_list_cols, df.iloc[:, [0, 2]])\n\n select_negative = ddf.iloc[:, -1:-3:-1]\n assert_eq(select_negative, df.iloc[:, -1:-3:-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_df_right_df_right.return.pd_DataFrame_dict_idx_idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_df_right_df_right.return.pd_DataFrame_dict_idx_idx", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 23, "end_line": 32, "span_ids": ["df_right"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef df_right():\n # Create frame with 10 partitions\n # Frame has 11 distinct idx values\n partition_sizes = np.array([4, 2, 5, 3, 2, 5, 9, 4, 7, 4, 8])\n idx = [i for i, s in enumerate(partition_sizes) for _ in range(s)]\n k = [i for s in partition_sizes for i in range(s)]\n vi = range(len(k))\n\n return pd.DataFrame(dict(idx=idx, k=k, v1=vi)).set_index([\"idx\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_single_test_merge_known_to_single.assert_len_result___dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_single_test_merge_known_to_single.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 119, "span_ids": ["test_merge_known_to_single"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"left\"])\ndef test_merge_known_to_single(\n df_left, df_right, ddf_left, ddf_right_single, on, how, shuffle_method\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left.merge(ddf_right_single, on=on, how=how, shuffle=shuffle_method)\n\n # Assertions\n assert_eq(result, expected)\n assert result.divisions == ddf_left.divisions\n assert len(result.__dask_graph__()) < 30", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_single_to_known_test_merge_single_to_known.assert_len_result___dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_single_to_known_test_merge_single_to_known.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 122, "end_line": 135, "span_ids": ["test_merge_single_to_known"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"right\"])\ndef test_merge_single_to_known(\n df_left, df_right, ddf_left_single, ddf_right, on, how, shuffle_method\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left_single.merge(ddf_right, on=on, how=how, shuffle=shuffle_method)\n\n # Assertions\n assert_eq(result, expected)\n assert result.divisions == ddf_right.divisions\n assert len(result.__dask_graph__()) < 30", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_test_align_partitions._different_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_test_align_partitions._different_index", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": 
"text/x-python", "category": "test", "start_line": 29, "end_line": 86, "span_ids": ["test_align_partitions"], "tokens": 878}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_partitions():\n A = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n a = dd.repartition(A, [10, 40, 60])\n\n B = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": list(\"abda\")}, index=[30, 70, 80, 100])\n b = dd.repartition(B, [30, 80, 100])\n\n s = dd.core.Scalar({(\"s\", 0): 10}, \"s\", \"i8\")\n\n (aa, bb), divisions, L = align_partitions(a, b)\n\n def _check(a, b, aa, bb):\n assert isinstance(a, dd.DataFrame)\n assert isinstance(b, dd.DataFrame)\n assert isinstance(aa, dd.DataFrame)\n assert isinstance(bb, dd.DataFrame)\n assert_eq(a, aa)\n assert_eq(b, bb)\n assert divisions == (10, 30, 40, 60, 80, 100)\n assert isinstance(L, list)\n assert len(divisions) == 1 + len(L)\n\n _check(a, b, aa, bb)\n assert L == [\n [(aa._name, 0), (bb._name, 0)],\n [(aa._name, 1), (bb._name, 1)],\n [(aa._name, 2), (bb._name, 2)],\n [(aa._name, 3), (bb._name, 3)],\n [(aa._name, 4), (bb._name, 4)],\n ]\n\n (aa, ss, bb), divisions, L = align_partitions(a, s, b)\n _check(a, b, aa, bb)\n assert L == [\n [(aa._name, 0), None, (bb._name, 0)],\n [(aa._name, 1), None, (bb._name, 1)],\n [(aa._name, 2), None, (bb._name, 2)],\n [(aa._name, 3), None, (bb._name, 3)],\n [(aa._name, 4), None, (bb._name, 4)],\n ]\n assert_eq(ss, 10)\n\n ldf = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n rdf = pd.DataFrame({\"c\": [1, 2, 3, 4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]})\n\n for lhs, rhs in [\n (dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),\n (dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2)),\n ]:\n (lresult, rresult), div, parts = align_partitions(lhs, rhs)\n assert_eq(lresult, ldf)\n assert_eq(rresult, rdf)\n\n # different index\n # ... other code\n\n for lhs, rhs in [\n (dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),\n (dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2)),\n ]:\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions.ldf_7_test_align_partitions.None_1.assert_eq_rresult_rdf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions.ldf_7_test_align_partitions.None_1.assert_eq_rresult_rdf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 102, "span_ids": ["test_align_partitions"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_partitions():\n # ... other code\n _check(a, b, aa, bb)\n # ... other code\n ldf = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n rdf = pd.DataFrame(\n {\"c\": [1, 2, 3, 4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"fghijkl\")\n )\n\n for lhs, rhs in [\n (dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),\n (dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),\n (dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2)),\n ]:\n (lresult, rresult), div, parts = align_partitions(lhs, rhs)\n assert_eq(lresult, ldf)\n assert_eq(rresult, rdf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_unknown_divisions_test_align_partitions_unknown_divisions.None_1.align_partitions_ddf_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_align_partitions_unknown_divisions_test_align_partitions_unknown_divisions.None_1.align_partitions_ddf_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 105, "end_line": 122, "span_ids": ["test_align_partitions_unknown_divisions"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_partitions_unknown_divisions():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n # One known, one unknown\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n align_partitions(ddf, ddf2)\n\n # Both unknown\n 
ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf.known_divisions\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n align_partitions(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test__maybe_align_partitions_test__maybe_align_partitions.None_1._maybe_align_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test__maybe_align_partitions_test__maybe_align_partitions.None_1._maybe_align_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 167, "span_ids": ["test__maybe_align_partitions"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__maybe_align_partitions():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n # Both known, same divisions\n ddf = dd.from_pandas(df + 1, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=2)\n\n a, b = _maybe_align_partitions([ddf, ddf2])\n assert a is ddf\n assert b is ddf2\n\n # Both unknown, same divisions\n ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf.known_divisions\n assert not ddf2.known_divisions\n\n a, b = _maybe_align_partitions([ddf, ddf2])\n assert a is ddf\n assert b is ddf2\n\n # Both known, different divisions\n ddf = dd.from_pandas(df + 1, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=3)\n\n a, b = _maybe_align_partitions([ddf, ddf2])\n assert a.divisions == b.divisions\n\n # Both unknown, different divisions\n ddf = dd.from_pandas(df + 1, npartitions=2, sort=False)\n ddf2 = dd.from_pandas(df, npartitions=3, sort=False)\n assert not ddf.known_divisions\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n _maybe_align_partitions([ddf, ddf2])\n\n # One known, one unknown\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = dd.from_pandas(df, npartitions=2, sort=False)\n assert not ddf2.known_divisions\n\n with pytest.raises(ValueError):\n _maybe_align_partitions([ddf, ddf2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_indexed_dataframe_to_indexed_dataframe_test_merge_indexed_dataframe_to_indexed_dataframe.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_indexed_dataframe_to_indexed_dataframe_test_merge_indexed_dataframe_to_indexed_dataframe.None_9", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 202, "span_ids": ["test_merge_indexed_dataframe_to_indexed_dataframe"], "tokens": 440}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_indexed_dataframe_to_indexed_dataframe():\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6]}, index=[1, 2, 3, 4, 6, 7])\n a = dd.repartition(A, [1, 4, 7])\n\n B = pd.DataFrame({\"y\": list(\"abcdef\")}, index=[1, 2, 4, 5, 6, 8])\n b = dd.repartition(B, [1, 2, 5, 8])\n\n c = merge_indexed_dataframes(a, b, how=\"left\")\n assert c.divisions[0] == a.divisions[0]\n assert c.divisions[-1] == max(a.divisions + b.divisions)\n assert_eq(c, A.join(B))\n\n c = merge_indexed_dataframes(a, b, how=\"right\")\n assert c.divisions[0] == b.divisions[0]\n assert c.divisions[-1] == b.divisions[-1]\n assert_eq(c, A.join(B, how=\"right\"))\n\n c = merge_indexed_dataframes(a, b, how=\"inner\")\n assert c.divisions[0] == 1\n assert c.divisions[-1] == max(a.divisions + b.divisions)\n assert_eq(c.compute(), A.join(B, how=\"inner\"))\n\n c = merge_indexed_dataframes(a, b, how=\"outer\")\n assert c.divisions[0] == 1\n assert c.divisions[-1] == 8\n assert_eq(c.compute(), A.join(B, how=\"outer\"))\n\n assert sorted(merge_indexed_dataframes(a, b, how=\"inner\").dask) == sorted(\n merge_indexed_dataframes(a, b, how=\"inner\").dask\n )\n assert sorted(merge_indexed_dataframes(a, b, how=\"inner\").dask) != sorted(\n merge_indexed_dataframes(a, b, how=\"outer\").dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_list_eq_list_eq.dd__compat_assert_numpy_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_list_eq_list_eq.dd__compat_assert_numpy_a", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 223, "span_ids": ["list_eq"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def list_eq(aa, bb):\n if isinstance(aa, dd.DataFrame):\n a = aa.compute(scheduler=\"sync\")\n else:\n a = aa\n if isinstance(bb, dd.DataFrame):\n b = bb.compute(scheduler=\"sync\")\n else:\n b = bb\n tm.assert_index_equal(a.columns, b.columns)\n\n if isinstance(a, pd.DataFrame):\n av = a.sort_values(list(a.columns)).values\n bv = b.sort_values(list(b.columns)).values\n else:\n av = a.sort_values().values\n bv = b.sort_values().values\n\n dd._compat.assert_numpy_array_equal(av, bv)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_hash_join_test_hash_join.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_hash_join_test_hash_join.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 256, "span_ids": ["test_hash_join"], "tokens": 391}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"left\", \"right\", \"outer\"])\ndef test_hash_join(how, shuffle_method):\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": [1, 1, 2, 2, 3, 4]})\n a = dd.repartition(A, [0, 4, 5])\n\n B = pd.DataFrame({\"y\": [1, 3, 4, 4, 5, 6], \"z\": [6, 5, 4, 3, 2, 1]})\n b = dd.repartition(B, [0, 2, 5])\n\n c = hash_join(a, \"y\", b, \"y\", how)\n\n result = c.compute()\n expected = pd.merge(A, B, how, \"y\")\n list_eq(result, expected)\n\n # Different columns and npartitions\n c = hash_join(a, \"x\", b, \"z\", \"outer\", npartitions=3, shuffle=shuffle_method)\n assert c.npartitions == 3\n\n result = c.compute(scheduler=\"single-threaded\")\n expected = pd.merge(A, B, \"outer\", None, \"x\", \"z\")\n\n list_eq(result, expected)\n\n assert (\n hash_join(a, \"y\", b, \"y\", \"inner\", shuffle=shuffle_method)._name\n == hash_join(a, \"y\", b, \"y\", \"inner\", shuffle=shuffle_method)._name\n )\n assert (\n hash_join(a, \"y\", b, \"y\", \"inner\", shuffle=shuffle_method)._name\n != hash_join(a, \"y\", b, \"y\", \"outer\", shuffle=shuffle_method)._name\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_sequential_joins_test_sequential_joins.assert_eq_multi_join_pd_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_sequential_joins_test_sequential_joins.assert_eq_multi_join_pd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 260, "end_line": 278, "span_ids": ["test_sequential_joins"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sequential_joins():\n # Pandas version of multiple inner joins\n df1 = pd.DataFrame(\n {\"key\": list(range(6)), \"A\": [\"A0\", \"A1\", \"A2\", \"A3\", \"A4\", \"A5\"]}\n )\n df2 = 
pd.DataFrame({\"key\": list(range(4)), \"B\": [\"B0\", \"B1\", \"B2\", \"B3\"]})\n df3 = pd.DataFrame({\"key\": list(range(1, 5)), \"C\": [\"C0\", \"C1\", \"C2\", \"C3\"]})\n\n join_pd = df1.join(df2, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n multi_join_pd = join_pd.join(df3, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n\n # Dask version of multiple inner joins\n ddf1 = dd.from_pandas(df1, npartitions=3)\n ddf2 = dd.from_pandas(df2, npartitions=2)\n ddf3 = dd.from_pandas(df3, npartitions=2)\n\n join_dd = ddf1.join(ddf2, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n multi_join_dd = join_dd.join(ddf3, how=\"inner\", lsuffix=\"_l\", rsuffix=\"_r\")\n assert_eq(multi_join_pd, multi_join_dd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_test_merge_asof_indexed.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_test_merge_asof_indexed.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 281, "end_line": 296, "span_ids": ["test_merge_asof_indexed"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_indexed():\n A = pd.DataFrame(\n {\"left_val\": list(\"abcd\" * 3)},\n index=[1, 3, 7, 9, 10, 13, 14, 17, 20, 24, 25, 28],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\"right_val\": list(\"xyz\" * 4)},\n index=[1, 2, 3, 6, 7, 10, 12, 14, 16, 19, 23, 26],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(A, B, left_index=True, right_index=True)\n c = dd.merge_asof(a, b, left_index=True, right_index=True)\n\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_basic_test_merge_asof_on_basic.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_basic_test_merge_asof_on_basic.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 307, "span_ids": ["test_merge_asof_on_basic"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"def test_merge_asof_on_basic():\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]})\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"a\": [1, 2, 3, 6, 7], \"right_val\": [1, 2, 3, 6, 7]})\n b = dd.from_pandas(B, npartitions=2)\n\n C = pd.merge_asof(A, B, on=\"a\")\n c = dd.merge_asof(a, b, on=\"a\")\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_test_merge_asof_on.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_test_merge_asof_on.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 310, "end_line": 324, "span_ids": ["test_merge_asof_on"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"allow_exact_matches\", [True, False])\n@pytest.mark.parametrize(\"direction\", [\"backward\", \"forward\", \"nearest\"])\ndef test_merge_asof_on(allow_exact_matches, direction):\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]})\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"a\": [1, 2, 3, 6, 7], \"right_val\": [1, 2, 3, 6, 7]})\n b = dd.from_pandas(B, npartitions=2)\n\n C = pd.merge_asof(\n A, B, on=\"a\", allow_exact_matches=allow_exact_matches, direction=direction\n )\n c = dd.merge_asof(\n a, b, on=\"a\", allow_exact_matches=allow_exact_matches, direction=direction\n )\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_left_on_right_index_test_merge_asof_left_on_right_index.for_nparts_in_1_2_3_.for_a1_idx2_in_.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_left_on_right_index_test_merge_asof_left_on_right_index.for_nparts_in_1_2_3_.for_a1_idx2_in_.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 326, "end_line": 389, "span_ids": ["test_merge_asof_left_on_right_index"], "tokens": 622}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"allow_exact_matches\", [True, 
False])\n@pytest.mark.parametrize(\"direction\", [\"backward\", \"forward\", \"nearest\"])\n@pytest.mark.parametrize(\"unknown_divisions\", [False, True])\ndef test_merge_asof_left_on_right_index(\n allow_exact_matches, direction, unknown_divisions\n):\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]}, index=[10, 20, 30])\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"right_val\": [2, 3, 6, 7]}, index=[2, 3, 6, 7])\n b = dd.from_pandas(B, npartitions=2)\n\n if unknown_divisions:\n a.divisions = (None,) * len(a.divisions)\n\n C = pd.merge_asof(\n A,\n B,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n c = dd.merge_asof(\n a,\n b,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n assert_eq(c, C)\n\n for nparts in [1, 2, 3]:\n for a1, idx2 in (\n ([5, 10, 15, 20], [1, 2, 3, 4]),\n ([1, 2, 3, 4], [5, 10, 15, 20]),\n ([5, 5, 10, 10, 15, 15], [4, 5, 6, 9, 10, 11, 14, 15, 16]),\n ([5, 10, 15], [4, 4, 5, 5, 6, 6, 9, 9, 10, 10, 11, 11]),\n ):\n A = pd.DataFrame({\"a\": a1}, index=[x * 10 for x in a1])\n a = dd.from_pandas(A, npartitions=nparts)\n B = pd.DataFrame({\"b\": idx2}, index=idx2)\n b = dd.from_pandas(B, npartitions=nparts)\n\n if unknown_divisions:\n a.divisions = (None,) * len(a.divisions)\n\n C = pd.merge_asof(\n A,\n B,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n c = dd.merge_asof(\n a,\n b,\n left_on=\"a\",\n right_index=True,\n allow_exact_matches=allow_exact_matches,\n direction=direction,\n )\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_two_partitions_test_merge_asof_indexed_two_partitions.assert_eq_c_C_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_indexed_two_partitions_test_merge_asof_indexed_two_partitions.assert_eq_c_C_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 393, "end_line": 401, "span_ids": ["test_merge_asof_indexed_two_partitions"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_indexed_two_partitions():\n A = pd.DataFrame({\"left_val\": [\"a\", \"b\", \"c\"]}, index=[1, 5, 10])\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"right_val\": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])\n b = dd.from_pandas(B, npartitions=2)\n\n C = pd.merge_asof(A, B, left_index=True, right_index=True)\n c = dd.merge_asof(a, b, left_index=True, right_index=True)\n assert_eq(c, C)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_test_merge_asof_on_by.assert_eq_c_C_check_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_test_merge_asof_on_by.assert_eq_c_C_check_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 404, "end_line": 453, "span_ids": ["test_merge_asof_on_by"], "tokens": 649}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_on_by():\n times_A = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.030\",\n \"2016-05-25 13:30:00.041\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.049\",\n \"2016-05-25 13:30:00.072\",\n \"2016-05-25 13:30:00.075\",\n ]\n ]\n tickers_A = [\"GOOG\", \"MSFT\", \"MSFT\", \"MSFT\", \"GOOG\", \"AAPL\", \"GOOG\", \"MSFT\"]\n bids_A = [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01]\n asks_A = [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]\n times_B = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.038\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n ]\n ]\n tickers_B = [\"MSFT\", \"MSFT\", \"GOOG\", \"GOOG\", \"AAPL\"]\n prices_B = [51.95, 51.95, 720.77, 720.92, 98.00]\n quantities_B = [75, 155, 100, 100, 100]\n\n A = pd.DataFrame(\n {\"time\": times_A, \"ticker\": tickers_A, \"bid\": bids_A, \"ask\": asks_A},\n columns=[\"time\", \"ticker\", \"bid\", \"ask\"],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\n \"time\": times_B,\n \"ticker\": tickers_B,\n \"price\": prices_B,\n \"quantity\": quantities_B,\n },\n columns=[\"time\", \"ticker\", \"price\", \"quantity\"],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(B, A, on=\"time\", by=\"ticker\")\n c = dd.merge_asof(b, a, on=\"time\", by=\"ticker\")\n assert_eq(c, C, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_test_merge_asof_on_by_tolerance.assert_eq_c_C_check_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_test_merge_asof_on_by_tolerance.assert_eq_c_C_check_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 456, "end_line": 505, "span_ids": ["test_merge_asof_on_by_tolerance"], "tokens": 668}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_on_by_tolerance():\n times_A = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.030\",\n \"2016-05-25 13:30:00.041\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.049\",\n \"2016-05-25 13:30:00.072\",\n \"2016-05-25 13:30:00.075\",\n ]\n ]\n tickers_A = [\"GOOG\", \"MSFT\", \"MSFT\", \"MSFT\", \"GOOG\", \"AAPL\", \"GOOG\", \"MSFT\"]\n bids_A = [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01]\n asks_A = [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]\n times_B = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.038\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n ]\n ]\n tickers_B = [\"MSFT\", \"MSFT\", \"GOOG\", \"GOOG\", \"AAPL\"]\n prices_B = [51.95, 51.95, 720.77, 720.92, 98.00]\n quantities_B = [75, 155, 100, 100, 100]\n\n A = pd.DataFrame(\n {\"time\": times_A, \"ticker\": tickers_A, \"bid\": bids_A, \"ask\": asks_A},\n columns=[\"time\", \"ticker\", \"bid\", \"ask\"],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\n \"time\": times_B,\n \"ticker\": tickers_B,\n \"price\": prices_B,\n \"quantity\": quantities_B,\n },\n columns=[\"time\", \"ticker\", \"price\", \"quantity\"],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(B, A, on=\"time\", by=\"ticker\", tolerance=pd.Timedelta(\"2ms\"))\n c = dd.merge_asof(b, a, on=\"time\", by=\"ticker\", tolerance=pd.Timedelta(\"2ms\"))\n assert_eq(c, C, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_no_exact_matches_test_merge_asof_on_by_tolerance_no_exact_matches.assert_eq_c_C_check_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_on_by_tolerance_no_exact_matches_test_merge_asof_on_by_tolerance_no_exact_matches.assert_eq_c_C_check_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 508, "end_line": 571, "span_ids": ["test_merge_asof_on_by_tolerance_no_exact_matches"], "tokens": 699}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_on_by_tolerance_no_exact_matches():\n times_A = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.030\",\n \"2016-05-25 13:30:00.041\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.049\",\n \"2016-05-25 13:30:00.072\",\n \"2016-05-25 13:30:00.075\",\n ]\n ]\n tickers_A = [\"GOOG\", \"MSFT\", \"MSFT\", \"MSFT\", \"GOOG\", \"AAPL\", \"GOOG\", \"MSFT\"]\n bids_A = 
[720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01]\n asks_A = [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]\n times_B = [\n pd.to_datetime(d)\n for d in [\n \"2016-05-25 13:30:00.023\",\n \"2016-05-25 13:30:00.038\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n \"2016-05-25 13:30:00.048\",\n ]\n ]\n tickers_B = [\"MSFT\", \"MSFT\", \"GOOG\", \"GOOG\", \"AAPL\"]\n prices_B = [51.95, 51.95, 720.77, 720.92, 98.00]\n quantities_B = [75, 155, 100, 100, 100]\n\n A = pd.DataFrame(\n {\"time\": times_A, \"ticker\": tickers_A, \"bid\": bids_A, \"ask\": asks_A},\n columns=[\"time\", \"ticker\", \"bid\", \"ask\"],\n )\n a = dd.from_pandas(A, npartitions=4)\n B = pd.DataFrame(\n {\n \"time\": times_B,\n \"ticker\": tickers_B,\n \"price\": prices_B,\n \"quantity\": quantities_B,\n },\n columns=[\"time\", \"ticker\", \"price\", \"quantity\"],\n )\n b = dd.from_pandas(B, npartitions=3)\n\n C = pd.merge_asof(\n B,\n A,\n on=\"time\",\n by=\"ticker\",\n tolerance=pd.Timedelta(\"10ms\"),\n allow_exact_matches=False,\n )\n c = dd.merge_asof(\n b,\n a,\n on=\"time\",\n by=\"ticker\",\n tolerance=pd.Timedelta(\"10ms\"),\n allow_exact_matches=False,\n )\n assert_eq(c, C, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_unsorted_raises_test_merge_asof_unsorted_raises.with_pytest_raises_ValueE.result_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_unsorted_raises_test_merge_asof_unsorted_raises.with_pytest_raises_ValueE.result_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 583, "span_ids": ["test_merge_asof_unsorted_raises"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_unsorted_raises():\n A = pd.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]})\n a = dd.from_pandas(A, npartitions=2)\n B = pd.DataFrame({\"a\": [2, 1, 3, 6, 7], \"right_val\": [1, 2, 3, 6, 7]})\n b = dd.from_pandas(B, npartitions=2)\n\n result = dd.merge_asof(a, b, on=\"a\")\n # raise at runtime\n with pytest.raises(ValueError, match=\"right keys\"):\n result.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_indexed_concat_test_indexed_concat.with_warnings_catch_warni.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_indexed_concat_test_indexed_concat.with_warnings_catch_warni.None_1", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 586, "end_line": 607, "span_ids": ["test_indexed_concat"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\"])\ndef test_indexed_concat(join):\n A = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"y\": list(\"abcdef\")}, index=[1, 2, 3, 4, 6, 7]\n )\n a = dd.repartition(A, [1, 4, 7])\n\n B = pd.DataFrame({\"x\": [10, 20, 40, 50, 60, 80]}, index=[1, 2, 4, 5, 6, 8])\n b = dd.repartition(B, [1, 2, 5, 8])\n\n expected = pd.concat([A, B], axis=0, join=join, sort=False)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n result = concat_indexed_dataframes([a, b], join=join)\n assert_eq(result, expected)\n assert sorted(concat_indexed_dataframes([a, b], join=join).dask) == sorted(\n concat_indexed_dataframes([a, b], join=join).dask\n )\n assert sorted(concat_indexed_dataframes([a, b], join=\"inner\").dask) != sorted(\n concat_indexed_dataframes([a, b], join=\"outer\").dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_test_concat.None_1.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_test_concat.None_1.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 610, "end_line": 649, "span_ids": ["test_concat"], "tokens": 521}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\"])\ndef test_concat(join):\n pdf1 = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"y\": list(\"abcdef\")}, index=[1, 2, 3, 4, 6, 7]\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"y\": list(\"abcdef\")}, index=[8, 9, 10, 11, 12, 13]\n )\n ddf2 = dd.from_pandas(pdf2, 2)\n\n # different columns\n pdf3 = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 6, 7], \"z\": list(\"abcdef\")}, index=[8, 9, 10, 11, 12, 13]\n )\n ddf3 = dd.from_pandas(pdf3, 2)\n\n kwargs = {\"sort\": False}\n\n for (dd1, dd2, pd1, pd2) in [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf1, ddf3, pdf1, pdf3),\n ]:\n\n expected = pd.concat([pd1, pd2], join=join, **kwargs)\n result = dd.concat([dd1, dd2], join=join, **kwargs)\n assert_eq(result, expected)\n\n # test outer only, inner has a problem on pandas side\n for (dd1, dd2, pd1, pd2) in [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf1, ddf3, pdf1, pdf3),\n (ddf1.x, ddf2.x, pdf1.x, 
pdf2.x),\n (ddf1.x, ddf3.z, pdf1.x, pdf3.z),\n (ddf1.x, ddf2.x, pdf1.x, pdf2.x),\n (ddf1.x, ddf3.z, pdf1.x, pdf3.z),\n ]:\n expected = pd.concat([pd1, pd2], **kwargs)\n result = dd.concat([dd1, dd2], **kwargs)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_different_dtypes_test_concat_different_dtypes.assert_dask_dtypes_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_different_dtypes_test_concat_different_dtypes.assert_dask_dtypes_pa", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 652, "end_line": 679, "span_ids": ["test_concat_different_dtypes"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"value_1, value_2\",\n [\n (1.0, 1),\n (1.0, \"one\"),\n (1.0, pd.to_datetime(\"1970-01-01\")),\n (1, \"one\"),\n (1, pd.to_datetime(\"1970-01-01\")),\n (\"one\", pd.to_datetime(\"1970-01-01\")),\n ],\n)\ndef test_concat_different_dtypes(value_1, value_2):\n # check that the resulting dataframe has coherent dtypes\n # refer to https://github.com/dask/dask/issues/4685 and\n # https://github.com/dask/dask/issues/5968\n df_1 = pd.DataFrame({\"x\": [value_1]})\n df_2 = pd.DataFrame({\"x\": [value_2]})\n df = pd.concat([df_1, df_2], axis=0)\n\n pandas_dtype = df[\"x\"].dtype\n\n ddf_1 = dd.from_pandas(df_1, npartitions=1)\n ddf_2 = dd.from_pandas(df_2, npartitions=1)\n ddf = dd.concat([ddf_1, ddf_2], axis=0)\n\n dask_dtypes = list(ddf.map_partitions(lambda x: x.dtypes).compute())\n\n assert dask_dtypes == [pandas_dtype, pandas_dtype]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_columns_dtypes_test_merge_columns_dtypes.assert_has_nans_and_warn": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_columns_dtypes_test_merge_columns_dtypes.assert_has_nans_and_warn", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 733, "end_line": 767, "span_ids": ["test_merge_columns_dtypes"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"on_index\", [True, False])\ndef test_merge_columns_dtypes(how, on_index):\n # tests results of merges with merge columns having different dtypes;\n # asserts that either the merge was successful or the corresponding warning is raised\n # addresses issue #4574\n\n df1 = pd.DataFrame(\n {\"A\": list(np.arange(5).astype(float)) * 2, \"B\": list(np.arange(5)) * 2}\n )\n df2 = pd.DataFrame({\"A\": np.arange(5), \"B\": np.arange(5)})\n\n a = dd.from_pandas(df1, 2) # merge column \"A\" is float\n b = dd.from_pandas(df2, 2) # merge column \"A\" is int\n\n on = [\"A\"]\n left_index = right_index = on_index\n\n if on_index:\n a = a.set_index(\"A\")\n b = b.set_index(\"A\")\n on = None\n\n with warnings.catch_warnings(record=True) as record:\n warnings.simplefilter(\"always\")\n result = dd.merge(\n a, b, on=on, how=how, left_index=left_index, right_index=right_index\n )\n warned = any(\"merge column data type mismatches\" in str(r) for r in record)\n\n # result type depends on merge operation -> convert to pandas\n result = result if isinstance(result, pd.DataFrame) else result.compute()\n\n has_nans = result.isna().values.any()\n assert (has_nans and warned) or not has_nans", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_test_merge._pd_merge_A_B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_test_merge._pd_merge_A_B_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 771, "end_line": 850, "span_ids": ["test_merge"], "tokens": 801}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_merge(how, shuffle_method):\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": [1, 1, 2, 2, 3, 4]})\n a = dd.repartition(A, [0, 4, 5])\n\n B = pd.DataFrame({\"y\": [1, 3, 4, 4, 5, 6], \"z\": [6, 5, 4, 3, 2, 1]})\n b = dd.repartition(B, [0, 2, 5])\n\n assert_eq(\n dd.merge(\n a, b, left_index=True, right_index=True, how=how, shuffle=shuffle_method\n ),\n pd.merge(A, B, left_index=True, right_index=True, how=how),\n )\n\n result = dd.merge(a, b, on=\"y\", how=how)\n list_eq(result, pd.merge(A, B, on=\"y\", how=how))\n assert all(d is None for d in result.divisions)\n\n list_eq(\n dd.merge(a, b, left_on=\"x\", right_on=\"z\", how=how, shuffle=shuffle_method),\n pd.merge(A, B, left_on=\"x\", right_on=\"z\", how=how),\n )\n list_eq(\n dd.merge(\n a,\n b,\n left_on=\"x\",\n right_on=\"z\",\n how=how,\n suffixes=(\"1\", \"2\"),\n shuffle=shuffle_method,\n ),\n pd.merge(A, B, left_on=\"x\", right_on=\"z\", how=how, suffixes=(\"1\", \"2\")),\n )\n\n list_eq(dd.merge(a, b, how=how, shuffle=shuffle_method), pd.merge(A, B, how=how))\n 
list_eq(dd.merge(a, B, how=how, shuffle=shuffle_method), pd.merge(A, B, how=how))\n list_eq(dd.merge(A, b, how=how, shuffle=shuffle_method), pd.merge(A, B, how=how))\n list_eq(dd.merge(A, B, how=how, shuffle=shuffle_method), pd.merge(A, B, how=how))\n\n list_eq(\n dd.merge(\n a, b, left_index=True, right_index=True, how=how, shuffle=shuffle_method\n ),\n pd.merge(A, B, left_index=True, right_index=True, how=how),\n )\n list_eq(\n dd.merge(\n a,\n b,\n left_index=True,\n right_index=True,\n how=how,\n suffixes=(\"1\", \"2\"),\n shuffle=shuffle_method,\n ),\n pd.merge(A, B, left_index=True, right_index=True, how=how, suffixes=(\"1\", \"2\")),\n )\n\n list_eq(\n dd.merge(a, b, left_on=\"x\", right_index=True, how=how, shuffle=shuffle_method),\n pd.merge(A, B, left_on=\"x\", right_index=True, how=how),\n )\n list_eq(\n dd.merge(\n a,\n b,\n left_on=\"x\",\n right_index=True,\n how=how,\n suffixes=(\"1\", \"2\"),\n shuffle=shuffle_method,\n ),\n pd.merge(A, B, left_on=\"x\", right_index=True, how=how, suffixes=(\"1\", \"2\")),\n )\n\n # pandas result looks buggy\n # list_eq(dd.merge(a, B, left_index=True, right_on='y'),\n # pd.merge(A, B, left_index=True, right_on='y'))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_semi_anti_cudf_test_merge_tasks_semi_anti_cudf.assert_eq_result_expect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_semi_anti_cudf_test_merge_tasks_semi_anti_cudf.assert_eq_result_expect_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 853, "end_line": 902, "span_ids": ["test_merge_tasks_semi_anti_cudf"], "tokens": 529}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"parts\", [(3, 3), (3, 1), (1, 3)])\n@pytest.mark.parametrize(\"how\", [\"leftsemi\", \"leftanti\"])\n@pytest.mark.parametrize(\n \"engine\",\n [\n pytest.param(\n \"pandas\",\n marks=pytest.mark.xfail(\n reason=\"Pandas does not support leftsemi or leftanti\"\n ),\n ),\n pytest.param(\"cudf\", marks=pytest.mark.gpu),\n ],\n)\ndef test_merge_tasks_semi_anti_cudf(engine, how, parts):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n emp = pd.DataFrame(\n {\n \"emp_id\": np.arange(101, stop=106),\n \"name\": [\"John\", \"Tom\", \"Harry\", \"Rahul\", \"Sakil\"],\n \"city\": [\"Cal\", \"Mum\", \"Del\", \"Ban\", \"Del\"],\n \"salary\": [50000, 40000, 80000, 60000, 90000],\n }\n )\n skills = pd.DataFrame(\n {\n \"skill_id\": [404, 405, 406, 407, 408],\n \"emp_id\": [103, 101, 105, 102, 101],\n \"skill_name\": [\"Dask\", \"Spark\", \"C\", \"Python\", \"R\"],\n }\n )\n\n if engine == \"cudf\":\n emp = cudf.from_pandas(emp)\n skills = 
cudf.from_pandas(skills)\n dd_emp = dask_cudf.from_cudf(emp, npartitions=parts[0])\n dd_skills = dask_cudf.from_cudf(skills, npartitions=parts[1])\n else:\n dd_emp = dd.from_pandas(emp, npartitions=parts[0])\n dd_skills = dd.from_pandas(skills, npartitions=parts[1])\n\n expect = emp.merge(skills, on=\"emp_id\", how=how).sort_values([\"emp_id\"])\n result = dd_emp.merge(dd_skills, on=\"emp_id\", how=how).sort_values([\"emp_id\"])\n assert_eq(result, expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_passes_through_test_merge_tasks_passes_through.assert_not_any_partd_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_passes_through_test_merge_tasks_passes_through.assert_not_any_partd_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 851, "end_line": 860, "span_ids": ["test_merge_tasks_passes_through"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_tasks_passes_through():\n a = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n b = pd.DataFrame({\"c\": [1, 2, 3, 4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]})\n\n aa = dd.from_pandas(a, npartitions=3)\n bb = dd.from_pandas(b, npartitions=2)\n\n cc = aa.merge(bb, left_on=\"a\", right_on=\"d\", shuffle=\"tasks\")\n\n assert not any(\"partd\" in k[0] for k in cc.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns_test_merge_by_index_patterns.pd_merge.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns_test_merge_by_index_patterns.pd_merge.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 917, "end_line": 959, "span_ids": ["test_merge_by_index_patterns"], "tokens": 788}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_merge_by_index_patterns(how, shuffle_method):\n\n pdf1l = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], 
\"b\": [7, 6, 5, 4, 3, 2, 1]})\n pdf1r = pd.DataFrame({\"c\": [1, 2, 3, 4, 5, 6, 7], \"d\": [7, 6, 5, 4, 3, 2, 1]})\n\n pdf2l = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf2r = pd.DataFrame(\n {\"c\": [7, 6, 5, 4, 3, 2, 1], \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n\n pdf3l = pdf2l\n pdf3r = pd.DataFrame({\"c\": [6, 7, 8, 9], \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n pdf4l = pdf2l\n pdf4r = pd.DataFrame({\"c\": [9, 10, 11, 12], \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n # completely different index\n pdf5l = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3, 3, 4], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"lmnopqr\")\n )\n pdf5r = pd.DataFrame({\"c\": [1, 1, 1, 1], \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf6l = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3, 3, 4], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"cdefghi\")\n )\n pdf6r = pd.DataFrame({\"c\": [1, 2, 1, 2], \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf7l = pd.DataFrame(\n {\"a\": [1, 1, 2, 2, 3, 3, 4], \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf7r = pd.DataFrame({\"c\": [5, 6, 7, 8], \"d\": [5, 4, 3, 2]}, index=list(\"fghi\"))\n\n def pd_merge(left, right, **kwargs):\n # Workaround pandas bug where output dtype of empty index will be int64\n # even if input was object.\n out = pd.merge(left, right, **kwargs)\n if len(out) == 0:\n return out.set_index(out.index.astype(left.index.dtype))\n return out\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns.for_pdl_pdr_in__test_merge_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_index_patterns.for_pdl_pdr_in__test_merge_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_.None_15", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 961, "end_line": 1129, "span_ids": ["test_merge_by_index_patterns"], "tokens": 1203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\n@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_merge_by_index_patterns(how, shuffle_method):\n # ... 
other code\n\n for pdl, pdr in [\n (pdf1l, pdf1r),\n (pdf2l, pdf2r),\n (pdf3l, pdf3r),\n (pdf4l, pdf4r),\n (pdf5l, pdf5r),\n (pdf6l, pdf6r),\n (pdf7l, pdf7r),\n ]:\n\n for lpart, rpart in [\n (2, 2), # same partition\n (3, 2), # left npartition > right npartition\n (2, 3),\n ]: # left npartition < right npartition\n\n ddl = dd.from_pandas(pdl, lpart)\n ddr = dd.from_pandas(pdr, rpart)\n\n assert_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n ),\n pd_merge(pdl, pdr, how=how, left_index=True, right_index=True),\n )\n assert_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n ),\n pd_merge(pdr, pdl, how=how, left_index=True, right_index=True),\n )\n\n assert_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n indicator=True,\n ),\n pd_merge(\n pdl, pdr, how=how, left_index=True, right_index=True, indicator=True\n ),\n )\n assert_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n indicator=True,\n ),\n pd_merge(\n pdr, pdl, how=how, left_index=True, right_index=True, indicator=True\n ),\n )\n\n assert_eq(\n ddr.merge(\n ddl,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n ),\n pdr.merge(pdl, how=how, left_index=True, right_index=True),\n )\n assert_eq(\n ddl.merge(\n ddr,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n ),\n pdl.merge(pdr, how=how, left_index=True, right_index=True),\n )\n\n # hash join\n list_eq(\n dd.merge(\n ddl, ddr, how=how, left_on=\"a\", right_on=\"c\", shuffle=shuffle_method\n ),\n pd.merge(pdl, pdr, how=how, left_on=\"a\", right_on=\"c\"),\n )\n list_eq(\n dd.merge(\n ddl, ddr, how=how, left_on=\"b\", right_on=\"d\", shuffle=shuffle_method\n ),\n pd.merge(pdl, pdr, how=how, left_on=\"b\", right_on=\"d\"),\n )\n\n list_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_on=\"c\",\n right_on=\"a\",\n shuffle=shuffle_method,\n indicator=True,\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"c\", right_on=\"a\", indicator=True),\n )\n list_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_on=\"d\",\n right_on=\"b\",\n shuffle=shuffle_method,\n indicator=True,\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"d\", right_on=\"b\", indicator=True),\n )\n\n list_eq(\n dd.merge(\n ddr, ddl, how=how, left_on=\"c\", right_on=\"a\", shuffle=shuffle_method\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"c\", right_on=\"a\"),\n )\n list_eq(\n dd.merge(\n ddr, ddl, how=how, left_on=\"d\", right_on=\"b\", shuffle=shuffle_method\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"d\", right_on=\"b\"),\n )\n\n list_eq(\n ddl.merge(\n ddr, how=how, left_on=\"a\", right_on=\"c\", shuffle=shuffle_method\n ),\n pdl.merge(pdr, how=how, left_on=\"a\", right_on=\"c\"),\n )\n list_eq(\n ddl.merge(\n ddr, how=how, left_on=\"b\", right_on=\"d\", shuffle=shuffle_method\n ),\n pdl.merge(pdr, how=how, left_on=\"b\", right_on=\"d\"),\n )\n\n list_eq(\n ddr.merge(\n ddl, how=how, left_on=\"c\", right_on=\"a\", shuffle=shuffle_method\n ),\n pdr.merge(pdl, how=how, left_on=\"c\", right_on=\"a\"),\n )\n list_eq(\n ddr.merge(\n ddl, how=how, left_on=\"d\", right_on=\"b\", shuffle=shuffle_method\n ),\n pdr.merge(pdl, how=how, left_on=\"d\", right_on=\"b\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns_test_join_by_index_patterns.pdf7r.pd_DataFrame_c_list_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns_test_join_by_index_patterns.pdf7r.pd_DataFrame_c_list_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1132, "end_line": 1172, "span_ids": ["test_join_by_index_patterns"], "tokens": 610}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_join_by_index_patterns(how, shuffle_method):\n\n # Similar test cases as test_merge_by_index_patterns,\n # but columns / index for join have same dtype\n\n pdf1l = pd.DataFrame(\n {\"a\": list(\"abcdefg\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf1r = pd.DataFrame(\n {\"c\": list(\"abcdefg\"), \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n\n pdf2l = pdf1l\n pdf2r = pd.DataFrame(\n {\"c\": list(\"gfedcba\"), \"d\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n\n pdf3l = pdf1l\n pdf3r = pd.DataFrame({\"c\": list(\"abdg\"), \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n pdf4l = pd.DataFrame(\n {\"a\": list(\"abcabce\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf4r = pd.DataFrame({\"c\": list(\"abda\"), \"d\": [5, 4, 3, 2]}, index=list(\"abdg\"))\n\n # completely different index\n pdf5l = pd.DataFrame(\n {\"a\": list(\"lmnopqr\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"lmnopqr\")\n )\n pdf5r = pd.DataFrame({\"c\": list(\"abcd\"), \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf6l = pd.DataFrame(\n {\"a\": list(\"cdefghi\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"cdefghi\")\n )\n pdf6r = pd.DataFrame({\"c\": list(\"abab\"), \"d\": [5, 4, 3, 2]}, index=list(\"abcd\"))\n\n pdf7l = pd.DataFrame(\n {\"a\": list(\"aabbccd\"), \"b\": [7, 6, 5, 4, 3, 2, 1]}, index=list(\"abcdefg\")\n )\n pdf7r = pd.DataFrame({\"c\": list(\"aabb\"), \"d\": [5, 4, 3, 2]}, index=list(\"fghi\"))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns.for_pdl_pdr_in__test_join_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_2_._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_by_index_patterns.for_pdl_pdr_in__test_join_by_index_patterns.for_pdl_pdr_in_.for_lpart_rpart_in_2_._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1174, "end_line": 1229, "span_ids": ["test_join_by_index_patterns"], "tokens": 666}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_join_by_index_patterns(how, shuffle_method):\n # ... other code\n\n for pdl, pdr in [\n (pdf1l, pdf1r),\n (pdf2l, pdf2r),\n (pdf3l, pdf3r),\n (pdf4l, pdf4r),\n (pdf5l, pdf5r),\n (pdf6l, pdf6r),\n (pdf7l, pdf7r),\n ]:\n\n for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:\n\n ddl = dd.from_pandas(pdl, lpart)\n ddr = dd.from_pandas(pdr, rpart)\n\n assert_eq(\n ddl.join(ddr, how=how, shuffle=shuffle_method), pdl.join(pdr, how=how)\n )\n assert_eq(\n ddr.join(ddl, how=how, shuffle=shuffle_method), pdr.join(pdl, how=how)\n )\n\n assert_eq(\n ddl.join(\n ddr, how=how, lsuffix=\"l\", rsuffix=\"r\", shuffle=shuffle_method\n ),\n pdl.join(pdr, how=how, lsuffix=\"l\", rsuffix=\"r\"),\n )\n assert_eq(\n ddr.join(\n ddl, how=how, lsuffix=\"l\", rsuffix=\"r\", shuffle=shuffle_method\n ),\n pdr.join(pdl, how=how, lsuffix=\"l\", rsuffix=\"r\"),\n )\n\n \"\"\"\n # temporary disabled bacause pandas may incorrectly raise\n # IndexError for empty DataFrame\n # https://github.com/pydata/pandas/pull/10826\n\n list_assert_eq(ddl.join(ddr, how=how, on='a', lsuffix='l', rsuffix='r'),\n pdl.join(pdr, how=how, on='a', lsuffix='l', rsuffix='r'))\n\n list_eq(ddr.join(ddl, how=how, on='c', lsuffix='l', rsuffix='r'),\n pdr.join(pdl, how=how, on='c', lsuffix='l', rsuffix='r'))\n\n # merge with index and columns\n list_eq(ddl.merge(ddr, how=how, left_on='a', right_index=True),\n pdl.merge(pdr, how=how, left_on='a', right_index=True))\n list_eq(ddr.merge(ddl, how=how, left_on='c', right_index=True),\n pdr.merge(pdl, how=how, left_on='c', right_index=True))\n list_eq(ddl.merge(ddr, how=how, left_index=True, right_on='c'),\n pdl.merge(pdr, how=how, left_index=True, right_on='c'))\n list_eq(ddr.merge(ddl, how=how, left_index=True, right_on='a'),\n pdr.merge(pdl, how=how, left_index=True, right_on='a'))\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns_test_merge_by_multiple_columns.pdf3r.pd_DataFrame_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns_test_merge_by_multiple_columns.pdf3r.pd_DataFrame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1246, "end_line": 1298, "span_ids": ["test_merge_by_multiple_columns"], "tokens": 496}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_merge_by_multiple_columns(how, shuffle_method):\n # warnings here from pandas\n pdf1l = pd.DataFrame(\n {\n \"a\": list(\"abcdefghij\"),\n \"b\": list(\"abcdefghij\"),\n \"c\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n },\n index=list(\"abcdefghij\"),\n )\n pdf1r = pd.DataFrame(\n {\n \"d\": list(\"abcdefghij\"),\n \"e\": list(\"abcdefghij\"),\n \"f\": [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n },\n index=list(\"abcdefghij\"),\n )\n\n pdf2l = pd.DataFrame(\n {\n \"a\": list(\"abcdeabcde\"),\n \"b\": list(\"abcabcabca\"),\n \"c\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n },\n index=list(\"abcdefghij\"),\n )\n pdf2r = pd.DataFrame(\n {\n \"d\": list(\"edcbaedcba\"),\n \"e\": list(\"aaabbbcccd\"),\n \"f\": [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n },\n index=list(\"fghijklmno\"),\n )\n\n pdf3l = pd.DataFrame(\n {\n \"a\": list(\"aaaaaaaaaa\"),\n \"b\": list(\"aaaaaaaaaa\"),\n \"c\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n },\n index=list(\"abcdefghij\"),\n )\n pdf3r = pd.DataFrame(\n {\n \"d\": list(\"aaabbbccaa\"),\n \"e\": list(\"abbbbbbbbb\"),\n \"f\": [10, 9, 8, 7, 6, 5, 4, 3, 2, 1],\n },\n index=list(\"ABCDEFGHIJ\"),\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l__test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l_.for_lpart_rpart_in_2_.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l__test_merge_by_multiple_columns.for_pdl_pdr_in_pdf1l_.for_lpart_rpart_in_2_.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1300, "end_line": 1374, "span_ids": ["test_merge_by_multiple_columns"], "tokens": 633}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_merge_by_multiple_columns(how, shuffle_method):\n # ... other code\n\n for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r)]:\n\n for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:\n\n ddl = dd.from_pandas(pdl, lpart)\n ddr = dd.from_pandas(pdr, rpart)\n\n assert_eq(\n ddl.join(ddr, how=how, shuffle=shuffle_method), pdl.join(pdr, how=how)\n )\n assert_eq(\n ddr.join(ddl, how=how, shuffle=shuffle_method), pdr.join(pdl, how=how)\n )\n\n assert_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n ),\n pd.merge(pdl, pdr, how=how, left_index=True, right_index=True),\n )\n assert_eq(\n dd.merge(\n ddr,\n ddl,\n how=how,\n left_index=True,\n right_index=True,\n shuffle=shuffle_method,\n ),\n pd.merge(pdr, pdl, how=how, left_index=True, right_index=True),\n )\n\n # hash join\n list_eq(\n dd.merge(\n ddl, ddr, how=how, left_on=\"a\", right_on=\"d\", shuffle=shuffle_method\n ),\n pd.merge(pdl, pdr, how=how, left_on=\"a\", right_on=\"d\"),\n )\n list_eq(\n dd.merge(\n ddl, ddr, how=how, left_on=\"b\", right_on=\"e\", shuffle=shuffle_method\n ),\n pd.merge(pdl, pdr, how=how, left_on=\"b\", right_on=\"e\"),\n )\n\n list_eq(\n dd.merge(\n ddr, ddl, how=how, left_on=\"d\", right_on=\"a\", shuffle=shuffle_method\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"d\", right_on=\"a\"),\n )\n list_eq(\n dd.merge(\n ddr, ddl, how=how, left_on=\"e\", right_on=\"b\", shuffle=shuffle_method\n ),\n pd.merge(pdr, pdl, how=how, left_on=\"e\", right_on=\"b\"),\n )\n\n list_eq(\n dd.merge(\n ddl,\n ddr,\n how=how,\n left_on=[\"a\", \"b\"],\n right_on=[\"d\", \"e\"],\n shuffle=shuffle_method,\n ),\n pd.merge(pdl, pdr, how=how, left_on=[\"a\", \"b\"], right_on=[\"d\", \"e\"]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_melt_test_melt.None_9": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_melt_test_melt.None_9", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1268, "end_line": 1298, "span_ids": ["test_melt"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_melt():\n pdf = pd.DataFrame(\n {\"A\": list(\"abcd\") * 5, \"B\": list(\"XY\") * 10, \"C\": np.random.randn(20)}\n )\n ddf = dd.from_pandas(pdf, 4)\n\n list_eq(dd.melt(ddf), pd.melt(pdf))\n\n list_eq(dd.melt(ddf, id_vars=\"C\"), pd.melt(pdf, id_vars=\"C\"))\n list_eq(dd.melt(ddf, value_vars=\"C\"), pd.melt(pdf, value_vars=\"C\"))\n list_eq(\n dd.melt(ddf, value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n pd.melt(pdf, value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n )\n list_eq(\n dd.melt(ddf, id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n pd.melt(pdf, id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n )\n\n # test again as DataFrame method\n list_eq(ddf.melt(), pdf.melt())\n list_eq(ddf.melt(id_vars=\"C\"), pdf.melt(id_vars=\"C\"))\n list_eq(ddf.melt(value_vars=\"C\"), pdf.melt(value_vars=\"C\"))\n list_eq(\n ddf.melt(value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n pdf.melt(value_vars=[\"A\", \"C\"], var_name=\"myvar\"),\n )\n list_eq(\n ddf.melt(id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n pdf.melt(id_vars=\"B\", value_vars=[\"A\", \"C\"], value_name=\"myval\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_inner_merge_with_pandas_object_test_cheap_inner_merge_with_pandas_object.list_eq_da_merge_b_on_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_inner_merge_with_pandas_object_test_cheap_inner_merge_with_pandas_object.list_eq_da_merge_b_on_x", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1410, "end_line": 1422, "span_ids": ["test_cheap_inner_merge_with_pandas_object"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cheap_inner_merge_with_pandas_object():\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n da = dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n\n dc = da.merge(b, on=\"x\", how=\"inner\")\n assert not hlg_layer_topological(dc.dask, -1).is_materialized()\n assert 
all(\"shuffle\" not in k[0] for k in dc.dask)\n\n list_eq(da.merge(b, on=\"x\", how=\"inner\"), a.merge(b, on=\"x\", how=\"inner\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_divisions_test_cheap_single_partition_merge_divisions.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_divisions_test_cheap_single_partition_merge_divisions.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1446, "end_line": 1463, "span_ids": ["test_cheap_single_partition_merge_divisions"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cheap_single_partition_merge_divisions():\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n aa = dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n bb = dd.from_pandas(b, npartitions=1, sort=False)\n\n actual = aa.merge(bb, on=\"x\", how=\"inner\")\n assert not hlg_layer_topological(actual.dask, -1).is_materialized()\n\n assert not actual.known_divisions\n assert_divisions(actual)\n\n actual = bb.merge(aa, on=\"x\", how=\"inner\")\n assert not actual.known_divisions\n assert_divisions(actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_parition_merge_left_right_test_cheap_single_parition_merge_left_right.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_parition_merge_left_right_test_cheap_single_parition_merge_left_right.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1466, "end_line": 1488, "span_ids": ["test_cheap_single_parition_merge_left_right"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"left\", \"right\"])\n@pytest.mark.parametrize(\"flip\", [False, True])\ndef test_cheap_single_parition_merge_left_right(how, flip):\n a = pd.DataFrame({\"x\": range(8), \"z\": list(\"ababbdda\")}, index=range(8))\n aa = 
dd.from_pandas(a, npartitions=1)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")}, index=range(4))\n bb = dd.from_pandas(b, npartitions=1)\n\n pd_inputs = (b, a) if flip else (a, b)\n inputs = (bb, aa) if flip else (aa, bb)\n\n actual = dd.merge(*inputs, left_index=True, right_on=\"x\", how=how)\n expected = pd.merge(*pd_inputs, left_index=True, right_on=\"x\", how=how)\n\n assert not hlg_layer_topological(actual.dask, -1).is_materialized()\n assert_eq(actual, expected)\n\n actual = dd.merge(*inputs, left_on=\"x\", right_index=True, how=how)\n expected = pd.merge(*pd_inputs, left_on=\"x\", right_index=True, how=how)\n\n assert not hlg_layer_topological(actual.dask, -1).is_materialized()\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_on_index_test_cheap_single_partition_merge_on_index.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_on_index_test_cheap_single_partition_merge_on_index.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1491, "end_line": 1518, "span_ids": ["test_cheap_single_partition_merge_on_index"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cheap_single_partition_merge_on_index():\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n aa = dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n bb = dd.from_pandas(b, npartitions=1, sort=False)\n\n actual = aa.merge(bb, left_index=True, right_on=\"x\", how=\"inner\")\n expected = a.merge(b, left_index=True, right_on=\"x\", how=\"inner\")\n\n # Workaround https://github.com/pandas-dev/pandas/issues/26925\n # actual has the correct dtype for the index (Int64). 
Pandas has object-dtype\n # for empty joins.\n expected.index = expected.index.astype(\"int64\")\n\n assert not hlg_layer_topological(actual.dask, -1).is_materialized()\n assert not actual.known_divisions\n assert_eq(actual, expected)\n\n actual = bb.merge(aa, right_index=True, left_on=\"x\", how=\"inner\")\n expected = b.merge(a, right_index=True, left_on=\"x\", how=\"inner\")\n expected.index = expected.index.astype(\"int64\")\n\n assert not hlg_layer_topological(actual.dask, -1).is_materialized()\n assert not actual.known_divisions\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_maintains_columns_test_merge_maintains_columns.assert_tuple_merged_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_maintains_columns_test_merge_maintains_columns.assert_tuple_merged_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1396, "end_line": 1405, "span_ids": ["test_merge_maintains_columns"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_maintains_columns():\n lhs = pd.DataFrame(\n {\"A\": [1, 2, 3], \"B\": list(\"abc\"), \"C\": \"foo\", \"D\": 1.0}, columns=list(\"DCBA\")\n )\n rhs = pd.DataFrame(\n {\"G\": [4, 5], \"H\": 6.0, \"I\": \"bar\", \"B\": list(\"ab\")}, columns=list(\"GHIB\")\n )\n ddf = dd.from_pandas(lhs, npartitions=1)\n merged = dd.merge(ddf, rhs, on=\"B\").compute()\n assert tuple(merged.columns) == (\"D\", \"C\", \"B\", \"A\", \"G\", \"H\", \"I\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_index_without_divisions_test_merge_index_without_divisions.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_index_without_divisions_test_merge_index_without_divisions.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1533, "end_line": 1542, "span_ids": ["test_merge_index_without_divisions"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_merge_index_without_divisions(shuffle_method):\n a = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]}, index=[1, 2, 3, 4, 5])\n b = pd.DataFrame({\"y\": [1, 2, 3, 4, 5]}, index=[5, 4, 3, 2, 1])\n\n aa = dd.from_pandas(a, npartitions=3, sort=False)\n bb = dd.from_pandas(b, npartitions=2)\n\n result = aa.join(bb, how=\"inner\", shuffle=shuffle_method)\n expected = a.join(b, how=\"inner\")\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_half_indexed_dataframe_avoids_shuffle_test_half_indexed_dataframe_avoids_shuffle.assert_len_cc_dask_500": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_half_indexed_dataframe_avoids_shuffle_test_half_indexed_dataframe_avoids_shuffle.assert_len_cc_dask_500", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1421, "end_line": 1435, "span_ids": ["test_half_indexed_dataframe_avoids_shuffle"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_half_indexed_dataframe_avoids_shuffle():\n a = pd.DataFrame({\"x\": np.random.randint(100, size=1000)})\n b = pd.DataFrame(\n {\"y\": np.random.randint(100, size=100)}, index=np.random.randint(100, size=100)\n )\n\n aa = dd.from_pandas(a, npartitions=100)\n bb = dd.from_pandas(b, npartitions=2)\n\n c = pd.merge(a, b, left_index=True, right_on=\"y\")\n cc = dd.merge(aa, bb, left_index=True, right_on=\"y\", shuffle=\"tasks\")\n\n list_eq(c, cc)\n\n assert len(cc.dask) < 500", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_errors_for_merge_on_frame_columns_test_concat_one_series.assert_isinstance_c_dd_D": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_errors_for_merge_on_frame_columns_test_concat_one_series.assert_isinstance_c_dd_D", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1438, "end_line": 1460, "span_ids": ["test_errors_for_merge_on_frame_columns", "test_concat_one_series"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_errors_for_merge_on_frame_columns():\n a = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]}, 
index=[1, 2, 3, 4, 5])\n b = pd.DataFrame({\"y\": [1, 2, 3, 4, 5]}, index=[5, 4, 3, 2, 1])\n\n aa = dd.from_pandas(a, npartitions=3, sort=False)\n bb = dd.from_pandas(b, npartitions=2)\n\n with pytest.raises(NotImplementedError):\n dd.merge(aa, bb, left_on=\"x\", right_on=bb.y)\n\n with pytest.raises(NotImplementedError):\n dd.merge(aa, bb, left_on=aa.x, right_on=bb.y)\n\n\ndef test_concat_one_series():\n a = pd.Series([1, 2, 3, 4])\n aa = dd.from_pandas(a, npartitions=2, sort=False)\n\n c = dd.concat([aa], axis=0)\n assert isinstance(c, dd.Series)\n\n c = dd.concat([aa], axis=1)\n assert isinstance(c, dd.DataFrame)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_errors_test_concat_unknown_divisions_errors.with_pytest_raises_ValueE.with_pytest_warns_UserWar.dd_concat_aa_bb_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_errors_test_concat_unknown_divisions_errors.with_pytest_raises_ValueE.with_pytest_warns_UserWar.dd_concat_aa_bb_axis_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1483, "end_line": 1491, "span_ids": ["test_concat_unknown_divisions_errors"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_unknown_divisions_errors():\n a = pd.Series([1, 2, 3, 4, 5, 6])\n b = pd.Series([4, 3, 2, 1])\n aa = dd.from_pandas(a, npartitions=2, sort=False)\n bb = dd.from_pandas(b, npartitions=2, sort=False)\n\n with pytest.raises(ValueError):\n with pytest.warns(UserWarning): # Concat with unknown divisions\n dd.concat([aa, bb], axis=1).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2_test_concat2.assert_dd_concat_a_is_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2_test_concat2.assert_dd_concat_a_is_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1545, "end_line": 1583, "span_ids": ["test_concat2"], "tokens": 615}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat2():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 
2, 3], \"b\": [4, 5, 6]}),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, parent_meta=pd.DataFrame())\n a = dd.DataFrame(dsk, \"x\", meta, [None, None])\n dsk = {\n (\"y\", 0): pd.DataFrame({\"a\": [10, 20, 30], \"b\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"a\": [40, 50, 60], \"b\": [30, 20, 10]}),\n (\"y\", 2): pd.DataFrame({\"a\": [70, 80, 90], \"b\": [0, 0, 0]}),\n }\n b = dd.DataFrame(dsk, \"y\", meta, [None, None])\n\n dsk = {\n (\"y\", 0): pd.DataFrame({\"b\": [10, 20, 30], \"c\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"b\": [40, 50, 60], \"c\": [30, 20, 10]}),\n }\n meta = make_meta({\"b\": \"i8\", \"c\": \"i8\"}, parent_meta=pd.DataFrame())\n c = dd.DataFrame(dsk, \"y\", meta, [None, None])\n\n dsk = {\n (\"y\", 0): pd.DataFrame(\n {\"b\": [10, 20, 30], \"c\": [40, 50, 60], \"d\": [70, 80, 90]}\n ),\n (\"y\", 1): pd.DataFrame(\n {\"b\": [40, 50, 60], \"c\": [30, 20, 10], \"d\": [90, 80, 70]}, index=[3, 4, 5]\n ),\n }\n meta = make_meta(\n {\"b\": \"i8\", \"c\": \"i8\", \"d\": \"i8\"},\n index=pd.Index([], \"i8\"),\n parent_meta=pd.DataFrame(),\n )\n d = dd.DataFrame(dsk, \"y\", meta, [0, 3, 5])\n\n cases = [[a, b], [a, c], [a, d]]\n assert dd.concat([a]) is a\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2.for_case_in_cases__test_concat2.for_case_in_cases_.None_5.assert_set_result_dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat2.for_case_in_cases__test_concat2.for_case_in_cases_.None_5.assert_set_result_dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1529, "end_line": 1561, "span_ids": ["test_concat2"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat2():\n # ... 
other code\n for case in cases:\n pdcase = [_c.compute() for _c in case]\n\n with warnings.catch_warnings(record=True) as w:\n expected = pd.concat(pdcase, sort=False)\n\n ctx = FutureWarning if w else None\n\n with pytest.warns(ctx):\n result = dd.concat(case)\n\n assert result.npartitions == case[0].npartitions + case[1].npartitions\n assert result.divisions == (None,) * (result.npartitions + 1)\n assert_eq(expected, result)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n assert set(result.dask) == set(dd.concat(case).dask)\n\n with warnings.catch_warnings(record=True) as w:\n expected = pd.concat(pdcase, join=\"inner\", sort=False)\n\n ctx = FutureWarning if w else None\n\n with pytest.warns(ctx):\n result = dd.concat(case, join=\"inner\")\n assert result.npartitions == case[0].npartitions + case[1].npartitions\n assert result.divisions == (None,) * (result.npartitions + 1)\n assert_eq(expected, result)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n assert set(result.dask) == set(dd.concat(case, join=\"inner\").dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat3_test_concat3.None_5.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat3_test_concat3.None_5.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1564, "end_line": 1616, "span_ids": ["test_concat3"], "tokens": 485}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat3():\n pdf1 = pd.DataFrame(\n np.random.randn(6, 5), columns=list(\"ABCDE\"), index=list(\"abcdef\")\n )\n pdf2 = pd.DataFrame(\n np.random.randn(6, 5), columns=list(\"ABCFG\"), index=list(\"ghijkl\")\n )\n pdf3 = pd.DataFrame(\n np.random.randn(6, 5), columns=list(\"ABCHI\"), index=list(\"mnopqr\")\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n\n with warnings.catch_warnings(record=True) as w:\n expected = pd.concat([pdf1, pdf2], sort=False)\n\n ctx = FutureWarning if w else None\n\n with pytest.warns(ctx):\n result = dd.concat([ddf1, ddf2])\n\n assert result.divisions == ddf1.divisions[:-1] + ddf2.divisions\n assert result.npartitions == ddf1.npartitions + ddf2.npartitions\n assert_eq(result, expected)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n assert_eq(\n dd.concat([ddf1, ddf2], interleave_partitions=True), pd.concat([pdf1, pdf2])\n )\n\n with warnings.catch_warnings(record=True) as w:\n expected = pd.concat([pdf1, pdf2, pdf3], sort=False)\n\n ctx = FutureWarning if w else None\n\n with pytest.warns(ctx):\n result = dd.concat([ddf1, ddf2, ddf3])\n assert result.divisions == (\n ddf1.divisions[:-1] + ddf2.divisions[:-1] + ddf3.divisions\n )\n assert 
result.npartitions == (\n ddf1.npartitions + ddf2.npartitions + ddf3.npartitions\n )\n assert_eq(result, expected)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n assert_eq(\n dd.concat([ddf1, ddf2, ddf3], interleave_partitions=True),\n pd.concat([pdf1, pdf2, pdf3]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat4_interleave_partitions_test_concat4_interleave_partitions.assert_msg_in_str_err_val": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat4_interleave_partitions_test_concat4_interleave_partitions.assert_msg_in_str_err_val", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1747, "end_line": 1791, "span_ids": ["test_concat4_interleave_partitions"], "tokens": 385}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat4_interleave_partitions():\n pdf1 = pd.DataFrame(\n np.random.randn(10, 5), columns=list(\"ABCDE\"), index=list(\"abcdefghij\")\n )\n pdf2 = pd.DataFrame(\n np.random.randn(13, 5), columns=list(\"ABCDE\"), index=list(\"fghijklmnopqr\")\n )\n pdf3 = pd.DataFrame(\n np.random.randn(13, 6), columns=list(\"CDEXYZ\"), index=list(\"fghijklmnopqr\")\n )\n\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n\n msg = (\n \"All inputs have known divisions which cannot be \"\n \"concatenated in order. 
Specify \"\n \"interleave_partitions=True to ignore order\"\n )\n\n cases = [\n [ddf1, ddf1],\n [ddf1, ddf2],\n [ddf1, ddf3],\n [ddf2, ddf1],\n [ddf2, ddf3],\n [ddf3, ddf1],\n [ddf3, ddf2],\n ]\n for case in cases:\n pdcase = [c.compute() for c in case]\n\n assert_eq(\n dd.concat(case, interleave_partitions=True), pd.concat(pdcase, sort=False)\n )\n assert_eq(\n dd.concat(case, join=\"inner\", interleave_partitions=True),\n pd.concat(pdcase, join=\"inner\", sort=False),\n )\n\n msg = \"'join' must be 'inner' or 'outer'\"\n with pytest.raises(ValueError) as err:\n dd.concat([ddf1, ddf1], join=\"invalid\", interleave_partitions=True)\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_test_concat_categorical.if_not_known_.dframes[0]._meta.clear_known_categories_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_test_concat_categorical.if_not_known_.dframes[0]._meta.clear_known_categories_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1889, "end_line": 1937, "span_ids": ["test_concat_categorical"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"known, cat_index, divisions\",\n [\n (True, True, False),\n (True, False, True),\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, False, False),\n ],\n)\ndef test_concat_categorical(known, cat_index, divisions):\n frames = [\n pd.DataFrame(\n {\n \"w\": list(\"xxxxx\"),\n \"x\": np.arange(5),\n \"y\": list(\"abcbc\"),\n \"z\": np.arange(5, dtype=\"f8\"),\n }\n ),\n pd.DataFrame(\n {\n \"w\": list(\"yyyyy\"),\n \"x\": np.arange(5, 10),\n \"y\": list(\"abbba\"),\n \"z\": np.arange(5, 10, dtype=\"f8\"),\n }\n ),\n pd.DataFrame(\n {\n \"w\": list(\"zzzzz\"),\n \"x\": np.arange(10, 15),\n \"y\": list(\"bcbcc\"),\n \"z\": np.arange(10, 15, dtype=\"f8\"),\n }\n ),\n ]\n for df in frames:\n df.w = df.w.astype(\"category\")\n df.y = df.y.astype(\"category\")\n\n if cat_index:\n frames = [df.set_index(df.y) for df in frames]\n\n dframes = [dd.from_pandas(p, npartitions=2, sort=divisions) for p in frames]\n\n if not known:\n dframes[0]._meta = clear_known_categories(dframes[0]._meta, [\"y\"], index=True)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.check_and_return_test_concat_categorical.check_and_return.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.check_and_return_test_concat_categorical.check_and_return.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1939, "end_line": 1950, "span_ids": ["test_concat_categorical"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"known, cat_index, divisions\",\n [\n (True, True, False),\n (True, False, True),\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, False, False),\n ],\n)\ndef test_concat_categorical(known, cat_index, divisions):\n # ... other code\n\n def check_and_return(ddfs, dfs, join):\n sol = concat(dfs, join=join)\n res = dd.concat(ddfs, join=join, interleave_partitions=divisions)\n assert_eq(res, sol)\n if known:\n parts = compute_as_if_collection(\n dd.DataFrame, res.dask, res.__dask_keys__()\n )\n for p in [i.iloc[:0] for i in parts]:\n res._meta == p # will error if schemas don't align\n assert not cat_index or has_known_categories(res.index) == known\n return res\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.for_join_in_inner_ou_test_concat_categorical.for_join_in_inner_ou.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical.for_join_in_inner_ou_test_concat_categorical.for_join_in_inner_ou.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1952, "end_line": 1973, "span_ids": ["test_concat_categorical"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"known, cat_index, divisions\",\n [\n (True, True, False),\n (True, False, True),\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, False, False),\n ],\n)\ndef test_concat_categorical(known, cat_index, divisions):\n # ... 
other code\n\n for join in [\"inner\", \"outer\"]:\n # Frame\n res = check_and_return(dframes, frames, join)\n assert has_known_categories(res.w)\n assert has_known_categories(res.y) == known\n\n # Series\n res = check_and_return([i.y for i in dframes], [i.y for i in frames], join)\n assert has_known_categories(res) == known\n\n # Non-cat series with cat index\n if cat_index:\n res = check_and_return([i.x for i in dframes], [i.x for i in frames], join)\n\n # Partition missing columns\n res = check_and_return(\n [dframes[0][[\"x\", \"y\"]]] + dframes[1:],\n [frames[0][[\"x\", \"y\"]]] + frames[1:],\n join,\n )\n assert not hasattr(res, \"w\") or has_known_categories(res.w)\n assert has_known_categories(res.y) == known", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_mixed_simple_test_concat_categorical_mixed_simple.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_categorical_mixed_simple_test_concat_categorical_mixed_simple.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1851, "end_line": 1859, "span_ids": ["test_concat_categorical_mixed_simple"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_categorical_mixed_simple():\n a = pd.Series([\"a\", \"b\", \"c\"], dtype=\"category\")\n b = pd.Series([\"a\", \"b\"], dtype=\"category\")\n da = dd.from_pandas(a, 2).cat.as_unknown().to_frame(\"A\")\n db = dd.from_pandas(b, 2).to_frame(\"A\")\n\n expected = concat([a.to_frame(\"A\"), b.to_frame(\"A\")])\n result = dd.concat([da, db])\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_test_append.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_test_append.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2032, "end_line": 2060, "span_ids": ["test_append"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6], \"b\": 
[1, 2, 3, 4, 5, 6]})\n df2 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6], \"b\": [1, 2, 3, 4, 5, 6]}, index=[6, 7, 8, 9, 10, 11]\n )\n df3 = pd.DataFrame(\n {\"b\": [1, 2, 3, 4, 5, 6], \"c\": [1, 2, 3, 4, 5, 6]}, index=[6, 7, 8, 9, 10, 11]\n )\n\n ddf = dd.from_pandas(df, 2)\n ddf2 = dd.from_pandas(df2, 2)\n ddf3 = dd.from_pandas(df3, 2)\n\n s = pd.Series([7, 8], name=6, index=[\"a\", \"b\"])\n\n check_append_with_warning(ddf, s, df, s)\n check_append_with_warning(ddf, ddf2, df, df2)\n check_append_with_warning(ddf.a, ddf2.a, df.a, df2.a)\n\n # different columns\n check_append_with_warning(ddf, ddf3, df, df3)\n check_append_with_warning(ddf.a, ddf3.b, df.a, df3.b)\n\n # dask + pandas\n check_append_with_warning(ddf, df2, df, df2)\n check_append_with_warning(ddf.a, df2.a, df.a, df2.a)\n\n check_append_with_warning(ddf, df3, df, df3)\n check_append_with_warning(ddf.a, df3.b, df.a, df3.b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append2_test_append2.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append2_test_append2.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2063, "end_line": 2102, "span_ids": ["test_append2"], "tokens": 598}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append2():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, parent_meta=pd.DataFrame())\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [None, None])\n df1 = ddf1.compute()\n\n dsk = {\n (\"y\", 0): pd.DataFrame({\"a\": [10, 20, 30], \"b\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"a\": [40, 50, 60], \"b\": [30, 20, 10]}),\n (\"y\", 2): pd.DataFrame({\"a\": [70, 80, 90], \"b\": [0, 0, 0]}),\n }\n ddf2 = dd.DataFrame(dsk, \"y\", meta, [None, None])\n df2 = ddf2.compute()\n\n dsk = {\n (\"y\", 0): pd.DataFrame({\"b\": [10, 20, 30], \"c\": [40, 50, 60]}),\n (\"y\", 1): pd.DataFrame({\"b\": [40, 50, 60], \"c\": [30, 20, 10]}),\n }\n meta = make_meta({\"b\": \"i8\", \"c\": \"i8\"}, parent_meta=pd.DataFrame())\n ddf3 = dd.DataFrame(dsk, \"y\", meta, [None, None])\n df3 = ddf3.compute()\n\n check_append_with_warning(ddf1, ddf2, df1, df2)\n check_append_with_warning(ddf2, ddf1, df2, df1)\n\n # different columns\n check_append_with_warning(ddf1, ddf3, df1, df3)\n check_append_with_warning(ddf3, ddf1, df3, df1)\n\n # Dask + pandas\n check_append_with_warning(ddf1, df2, df1, df2)\n check_append_with_warning(ddf2, df1, df2, df1)\n\n # different columns\n check_append_with_warning(ddf1, df3, df1, df3)\n check_append_with_warning(ddf3, df1, df3, df1)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_categorical_test_append_categorical.for_known_in_True_False.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_categorical_test_append_categorical.for_known_in_True_False.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2105, "end_line": 2150, "span_ids": ["test_append_categorical"], "tokens": 382}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_categorical():\n frames = [\n pd.DataFrame(\n {\n \"x\": np.arange(5, 10),\n \"y\": list(\"abbba\"),\n \"z\": np.arange(5, 10, dtype=\"f8\"),\n }\n ),\n pd.DataFrame(\n {\n \"x\": np.arange(10, 15),\n \"y\": list(\"bcbcc\"),\n \"z\": np.arange(10, 15, dtype=\"f8\"),\n }\n ),\n ]\n frames2 = []\n for df in frames:\n df.y = df.y.astype(\"category\")\n df2 = df.copy()\n df2.y = df2.y.cat.set_categories(list(\"abc\"))\n df.index = df.y\n frames2.append(df2.set_index(df2.y))\n\n df1, df2 = frames2\n\n for known in [True, False]:\n dframes = [dd.from_pandas(p, npartitions=2, sort=False) for p in frames]\n if not known:\n dframes[0]._meta = clear_known_categories(\n dframes[0]._meta, [\"y\"], index=True\n )\n ddf1, ddf2 = dframes\n\n res = check_append_with_warning(ddf1, ddf2, df1, df2)\n\n assert has_known_categories(res.index) == known\n assert has_known_categories(res.y) == known\n\n res = check_append_with_warning(ddf1.y, ddf2.y, df1.y, df2.y)\n assert has_known_categories(res.index) == known\n assert has_known_categories(res) == known\n\n res = check_append_with_warning(ddf1.index, ddf2.index, df1.index, df2.index)\n assert has_known_categories(res) == known", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_lose_divisions_test_repartition_repeated_divisions.assert_eq_ddf2_df_set_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_append_lose_divisions_test_repartition_repeated_divisions.assert_eq_ddf2_df_set_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2153, "end_line": 2176, "span_ids": ["test_repartition_repeated_divisions", "test_singleton_divisions", "test_append_lose_divisions"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_lose_divisions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]}, index=[1, 2, 3, 4])\n ddf = dd.from_pandas(df, npartitions=2)\n\n res = check_append_with_warning(ddf, ddf, df, df)\n assert res.known_divisions is False\n\n\ndef test_singleton_divisions():\n df = pd.DataFrame({\"x\": [1, 1, 1]}, index=[1, 2, 3])\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = ddf.set_index(\"x\")\n\n joined = ddf2.join(ddf2, rsuffix=\"r\")\n assert joined.divisions == (1, 1)\n joined.compute()\n\n\ndef test_repartition_repeated_divisions():\n df = pd.DataFrame({\"x\": [0, 0, 0, 0]})\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"x\")\n\n ddf2 = ddf.repartition(divisions=(0, 0), force=True)\n assert_eq(ddf2, df.set_index(\"x\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_multi_duplicate_divisions_test_multi_duplicate_divisions.assert_eq_r1_r2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_multi_duplicate_divisions_test_multi_duplicate_divisions.assert_eq_r1_r2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2052, "end_line": 2067, "span_ids": ["test_multi_duplicate_divisions"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multi_duplicate_divisions():\n df1 = pd.DataFrame({\"x\": [0, 0, 0, 0]})\n df2 = pd.DataFrame({\"x\": [0]})\n\n ddf1 = dd.from_pandas(df1, npartitions=2).set_index(\"x\")\n ddf2 = dd.from_pandas(df2, npartitions=1).set_index(\"x\")\n assert ddf1.npartitions == 2\n assert len(ddf1) == len(df1)\n\n r1 = ddf1.merge(ddf2, how=\"left\", left_index=True, right_index=True)\n\n sf1 = df1.set_index(\"x\")\n sf2 = df2.set_index(\"x\")\n r2 = sf1.merge(sf2, how=\"left\", left_index=True, right_index=True)\n\n assert_eq(r1, r2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_test_fuse_ave_width_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_test_fuse_ave_width_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_optimize_dataframe.py", "file_name": "test_optimize_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 13, "end_line": 40, "span_ids": ["test_optimize_blockwise", "test_fuse_ave_width"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_ave_width():\n df = pd.DataFrame({\"x\": range(10)})\n df = dd.from_pandas(df, npartitions=5)\n\n s = (df.x + 1) + (df.x + 2)\n\n with dask.config.set({\"optimization.fuse.ave-width\": 4}):\n a = s.__dask_optimize__(s.dask, s.__dask_keys__())\n\n b = s.__dask_optimize__(s.dask, s.__dask_keys__())\n\n assert len(a) <= 15\n assert len(b) <= 15\n\n\ndef test_optimize_blockwise():\n from dask.array.optimization import optimize_blockwise\n\n df = pd.DataFrame({\"x\": range(10), \"y\": range(10)})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for i in range(10):\n ddf[\"x\"] = ddf.x + 1 + ddf.y\n\n graph = optimize_blockwise(ddf.dask)\n\n assert len(graph) <= 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_object_test_get_dummies_object.None_2.dd_get_dummies_ddf_colum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_object_test_get_dummies_object.None_2.dd_get_dummies_ddf_colum", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 30, "end_line": 53, "span_ids": ["test_get_dummies_object"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_object():\n df = pd.DataFrame(\n {\n \"a\": pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),\n \"b\": list(\"abcdabcd\"),\n \"c\": pd.Categorical(list(\"abcdabcd\")),\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n # Explicitly exclude object columns\n exp = pd.get_dummies(df, columns=[\"a\", \"c\"])\n res = dd.get_dummies(ddf, columns=[\"a\", \"c\"])\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, exp.columns)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf.b)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf, columns=[\"b\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_test_get_dummies_sparse.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_test_get_dummies_sparse.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 104, "end_line": 119, "span_ids": 
["test_get_dummies_sparse"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@check_pandas_issue_45618_warning\ndef test_get_dummies_sparse():\n s = pd.Series(pd.Categorical([\"a\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"]))\n ds = dd.from_pandas(s, 2)\n\n exp = pd.get_dummies(s, sparse=True)\n res = dd.get_dummies(ds, sparse=True)\n assert_eq(exp, res)\n\n assert res.compute().a.dtype == \"Sparse[uint8, 0]\"\n assert pd.api.types.is_sparse(res.a.compute())\n\n exp = pd.get_dummies(s.to_frame(name=\"a\"), sparse=True)\n res = dd.get_dummies(ds.to_frame(name=\"a\"), sparse=True)\n assert_eq(exp, res)\n assert pd.api.types.is_sparse(res.a_a.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_mix_test_get_dummies_sparse_mix.assert_pd_api_types_is_sp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_sparse_mix_test_get_dummies_sparse_mix.assert_pd_api_types_is_sp", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 122, "end_line": 137, "span_ids": ["test_get_dummies_sparse_mix"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@check_pandas_issue_45618_warning\ndef test_get_dummies_sparse_mix():\n df = pd.DataFrame(\n {\n \"A\": pd.Categorical([\"a\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"]),\n \"B\": [0, 0, 1],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n exp = pd.get_dummies(df, sparse=True)\n res = dd.get_dummies(ddf, sparse=True)\n assert_eq(exp, res)\n\n assert res.compute().A_a.dtype == \"Sparse[uint8, 0]\"\n assert pd.api.types.is_sparse(res.A_a.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_dtype_test_get_dummies_dtype.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_dtype_test_get_dummies_dtype.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 148, "span_ids": ["test_get_dummies_dtype"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_dtype():\n df = pd.DataFrame(\n {\n \"A\": pd.Categorical([\"a\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"]),\n \"B\": [0, 0, 1],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n exp = pd.get_dummies(df, dtype=\"float64\")\n res = dd.get_dummies(ddf, dtype=\"float64\")\n assert_eq(exp, res)\n assert res.compute().A_a.dtype == \"float64\"\n\n # dask's get_dummies on a pandas dataframe.\n assert_eq(dd.get_dummies(df, dtype=\"float64\"), exp)\n assert res.compute().A_a.dtype == \"float64\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_errors_test_get_dummies_errors.None_3.dd_get_dummies_ddf_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_errors_test_get_dummies_errors.None_3.dd_get_dummies_ddf_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 163, "span_ids": ["test_get_dummies_errors"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_errors():\n with pytest.raises(NotImplementedError):\n # not Categorical\n s = pd.Series([1, 1, 1, 2, 2, 1, 3, 4])\n ds = dd.from_pandas(s, 2)\n dd.get_dummies(ds)\n\n # unknown categories\n df = pd.DataFrame({\"x\": list(\"abcbc\"), \"y\": list(\"bcbcb\")})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf._meta = make_meta(\n {\"x\": \"category\", \"y\": \"category\"}, parent_meta=pd.DataFrame()\n )\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf)\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf, columns=[\"x\", \"y\"])\n\n with pytest.raises(NotImplementedError):\n dd.get_dummies(ddf.x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_test_pivot_table.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_test_pivot_table.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 210, "span_ids": ["test_pivot_table"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"values\", [\"B\", [\"B\"], [\"B\", \"D\"]])\n@pytest.mark.parametrize(\"aggfunc\", [\"mean\", \"sum\", \"count\", \"first\", \"last\"])\ndef test_pivot_table(values, aggfunc):\n df = pd.DataFrame(\n {\n \"A\": np.random.choice(list(\"XYZ\"), size=100),\n \"B\": np.random.randn(100),\n \"C\": pd.Categorical(np.random.choice(list(\"abc\"), size=100)),\n \"D\": np.random.randn(100),\n }\n )\n ddf = dd.from_pandas(df, 5).repartition((0, 20, 40, 60, 80, 98, 99))\n\n res = dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n exp = pd.pivot_table(df, index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n if aggfunc == \"count\":\n # dask result cannot be int64 dtype depending on divisions because of NaN\n exp = exp.astype(np.float64)\n\n assert_eq(res, exp)\n\n # method\n res = ddf.pivot_table(index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n exp = df.pivot_table(index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n if aggfunc == \"count\":\n # dask result cannot be int64 dtype depending on divisions because of NaN\n exp = exp.astype(np.float64)\n assert_eq(res, exp)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_dtype_test_pivot_table_dtype.assert_eq_res_exp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_dtype_test_pivot_table_dtype.assert_eq_res_exp_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 203, "end_line": 219, "span_ids": ["test_pivot_table_dtype"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pivot_table_dtype():\n\n df = pd.DataFrame(\n {\"A\": list(\"AABB\"), \"B\": pd.Categorical(list(\"ABAB\")), \"C\": [1, 2, 3, 4]}\n )\n ddf = dd.from_pandas(df, 2)\n res = dd.pivot_table(ddf, index=\"A\", columns=\"B\", values=\"C\", aggfunc=\"count\")\n\n exp_index = pd.CategoricalIndex([\"A\", \"B\"], name=\"B\")\n exp = pd.Series([np.float64] * 2, index=exp_index)\n tm.assert_series_equal(res.dtypes, exp)\n\n exp = pd.pivot_table(\n df, index=\"A\", columns=\"B\", values=\"C\", aggfunc=\"count\"\n ).astype(np.float64)\n\n assert_eq(res, exp)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_index_dtype_test_pivot_table_index_dtype.assert_res_index_dtype_": {"__data__": 
{"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_index_dtype_test_pivot_table_index_dtype.assert_res_index_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 233, "span_ids": ["test_pivot_table_index_dtype"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pivot_table_index_dtype():\n df = pd.DataFrame(\n {\n \"A\": pd.date_range(start=\"2019-08-01\", periods=3, freq=\"1D\"),\n \"B\": pd.Categorical(list(\"abc\")),\n \"C\": [1, 2, 3],\n }\n )\n ddf = dd.from_pandas(df, 2)\n res = dd.pivot_table(ddf, index=\"A\", columns=\"B\", values=\"C\", aggfunc=\"count\")\n\n assert res.index.dtype == np.dtype(\"datetime64[ns]\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_errors_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_errors_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 272, "end_line": 325, "span_ids": ["test_pivot_table_errors"], "tokens": 533}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pivot_table_errors():\n df = pd.DataFrame(\n {\n \"A\": np.random.choice(list(\"abc\"), size=10),\n \"B\": np.random.randn(10),\n \"C\": pd.Categorical(np.random.choice(list(\"abc\"), size=10)),\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n msg = \"'index' must be the name of an existing column\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=[\"A\"], columns=\"C\", values=\"B\")\n assert msg in str(err.value)\n msg = \"'columns' must be the name of an existing column\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=[\"C\"], values=\"B\")\n assert msg in str(err.value)\n msg = \"'values' must refer to an existing column or columns\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=[[\"B\"]])\n assert msg in str(err.value)\n\n msg = \"aggfunc must be either 'mean', 'sum', 'count', 'first', 'last'\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=\"B\", aggfunc=[\"sum\"])\n assert msg in str(err.value)\n\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=\"B\", aggfunc=\"xx\")\n assert msg in str(err.value)\n\n # unknown categories\n 
ddf._meta = make_meta(\n {\"A\": object, \"B\": float, \"C\": \"category\"}, parent_meta=pd.DataFrame()\n )\n msg = \"'columns' must have known categories\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=[\"B\"])\n assert msg in str(err.value)\n\n df = pd.DataFrame(\n {\n \"A\": np.random.choice(list(\"abc\"), size=10),\n \"B\": np.random.randn(10),\n \"C\": np.random.choice(list(\"abc\"), size=10),\n }\n )\n ddf = dd.from_pandas(df, 2)\n msg = \"'columns' must be category dtype\"\n with pytest.raises(ValueError) as err:\n dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=\"B\")\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_test_map_overlap.for_before_after_in_0_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_test_map_overlap.for_before_after_in_0_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 47, "end_line": 59, "span_ids": ["test_map_overlap"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 4])\ndef test_map_overlap(npartitions):\n ddf = dd.from_pandas(df, npartitions)\n for before, after in [(0, 3), (3, 0), (3, 3), (0, 0)]:\n # DataFrame\n res = ddf.map_overlap(shifted_sum, before, after, before, after, c=2)\n sol = shifted_sum(df, before, after, c=2)\n assert_eq(res, sol)\n\n # Series\n res = ddf.b.map_overlap(shifted_sum, before, after, before, after, c=2)\n sol = shifted_sum(df.b, before, after, c=2)\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_names_test_map_overlap_names.assert_res4__name_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_names_test_map_overlap_names.assert_res4__name_res_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 79, "span_ids": ["test_map_overlap_names"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_map_overlap_names():\n npartitions = 3\n ddf = dd.from_pandas(df, npartitions)\n\n res = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)\n res2 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)\n assert set(res.dask) == set(res2.dask)\n\n res3 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=3)\n assert res3._name != res._name\n # Difference is just the final map\n diff = res3.dask.keys() - res.dask.keys()\n assert len(diff) == npartitions\n\n res4 = ddf.map_overlap(shifted_sum, 3, 0, 0, 3, c=2)\n assert res4._name != res._name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_provide_meta_test_map_overlap_provide_meta.assert_eq_res_sol_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_provide_meta_test_map_overlap_provide_meta.assert_eq_res_sol_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 109, "span_ids": ["test_map_overlap_provide_meta"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_provide_meta():\n df = pd.DataFrame(\n {\"x\": [1, 2, 4, 7, 11], \"y\": [1.0, 2.0, 3.0, 4.0, 5.0]}\n ).rename_axis(\"myindex\")\n ddf = dd.from_pandas(df, npartitions=2)\n\n # Provide meta spec, but not full metadata\n res = ddf.map_overlap(\n lambda df: df.rolling(2).sum(), 2, 0, meta={\"x\": \"i8\", \"y\": \"i8\"}\n )\n sol = df.rolling(2).sum()\n assert_eq(res, sol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_mad_rolling_method_args_check_less_precise._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_mad_rolling_method_args_check_less_precise._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 134, "span_ids": ["mad", "impl:13"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mad(x):\n return np.fabs(x - x.mean()).mean()\n\n\nrolling_method_args_check_less_precise = [\n (\"count\", (), False),\n (\"sum\", (), False),\n (\"mean\", (), False),\n (\"median\", (), False),\n (\"min\", (), False),\n (\"max\", (), False),\n (\"std\", (), 
True),\n (\"var\", (), True),\n (\"skew\", (), True), # here and elsewhere, results for kurt and skew are\n (\"kurt\", (), True), # checked with check_less_precise=True so that we are\n # only looking at 3ish decimal places for the equality check\n # rather than 5ish. I have encountered a case where a test\n # seems to have failed due to numerical problems with kurt.\n # So far, I am only weakening the check for kurt and skew,\n # as they involve third degree powers and higher\n (\"quantile\", (0.38,), False),\n (\"apply\", (mad,), False),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_methods_test_rolling_methods.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_methods_test_rolling_methods.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 179, "span_ids": ["test_rolling_methods"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,args,check_less_precise\", rolling_method_args_check_less_precise\n)\n@pytest.mark.parametrize(\"window\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"center\", [True, False])\ndef test_rolling_methods(method, args, window, center, check_less_precise):\n if dd._compat.PANDAS_GT_110:\n if check_less_precise:\n check_less_precise = {\"atol\": 1e-3, \"rtol\": 1e-3}\n else:\n check_less_precise = {}\n else:\n check_less_precise = {\"check_less_precise\": check_less_precise}\n if dd._compat.PANDAS_GT_120 and method == \"count\":\n min_periods = 0\n else:\n min_periods = None\n # DataFrame\n prolling = df.rolling(window, center=center, min_periods=min_periods)\n drolling = ddf.rolling(window, center=center, min_periods=min_periods)\n if method == \"apply\":\n kwargs = {\"raw\": False}\n else:\n kwargs = {}\n\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )\n\n # Series\n prolling = df.a.rolling(window, center=center, min_periods=min_periods)\n drolling = ddf.a.rolling(window, center=center, min_periods=min_periods)\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_raises_test_rolling_names.assert_sorted_a_rolling_2": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_raises_test_rolling_names.assert_sorted_a_rolling_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 211, "span_ids": ["test_rolling_raises", "test_rolling_names"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_raises():\n df = pd.DataFrame(\n {\"a\": np.random.randn(25).cumsum(), \"b\": np.random.randint(100, size=(25,))}\n )\n ddf = dd.from_pandas(df, 3)\n pytest.raises(ValueError, lambda: ddf.rolling(1.5))\n pytest.raises(ValueError, lambda: ddf.rolling(-1))\n pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=1.2))\n pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=-2))\n pytest.raises(ValueError, lambda: ddf.rolling(3, axis=10))\n pytest.raises(ValueError, lambda: ddf.rolling(3, axis=\"coulombs\"))\n pytest.raises(NotImplementedError, lambda: ddf.rolling(100).mean().compute())\n\n\ndef test_rolling_names():\n df = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n a = dd.from_pandas(df, npartitions=2)\n assert sorted(a.rolling(2).sum().dask) == sorted(a.rolling(2).sum().dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_axis_test_rolling_axis.assert_eq_s_rolling_5_ax": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_axis_test_rolling_axis.assert_eq_s_rolling_5_ax", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 231, "span_ids": ["test_rolling_axis"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_axis():\n df = pd.DataFrame(np.random.randn(20, 16))\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(df.rolling(3, axis=0).mean(), ddf.rolling(3, axis=0).mean())\n assert_eq(df.rolling(3, axis=1).mean(), ddf.rolling(3, axis=1).mean())\n assert_eq(\n df.rolling(3, min_periods=1, axis=1).mean(),\n ddf.rolling(3, min_periods=1, axis=1).mean(),\n )\n assert_eq(\n df.rolling(3, axis=\"columns\").mean(), ddf.rolling(3, axis=\"columns\").mean()\n )\n assert_eq(df.rolling(3, axis=\"rows\").mean(), ddf.rolling(3, axis=\"rows\").mean())\n\n s = df[3]\n ds = ddf[3]\n assert_eq(s.rolling(5, axis=0).std(), ds.rolling(5, axis=0).std())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_partition_size_test_rolling_partition_size.for_obj_dobj_in_df_dd.with_pytest_raises_NotImp.dobj_rolling_12_mean_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_partition_size_test_rolling_partition_size.for_obj_dobj_in_df_dd.with_pytest_raises_NotImp.dobj_rolling_12_mean_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 234, "end_line": 242, "span_ids": ["test_rolling_partition_size"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_partition_size():\n df = pd.DataFrame(np.random.randn(50, 2))\n ddf = dd.from_pandas(df, npartitions=5)\n\n for obj, dobj in [(df, ddf), (df[0], ddf[0])]:\n assert_eq(obj.rolling(10).mean(), dobj.rolling(10).mean())\n assert_eq(obj.rolling(11).mean(), dobj.rolling(11).mean())\n with pytest.raises(NotImplementedError):\n dobj.rolling(12).mean().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_agg_aggregate_test_rolling_agg_aggregate.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_agg_aggregate_test_rolling_agg_aggregate.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 362, "end_line": 390, "span_ids": ["test_rolling_agg_aggregate"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_agg_aggregate():\n df = pd.DataFrame({\"A\": range(5), \"B\": range(0, 10, 2)})\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(\n df.rolling(window=3).agg([np.mean, np.std]),\n ddf.rolling(window=3).agg([np.mean, np.std]),\n )\n\n assert_eq(\n df.rolling(window=3).agg({\"A\": np.sum, \"B\": lambda x: np.std(x, ddof=1)}),\n ddf.rolling(window=3).agg({\"A\": np.sum, \"B\": lambda x: np.std(x, ddof=1)}),\n )\n\n assert_eq(\n df.rolling(window=3).agg([np.sum, np.mean]),\n ddf.rolling(window=3).agg([np.sum, np.mean]),\n )\n\n assert_eq(\n df.rolling(window=3).agg({\"A\": [np.sum, np.mean]}),\n ddf.rolling(window=3).agg({\"A\": [np.sum, np.mean]}),\n )\n\n kwargs = {\"raw\": True}\n assert_eq(\n df.rolling(window=3).apply(lambda x: np.std(x, 
ddof=1), **kwargs),\n ddf.rolling(window=3).apply(lambda x: np.std(x, ddof=1), **kwargs),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_numba_engine_test_rolling_numba_engine.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_numba_engine_test_rolling_numba_engine.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 387, "end_line": 403, "span_ids": ["test_rolling_numba_engine"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_numba_engine():\n numba = pytest.importorskip(\"numba\")\n numba_version = parse_version(numba.__version__)\n if not dd._compat.PANDAS_GT_104 and numba_version >= parse_version(\"0.49\"):\n # Was fixed in https://github.com/pandas-dev/pandas/pull/33687\n pytest.xfail(\"Known incompatibility between pandas and numba\")\n\n df = pd.DataFrame({\"A\": range(5), \"B\": range(0, 10, 2)})\n ddf = dd.from_pandas(df, npartitions=3)\n\n def f(x):\n return np.sum(x) + 5\n\n assert_eq(\n df.rolling(3).apply(f, engine=\"numba\", raw=True),\n ddf.rolling(3).apply(f, engine=\"numba\", raw=True),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_itertools_shuffle_func.shuffle": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_itertools_shuffle_func.shuffle", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 49, "span_ids": ["imports"], "tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nimport multiprocessing as mp\nimport os\nimport pickle\nimport random\nimport string\nimport tempfile\nfrom concurrent.futures import ProcessPoolExecutor\nfrom copy import copy\nfrom functools import partial\nfrom unittest import mock\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask import delayed\nfrom dask.base import compute_as_if_collection\nfrom dask.dataframe._compat import PANDAS_GT_120, assert_categorical_equal, tm\nfrom dask.dataframe.shuffle import (\n _noop,\n 
maybe_buffered_partd,\n partitioning_index,\n rearrange_by_column,\n rearrange_by_divisions,\n remove_nans,\n shuffle,\n)\nfrom dask.dataframe.utils import assert_eq, make_meta\nfrom dask.optimization import cull\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [1, 4, 7]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [2, 5, 8]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [3, 6, 9]}, index=[9, 9, 9]),\n}\nmeta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"), parent_meta=pd.DataFrame()\n)\nd = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\nfull = d.compute()\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False\n\n\nshuffle_func = shuffle", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_test_partitioning_index.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_test_partitioning_index.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 129, "end_line": 145, "span_ids": ["test_partitioning_index"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partitioning_index():\n res = partitioning_index(df2.i32, 3)\n assert ((res < 3) & (res >= 0)).all()\n assert len(np.unique(res)) > 1\n\n assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()\n\n res = partitioning_index(df2[[\"i32\"]], 3)\n assert ((res < 3) & (res >= 0)).all()\n assert len(np.unique(res)) > 1\n\n res = partitioning_index(df2[[\"cat\", \"bool\", \"f32\"]], 2)\n assert ((0 <= res) & (res < 2)).all()\n\n res = partitioning_index(df2.index, 4)\n assert ((res < 4) & (res >= 0)).all()\n assert len(np.unique(res)) > 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_categorical_on_values_test_partitioning_index_categorical_on_values.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_partitioning_index_categorical_on_values_test_partitioning_index_categorical_on_values.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 148, "end_line": 160, "span_ids": ["test_partitioning_index_categorical_on_values"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partitioning_index_categorical_on_values():\n df = pd.DataFrame({\"a\": list(string.ascii_letters), \"b\": [1, 2, 3, 4] * 13})\n df.a = df.a.astype(\"category\")\n df2 = df.copy()\n df2.a = df2.a.cat.set_categories(list(reversed(df2.a.cat.categories)))\n\n res = partitioning_index(df.a, 5)\n res2 = partitioning_index(df2.a, 5)\n assert (res == res2).all()\n\n res = partitioning_index(df, 5)\n res2 = partitioning_index(df2, 5)\n assert (res == res2).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_names_test_set_index_names.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_names_test_set_index_names.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 218, "end_line": 240, "span_ids": ["test_set_index_names"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_names(shuffle_method):\n if shuffle_method == \"disk\":\n pytest.xfail(\"dsk names in disk shuffle are not deterministic\")\n\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n ddf = dd.from_pandas(df, npartitions=4)\n\n assert set(ddf.set_index(\"x\", shuffle=shuffle_method).dask) == set(\n ddf.set_index(\"x\", shuffle=shuffle_method).dask\n )\n assert set(ddf.set_index(\"x\", shuffle=shuffle_method).dask) != set(\n ddf.set_index(\"y\", shuffle=shuffle_method).dask\n )\n assert set(ddf.set_index(\"x\", max_branch=4, shuffle=shuffle_method).dask) != set(\n ddf.set_index(\"x\", max_branch=3, shuffle=shuffle_method).dask\n )\n assert set(ddf.set_index(\"x\", drop=True, shuffle=shuffle_method).dask) != set(\n ddf.set_index(\"x\", drop=False, shuffle=shuffle_method).dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_sort_test_shuffle_sort.assert_eq_ddf2_loc_2_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_sort_test_shuffle_sort.assert_eq_ddf2_loc_2_3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 276, "span_ids": 
["test_shuffle_sort"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shuffle_sort(shuffle_method):\n df = pd.DataFrame({\"x\": [1, 2, 3, 2, 1], \"y\": [9, 8, 7, 1, 5]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n df2 = df.set_index(\"x\").sort_index()\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle_method)\n\n assert_eq(ddf2.loc[2:3], df2.loc[2:3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_test_rearrange.for_i_in_a__partitions_dr.assert_sum_i_in_set_part_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_test_rearrange.for_i_in_a__partitions_dr.assert_sum_i_in_set_part_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 297, "span_ids": ["test_rearrange"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_rearrange(shuffle_method, scheduler):\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n result = rearrange_by_column(\n ddf2, \"_partitions\", max_branch=32, shuffle=shuffle_method\n )\n assert result.npartitions == ddf.npartitions\n assert set(ddf.dask).issubset(result.dask)\n\n # Every value in exactly one partition\n a = result.compute(scheduler=scheduler)\n get = dask.base.get_scheduler(scheduler=scheduler)\n parts = get(result.dask, result.__dask_keys__())\n\n for i in a._partitions.drop_duplicates():\n assert sum(i in set(part._partitions) for part in parts) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_cleanup_mock_shuffle_group_3.raise_ValueError_Mock_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_cleanup_mock_shuffle_group_3.raise_ValueError_Mock_ex", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 283, "end_line": 298, "span_ids": ["test_rearrange_cleanup", "mock_shuffle_group_3"], "tokens": 146}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rearrange_cleanup():\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n tmpdir = tempfile.mkdtemp()\n\n with dask.config.set(temporay_directory=str(tmpdir)):\n result = rearrange_by_column(ddf2, \"_partitions\", max_branch=32, shuffle=\"disk\")\n result.compute(scheduler=\"processes\")\n\n assert len(os.listdir(tmpdir)) == 0\n\n\ndef mock_shuffle_group_3(df, col, npartitions, p):\n raise ValueError(\"Mock exception!\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_disk_cleanup_with_exception_test_rearrange_disk_cleanup_with_exception.assert_len_os_listdir_tmp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_disk_cleanup_with_exception_test_rearrange_disk_cleanup_with_exception.assert_len_os_listdir_tmp", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 301, "end_line": 318, "span_ids": ["test_rearrange_disk_cleanup_with_exception"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rearrange_disk_cleanup_with_exception():\n # ensure temporary files are cleaned up when there's an internal exception.\n\n with mock.patch(\"dask.dataframe.shuffle.shuffle_group_3\", new=mock_shuffle_group_3):\n df = pd.DataFrame({\"x\": np.random.random(10)})\n ddf = dd.from_pandas(df, npartitions=4)\n ddf2 = ddf.assign(_partitions=ddf.x % 4)\n\n tmpdir = tempfile.mkdtemp()\n\n with dask.config.set(temporay_directory=str(tmpdir)):\n with pytest.raises(ValueError, match=\"Mock exception!\"):\n result = rearrange_by_column(\n ddf2, \"_partitions\", max_branch=32, shuffle=\"disk\"\n )\n result.compute(scheduler=\"processes\")\n\n assert len(os.listdir(tmpdir)) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_explicit_divisions_test_set_index_with_explicit_divisions.with_pytest_raises_ValueE.ddf_set_index_x_divisi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_explicit_divisions_test_set_index_with_explicit_divisions.with_pytest_raises_ValueE.ddf_set_index_x_divisi", "embedding": null, 
"metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 343, "end_line": 360, "span_ids": ["test_set_index_with_explicit_divisions"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_with_explicit_divisions():\n df = pd.DataFrame({\"x\": [4, 1, 2, 5]}, index=[10, 20, 30, 40])\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n def throw(*args, **kwargs):\n raise Exception()\n\n with dask.config.set(get=throw):\n ddf2 = ddf.set_index(\"x\", divisions=[1, 3, 5])\n assert ddf2.divisions == (1, 3, 5)\n\n df2 = df.set_index(\"x\")\n assert_eq(ddf2, df2)\n\n # Divisions must be sorted\n with pytest.raises(ValueError):\n ddf.set_index(\"x\", divisions=[3, 1, 5])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_compute_test_set_index_divisions_compute.assert_len_d4_dask_len": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_compute_test_set_index_divisions_compute.assert_len_d4_dask_len", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 373, "end_line": 389, "span_ids": ["test_set_index_divisions_compute"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_divisions_compute():\n d2 = d.set_index(\"b\", divisions=[0, 2, 9], compute=False)\n d3 = d.set_index(\"b\", divisions=[0, 2, 9], compute=True)\n\n assert_eq(d2, d3)\n assert_eq(d2, full.set_index(\"b\"))\n assert_eq(d3, full.set_index(\"b\"))\n assert len(d2.dask) > len(d3.dask)\n\n d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)\n d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)\n exp = full.copy()\n exp.index = exp.b\n assert_eq(d4, d5)\n assert_eq(d4, exp)\n assert_eq(d5, exp)\n assert len(d4.dask) > len(d5.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_sorted_test_set_index_divisions_sorted.None_3.ddf_set_index_y_divisi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_divisions_sorted_test_set_index_divisions_sorted.None_3.ddf_set_index_y_divisi", 
"embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 392, "end_line": 419, "span_ids": ["test_set_index_divisions_sorted"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_divisions_sorted():\n p1 = pd.DataFrame({\"x\": [10, 11, 12], \"y\": [\"a\", \"a\", \"a\"]})\n p2 = pd.DataFrame({\"x\": [13, 14, 15], \"y\": [\"b\", \"b\", \"c\"]})\n p3 = pd.DataFrame({\"x\": [16, 17, 18], \"y\": [\"d\", \"e\", \"e\"]})\n\n ddf = dd.DataFrame(\n {(\"x\", 0): p1, (\"x\", 1): p2, (\"x\", 2): p3}, \"x\", p1, [None, None, None, None]\n )\n df = ddf.compute()\n\n def throw(*args, **kwargs):\n raise Exception(\"Shouldn't have computed\")\n\n with dask.config.set(get=throw):\n res = ddf.set_index(\"x\", divisions=[10, 13, 16, 18], sorted=True)\n assert_eq(res, df.set_index(\"x\"))\n\n with dask.config.set(get=throw):\n res = ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"d\", \"e\"], sorted=True)\n assert_eq(res, df.set_index(\"y\"))\n\n # with sorted=True, divisions must be same length as df.divisions\n with pytest.raises(ValueError):\n ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"c\", \"d\", \"e\"], sorted=True)\n\n # Divisions must be sorted\n with pytest.raises(ValueError):\n ddf.set_index(\"y\", divisions=[\"a\", \"b\", \"d\", \"c\"], sorted=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_consistent_divisions_test_set_index_consistent_divisions.assert_len_divisions_set_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_consistent_divisions_test_set_index_consistent_divisions.assert_len_divisions_set_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 460, "end_line": 474, "span_ids": ["test_set_index_consistent_divisions"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_set_index_consistent_divisions():\n # See https://github.com/dask/dask/issues/3867\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n ddf = dd.from_pandas(df, npartitions=4)\n ddf = ddf.clear_divisions()\n\n ctx = mp.get_context(\"spawn\")\n with ProcessPoolExecutor(8, ctx) as pool:\n func = partial(_set_index, df=ddf, idx=\"x\")\n divisions_set = set(pool.map(func, range(100)))\n assert len(divisions_set) == 1", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__set_index_make_part.return.pd_DataFrame_x_np_ran": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__set_index_make_part.return.pd_DataFrame_x_np_ran", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 460, "end_line": 473, "span_ids": ["make_part", "_set_index", "test_set_index_reduces_partitions_small"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _set_index(i, df, idx):\n return df.set_index(idx).divisions\n\n\ndef test_set_index_reduces_partitions_small(shuffle_method):\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=50)\n\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle_method, npartitions=\"auto\")\n assert ddf2.npartitions < 10\n\n\ndef make_part(n):\n return pd.DataFrame({\"x\": np.random.random(n), \"y\": np.random.random(n)})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_reduces_partitions_large_test_set_index_reduces_partitions_large.assert_1_ddf2_npartitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_reduces_partitions_large_test_set_index_reduces_partitions_large.assert_1_ddf2_npartitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 476, "end_line": 489, "span_ids": ["test_set_index_reduces_partitions_large"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_reduces_partitions_large(shuffle_method):\n nbytes = 1e6\n nparts = 50\n n = int(nbytes / (nparts * 8))\n ddf = dd.DataFrame(\n {(\"x\", i): (make_part, n) for i in range(nparts)},\n \"x\",\n make_part(1),\n [None] * (nparts + 1),\n )\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle_method, npartitions=\"auto\", partition_size=nbytes\n )\n assert 1 < ddf2.npartitions < 20", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_doesnt_increase_partitions_test_set_index_detects_sorted_data.assert_len_ddf2_dask_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_doesnt_increase_partitions_test_set_index_detects_sorted_data.assert_len_ddf2_dask_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 492, "end_line": 513, "span_ids": ["test_set_index_detects_sorted_data", "test_set_index_doesnt_increase_partitions"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_doesnt_increase_partitions(shuffle_method):\n nparts = 2\n nbytes = 1e6\n n = int(nbytes / (nparts * 8))\n ddf = dd.DataFrame(\n {(\"x\", i): (make_part, n) for i in range(nparts)},\n \"x\",\n make_part(1),\n [None] * (nparts + 1),\n )\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle_method, npartitions=\"auto\", partition_size=nbytes\n )\n assert ddf2.npartitions <= ddf.npartitions\n\n\ndef test_set_index_detects_sorted_data(shuffle_method):\n df = pd.DataFrame({\"x\": range(100), \"y\": range(100)})\n ddf = dd.from_pandas(df, npartitions=10, name=\"x\", sort=False)\n\n ddf2 = ddf.set_index(\"x\", shuffle=shuffle_method)\n assert len(ddf2.dask) < ddf.npartitions * 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorts_test_set_index_sorts.assert_ddf_set_index_tim": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorts_test_set_index_sorts.assert_ddf_set_index_tim", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 533, "end_line": 608, "span_ids": ["test_set_index_sorts"], "tokens": 730}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_sorts():\n # https://github.com/dask/dask/issues/2288\n vals = np.array(\n [\n 1348550149000000000,\n 1348550149000000000,\n 1348558142000000000,\n 1348558142000000000,\n 1348585928000000000,\n 1348585928000000000,\n 1348600739000000000,\n 1348601706000000000,\n 1348600739000000000,\n 1348601706000000000,\n 1348614789000000000,\n 1348614789000000000,\n 1348621037000000000,\n 1348621038000000000,\n 1348621040000000000,\n 1348621037000000000,\n 1348621038000000000,\n 1348621040000000000,\n 1348637628000000000,\n 1348638159000000000,\n 
1348638160000000000,\n 1348638159000000000,\n 1348638160000000000,\n 1348637628000000000,\n 1348646354000000000,\n 1348646354000000000,\n 1348659107000000000,\n 1348657111000000000,\n 1348659107000000000,\n 1348657111000000000,\n 1348672876000000000,\n 1348672876000000000,\n 1348682787000000000,\n 1348681985000000000,\n 1348682787000000000,\n 1348681985000000000,\n 1348728167000000000,\n 1348728167000000000,\n 1348730745000000000,\n 1348730745000000000,\n 1348750198000000000,\n 1348750198000000000,\n 1348750198000000000,\n 1348753539000000000,\n 1348753539000000000,\n 1348753539000000000,\n 1348754449000000000,\n 1348754449000000000,\n 1348761333000000000,\n 1348761554000000000,\n 1348761610000000000,\n 1348761333000000000,\n 1348761554000000000,\n 1348761610000000000,\n 1348782624000000000,\n 1348782624000000000,\n 1348782624000000000,\n 1348782624000000000,\n ]\n )\n vals = pd.to_datetime(vals, unit=\"ns\")\n breaks = [10, 36, 58]\n dfs = []\n\n for i in range(len(breaks)):\n lo = sum(breaks[:i])\n hi = sum(breaks[i : i + 1])\n\n dfs.append(pd.DataFrame({\"timestamp\": vals[lo:hi]}, index=range(lo, hi)))\n\n ddf = dd.concat(dfs).clear_divisions()\n assert ddf.set_index(\"timestamp\").index.compute().is_monotonic_increasing is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_test_set_index.assert_eq_d5_full_set_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_test_set_index.assert_eq_d5_full_set_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 594, "end_line": 632, "span_ids": ["test_set_index"], "tokens": 442}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 2, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 5, 8]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [9, 1, 8]}, index=[9, 9, 9]),\n }\n d = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n\n if engine == \"cudf\":\n d = dask_cudf.from_dask_dataframe(d)\n\n full = d.compute()\n\n d2 = d.set_index(\"b\", npartitions=3)\n assert d2.npartitions == 3\n assert d2.index.name == \"b\"\n assert_eq(d2, full.set_index(\"b\"))\n\n d3 = d.set_index(d.b, npartitions=3)\n assert d3.npartitions == 3\n assert d3.index.name == \"b\"\n assert_eq(d3, full.set_index(full.b))\n\n d4 = d.set_index(\"b\")\n assert d4.index.name == \"b\"\n assert_eq(d4, full.set_index(\"b\"))\n\n d5 = d.set_index([\"b\"])\n assert d5.index.name 
== \"b\"\n assert_eq(d5, full.set_index([\"b\"]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_drop_test_set_index_drop.assert_eq_ddf_set_index_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_drop_test_set_index_drop.assert_eq_ddf_set_index_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 652, "end_line": 680, "span_ids": ["test_set_index_drop"], "tokens": 426}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"drop\", [True, False])\ndef test_set_index_drop(drop):\n pdf = pd.DataFrame(\n {\n \"A\": list(\"ABAABBABAA\"),\n \"B\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n \"C\": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n\n assert_eq(ddf.set_index(\"A\", drop=drop), pdf.set_index(\"A\", drop=drop))\n assert_eq(ddf.set_index(\"B\", drop=drop), pdf.set_index(\"B\", drop=drop))\n assert_eq(ddf.set_index(\"C\", drop=drop), pdf.set_index(\"C\", drop=drop))\n assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))\n assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))\n assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))\n\n # numeric columns\n pdf = pd.DataFrame(\n {\n 0: list(\"ABAABBABAA\"),\n 1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],\n }\n )\n ddf = dd.from_pandas(pdf, 3)\n assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))\n assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_raises_error_on_bad_input_test_set_index_raises_error_on_bad_input.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_raises_error_on_bad_input_test_set_index_raises_error_on_bad_input.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 683, "end_line": 698, "span_ids": ["test_set_index_raises_error_on_bad_input"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"def test_set_index_raises_error_on_bad_input():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n ddf = dd.from_pandas(df, 2)\n\n msg = r\"Dask dataframe does not yet support multi-indexes\"\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([\"a\", \"b\"])\n assert msg in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([[\"a\", \"b\"]])\n assert msg in str(err.value)\n\n with pytest.raises(NotImplementedError) as err:\n ddf.set_index([[\"a\"]])\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_true_test_set_index_sorted_true.with_pytest_raises_ValueE.a_set_index_a_z_sorted_T": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_true_test_set_index_sorted_true.with_pytest_raises_ValueE.a_set_index_a_z_sorted_T", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 701, "end_line": 721, "span_ids": ["test_set_index_sorted_true"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_sorted_true():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 20, 40], \"z\": [4, 3, 2, 1]})\n a = dd.from_pandas(df, 2, sort=False)\n assert not a.known_divisions\n\n b = a.set_index(\"x\", sorted=True)\n assert b.known_divisions\n assert set(a.dask).issubset(set(b.dask))\n\n for drop in [True, False]:\n assert_eq(a.set_index(\"x\", drop=drop), df.set_index(\"x\", drop=drop))\n assert_eq(\n a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)\n )\n assert_eq(\n a.set_index(a.x + 1, sorted=True, drop=drop),\n df.set_index(df.x + 1, drop=drop),\n )\n\n with pytest.raises(ValueError):\n a.set_index(a.z, sorted=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_single_partition_test_set_index_sorted_min_max_same.assert_df2_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_sorted_single_partition_test_set_index_sorted_min_max_same.assert_df2_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 724, "end_line": 741, "span_ids": ["test_set_index_sorted_single_partition", "test_set_index_sorted_min_max_same"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_sorted_single_partition():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]})\n ddf = dd.from_pandas(df, npartitions=1)\n assert_eq(ddf.set_index(\"x\", sorted=True), df.set_index(\"x\"))\n\n\ndef test_set_index_sorted_min_max_same():\n a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [0, 0, 0]})\n b = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [1, 1, 1]})\n\n aa = delayed(a)\n bb = delayed(b)\n\n df = dd.from_delayed([aa, bb], meta=a)\n assert not df.known_divisions\n\n df2 = df.set_index(\"y\", sorted=True)\n assert df2.divisions == (0, 1, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_empty_partition_test_set_index_empty_partition.for_conv_in_converters_.assert_assert_eq_ddf_set_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_empty_partition_test_set_index_empty_partition.for_conv_in_converters_.assert_assert_eq_ddf_set_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 744, "end_line": 761, "span_ids": ["test_set_index_empty_partition"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_empty_partition():\n test_vals = [1, 2, 3]\n\n converters = [int, float, str, lambda x: pd.to_datetime(x, unit=\"ns\")]\n\n for conv in converters:\n df = pd.DataFrame(\n [{\"x\": conv(i), \"y\": i} for i in test_vals], columns=[\"x\", \"y\"]\n )\n ddf = dd.concat(\n [\n dd.from_pandas(df, npartitions=1),\n dd.from_pandas(df[df.y > df.y.max()], npartitions=1),\n ]\n )\n\n assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))\n assert assert_eq(ddf.set_index(\"x\"), df.set_index(\"x\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_on_empty_test_set_index_on_empty.for_converter_in_converte.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_on_empty_test_set_index_on_empty.for_converter_in_converte.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 764, "end_line": 778, "span_ids": ["test_set_index_on_empty"], 
"tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_on_empty():\n test_vals = [1, 2, 3, 4]\n converters = [int, float, str, lambda x: pd.to_datetime(x, unit=\"ns\")]\n\n for converter in converters:\n df = pd.DataFrame([{\"x\": converter(x), \"y\": x} for x in test_vals])\n ddf = dd.from_pandas(df, npartitions=4)\n\n assert ddf.npartitions > 1\n\n ddf = ddf[ddf.y > df.y.max()].set_index(\"x\")\n expected_df = df[df.y > df.y.max()].set_index(\"x\")\n\n assert assert_eq(ddf, expected_df, **CHECK_FREQ)\n assert ddf.npartitions == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_categorical_test_set_index_categorical.assert_categorical_equal_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_categorical_test_set_index_categorical.assert_categorical_equal_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 781, "end_line": 794, "span_ids": ["test_set_index_categorical"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_categorical():\n # https://github.com/dask/dask/issues/5671\n order = list(reversed(string.ascii_letters))\n values = list(string.ascii_letters)\n random.shuffle(values)\n dtype = pd.api.types.CategoricalDtype(order, ordered=True)\n df = pd.DataFrame({\"A\": pd.Categorical(values, dtype=dtype), \"B\": 1})\n\n result = dd.from_pandas(df, npartitions=2).set_index(\"A\")\n assert len(result) == len(df)\n\n # sorted with the metric defined by the Categorical\n divisions = pd.Categorical(result.divisions, dtype=dtype)\n assert_categorical_equal(divisions, divisions.sort_values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_divisions_test_compute_divisions.assert_b_known_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_divisions_test_compute_divisions.assert_b_known_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 797, "end_line": 810, "span_ids": ["test_compute_divisions"], "tokens": 142}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_divisions():\n from dask.dataframe.shuffle import compute_and_set_divisions\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4], \"y\": [10, 20, 20, 40], \"z\": [4, 3, 2, 1]},\n index=[1, 3, 10, 20],\n )\n a = dd.from_pandas(df, 2, sort=False)\n assert not a.known_divisions\n\n b = compute_and_set_divisions(copy(a))\n\n assert_eq(a, b, check_divisions=False)\n assert b.known_divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_empty_partitions_test_empty_partitions.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_empty_partitions_test_empty_partitions.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 813, "end_line": 826, "span_ids": ["test_empty_partitions"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_partitions():\n # See https://github.com/dask/dask/issues/2408\n df = pd.DataFrame({\"a\": list(range(10))})\n df[\"b\"] = df[\"a\"] % 3\n df[\"c\"] = df[\"b\"].astype(str)\n\n ddf = dd.from_pandas(df, npartitions=3)\n ddf = ddf.set_index(\"b\")\n ddf = ddf.repartition(npartitions=3)\n ddf.get_partition(0).compute()\n assert_eq(ddf, df.set_index(\"b\"))\n\n ddf = ddf.set_index(\"c\")\n assert_eq(ddf, df.set_index(\"b\").set_index(\"c\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_remove_nans_test_remove_nans.for_conv_none_val_in_con.for_inputs_expected_in_t.assert_remove_nans_params": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_remove_nans_test_remove_nans.for_conv_none_val_in_con.for_inputs_expected_in_t.assert_remove_nans_params", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 829, "end_line": 852, "span_ids": ["test_remove_nans"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_remove_nans():\n tests = [\n ((1, 1, 2), (1, 1, 2)),\n ((None, 1, 2), (1, 1, 2)),\n ((1, None, 2), (1, 2, 2)),\n ((1, 2, None), (1, 2, 2)),\n ((1, 2, None, None), (1, 2, 2, 2)),\n ((None, None, 1, 2), (1, 1, 1, 2)),\n ((1, None, None, 2), (1, 2, 2, 2)),\n ((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),\n ]\n\n converters = [\n (int, np.nan),\n (float, np.nan),\n (str, np.nan),\n (lambda x: pd.to_datetime(x, unit=\"ns\"), np.datetime64(\"NaT\")),\n ]\n\n for conv, none_val in converters:\n for inputs, expected in tests:\n params = [none_val if x is None else conv(x) for x in inputs]\n expected = [conv(x) for x in expected]\n assert remove_nans(params) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_gh_2730_test_gh_2730.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_gh_2730_test_gh_2730.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 855, "end_line": 869, "span_ids": ["test_gh_2730"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_gh_2730():\n large = pd.DataFrame({\"KEY\": np.arange(0, 50000)})\n small = pd.DataFrame({\"KEY\": np.arange(25, 500)})\n\n dd_left = dd.from_pandas(small, npartitions=3)\n dd_right = dd.from_pandas(large, npartitions=257)\n\n with dask.config.set(shuffle=\"tasks\", scheduler=\"sync\"):\n dd_merged = dd_left.merge(dd_right, how=\"inner\", on=\"KEY\")\n result = dd_merged.compute()\n\n expected = large.merge(small, how=\"inner\", on=\"KEY\")\n\n tm.assert_frame_equal(result.sort_values(\"KEY\").reset_index(drop=True), expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_does_not_repeat_work_due_to_optimizations_test_set_index_errors_with_inplace_kwarg.with_pytest_raises_NotImp.ddf_set_index_a_inplac": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_does_not_repeat_work_due_to_optimizations_test_set_index_errors_with_inplace_kwarg.with_pytest_raises_NotImp.ddf_set_index_a_inplac", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 872, "end_line": 903, "span_ids": ["test_set_index_does_not_repeat_work_due_to_optimizations", 
"test_set_index_errors_with_inplace_kwarg"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [None, \"auto\"])\ndef test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):\n # Atomic counter\n count = itertools.count()\n\n def increment():\n next(count)\n\n def make_part(dummy, n):\n return pd.DataFrame({\"x\": np.random.random(n), \"y\": np.random.random(n)})\n\n nbytes = 1e6\n nparts = 50\n n = int(nbytes / (nparts * 8))\n\n dsk = {(\"inc\", i): (increment,) for i in range(nparts)}\n dsk.update({(\"x\", i): (make_part, (\"inc\", i), n) for i in range(nparts)})\n ddf = dd.DataFrame(dsk, \"x\", make_part(None, 1), [None] * (nparts + 1))\n\n ddf.set_index(\"x\", npartitions=npartitions)\n ntimes = next(count)\n assert ntimes == nparts\n\n\ndef test_set_index_errors_with_inplace_kwarg():\n df = pd.DataFrame({\"a\": [9, 8, 7], \"b\": [6, 5, 4], \"c\": [3, 2, 1]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n ddf.set_index(\"a\")\n\n with pytest.raises(NotImplementedError):\n ddf.set_index(\"a\", inplace=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timestamp_test_set_index_timestamp.assert_eq_df2_ddf_set_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timestamp_test_set_index_timestamp.assert_eq_df2_ddf_set_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1012, "end_line": 1028, "span_ids": ["test_set_index_timestamp"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_timestamp():\n df = pd.DataFrame({\"A\": pd.date_range(\"2000\", periods=12, tz=\"US/Central\"), \"B\": 1})\n ddf = dd.from_pandas(df, 2)\n divisions = (\n pd.Timestamp(\"2000-01-01 00:00:00-0600\", tz=\"US/Central\"),\n pd.Timestamp(\"2000-01-12 00:00:00-0600\", tz=\"US/Central\"),\n )\n\n # Note: `freq` is lost during round trip\n df2 = df.set_index(\"A\")\n ddf_new_div = ddf.set_index(\"A\", divisions=divisions)\n for (ts1, ts2) in zip(divisions, ddf_new_div.divisions):\n assert ts1.value == ts2.value\n assert ts1.tz == ts2.tz\n\n assert_eq(df2, ddf_new_div, **CHECK_FREQ)\n assert_eq(df2, ddf.set_index(\"A\"), **CHECK_FREQ)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_with_compression_option_test_disk_shuffle_with_unknown_compression.with_dask_config_set_da.with_pytest_raises_.test_shuffle_disk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_with_compression_option_test_disk_shuffle_with_unknown_compression.with_dask_config_set_da.with_pytest_raises_.test_shuffle_disk_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1049, "end_line": 1069, "span_ids": ["test_disk_shuffle_with_compression_option", "test_disk_shuffle_with_unknown_compression"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression\", [None, \"ZLib\"])\ndef test_disk_shuffle_with_compression_option(compression):\n # test if dataframe shuffle works both with and without compression\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n test_shuffle(\"disk\")\n\n\n@pytest.mark.parametrize(\"compression\", [\"UNKOWN_COMPRESSION_ALGO\"])\ndef test_disk_shuffle_with_unknown_compression(compression):\n # test if dask raises an error in case of fault config string\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n with pytest.raises(\n ImportError,\n match=(\n \"Not able to import and load {} as compression algorithm.\"\n \"Please check if the library is installed and supported by Partd.\".format(\n compression\n )\n ),\n ):\n test_shuffle(\"disk\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_check_actual_compression_test_disk_shuffle_check_actual_compression.assert_len_uncompressed_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_disk_shuffle_check_actual_compression_test_disk_shuffle_check_actual_compression.assert_len_uncompressed_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1072, "end_line": 1092, "span_ids": ["test_disk_shuffle_check_actual_compression"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_disk_shuffle_check_actual_compression():\n # test if the compression switch is really respected by testing the size of the actual partd-data on disk\n def generate_raw_partd_file(compression):\n # generate and 
write a dummy dataframe to disk and return the raw data bytes\n df1 = pd.DataFrame({\"a\": list(range(10000))})\n df1[\"b\"] = (df1[\"a\"] * 123).astype(str)\n with dask.config.set({\"dataframe.shuffle-compression\": compression}):\n p1 = maybe_buffered_partd(buffer=False, tempdir=None)()\n p1.append({\"x\": df1})\n # get underlying filename from partd - depending on nested structure of partd object\n filename = (\n p1.partd.partd.filename(\"x\") if compression else p1.partd.filename(\"x\")\n )\n with open(filename, \"rb\") as f:\n return f.read()\n\n # get compressed and uncompressed raw data\n uncompressed_data = generate_raw_partd_file(compression=None)\n compressed_data = generate_raw_partd_file(compression=\"BZ2\")\n\n assert len(uncompressed_data) > len(compressed_data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_pytest__BASE_UFUNCS._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_pytest__BASE_UFUNCS._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 55, "span_ids": ["imports"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npd = pytest.importorskip(\"pandas\")\n\nimport numpy as np\n\nimport dask.array as da\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import PANDAS_GT_120\nfrom dask.dataframe.utils import assert_eq\n\n_BASE_UFUNCS = [\n \"conj\",\n \"exp\",\n \"log\",\n \"log2\",\n \"log10\",\n \"log1p\",\n \"expm1\",\n \"sqrt\",\n \"square\",\n \"sin\",\n \"cos\",\n \"tan\",\n \"arcsin\",\n \"arccos\",\n \"arctan\",\n \"sinh\",\n \"cosh\",\n \"tanh\",\n \"arcsinh\",\n \"arccosh\",\n \"arctanh\",\n \"deg2rad\",\n \"rad2deg\",\n \"isfinite\",\n \"isinf\",\n \"isnan\",\n \"signbit\",\n \"degrees\",\n \"radians\",\n \"rint\",\n \"fabs\",\n \"sign\",\n \"absolute\",\n \"floor\",\n \"ceil\",\n \"trunc\",\n \"logical_not\",\n \"cbrt\",\n \"exp2\",\n \"negative\",\n \"reciprocal\",\n \"spacing\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_test_ufunc.None_4.assert_eq_dafunc_pandas_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_test_ufunc.None_4.assert_eq_dafunc_pandas_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 58, "end_line": 140, "span_ids": ["test_ufunc"], "tokens": 693}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas_input\",\n [\n pd.Series(np.random.randint(1, 100, size=20)),\n pd.Series(np.abs(np.random.randn(100))),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n pd.Series(\n np.random.randint(1, 100, size=20), index=list(\"abcdefghijklmnopqrst\")\n ),\n pd.Series(np.abs(np.random.randn(20)), index=list(\"abcdefghijklmnopqrst\")),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n },\n index=list(\"abcdefghijklmnopqrst\"),\n ),\n ],\n)\n@pytest.mark.parametrize(\"ufunc\", _BASE_UFUNCS)\ndef test_ufunc(pandas_input, ufunc):\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n dask_input = dd.from_pandas(pandas_input, 3)\n pandas_type = pandas_input.__class__\n dask_type = dask_input.__class__\n\n # applying Dask ufunc doesn't trigger computation\n with pytest.warns(None):\n # Some cause warnings (arcsine)\n assert isinstance(dafunc(dask_input), dask_type)\n assert_eq(dafunc(dask_input), npfunc(pandas_input))\n\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(dask_input), dask_type)\n else:\n assert isinstance(npfunc(dask_input), pandas_type)\n assert_eq(npfunc(dask_input), npfunc(pandas_input))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(pandas_input), pandas_type)\n assert_eq(dafunc(dask_input), npfunc(pandas_input))\n\n # Index\n if pandas_input.index.dtype in [object, str]:\n return\n if ufunc in (\"logical_not\", \"signbit\", \"isnan\", \"isinf\", \"isfinite\"):\n return\n\n with pytest.warns(None):\n assert isinstance(dafunc(dask_input.index), dd.Index)\n assert_eq(\n dafunc(dask_input.index),\n npfunc(pandas_input.index),\n check_divisions=ufunc != \"spacing\",\n )\n\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(dask_input.index), dd.Index)\n else:\n assert isinstance(npfunc(dask_input.index), pd.Index)\n\n assert_eq(\n npfunc(dask_input.index),\n npfunc(dask_input.index),\n check_divisions=ufunc != \"spacing\",\n )\n\n # applying Dask ufunc to normal Series triggers computation\n with pytest.warns(None):\n # some (da.log) cause warnings\n assert isinstance(dafunc(pandas_input.index), pd.Index)\n assert_eq(dafunc(pandas_input), npfunc(pandas_input))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_array_wrap_test_ufunc_array_wrap.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_array_wrap_test_ufunc_array_wrap.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 143, "end_line": 210, "span_ids": 
["test_ufunc_array_wrap"], "tokens": 573}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"ufunc\",\n [\n pytest.param(\n \"isreal\", marks=pytest.mark.filterwarnings(\"ignore::FutureWarning\")\n ),\n \"iscomplex\",\n pytest.param(\"real\", marks=pytest.mark.filterwarnings(\"ignore::FutureWarning\")),\n pytest.param(\"imag\", marks=pytest.mark.filterwarnings(\"ignore::FutureWarning\")),\n \"angle\",\n \"fix\",\n \"i0\",\n \"sinc\",\n \"nan_to_num\",\n ],\n)\ndef test_ufunc_array_wrap(ufunc):\n \"\"\"\n some np.ufuncs doesn't call __array_wrap__\n (or __array_ufunc__ starting from numpy v.1.13.0), it should work as below\n\n - da.ufunc(dd.Series) => dd.Series\n - da.ufunc(pd.Series) => np.ndarray\n - np.ufunc(dd.Series) => np.ndarray\n - np.ufunc(pd.Series) => np.ndarray\n \"\"\"\n if ufunc == \"fix\":\n pytest.skip(\"fix calls floor in a way that we do not yet support\")\n\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n s = pd.Series(\n np.random.randint(1, 100, size=20), index=list(\"abcdefghijklmnopqrst\")\n )\n ds = dd.from_pandas(s, 3)\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(ds), dd.Series)\n assert_eq(dafunc(ds), pd.Series(npfunc(s), index=s.index))\n\n assert isinstance(npfunc(ds), np.ndarray)\n np.testing.assert_equal(npfunc(ds), npfunc(s))\n\n assert isinstance(dafunc(s), np.ndarray)\n np.testing.assert_array_equal(dafunc(s), npfunc(s))\n\n df = pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n },\n index=list(\"abcdefghijklmnopqrst\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(ddf), dd.DataFrame)\n # result may be read-only ndarray\n exp = pd.DataFrame(npfunc(df).copy(), columns=df.columns, index=df.index)\n assert_eq(dafunc(ddf), exp)\n\n assert isinstance(npfunc(ddf), np.ndarray)\n np.testing.assert_array_equal(npfunc(ddf), npfunc(df))\n\n assert isinstance(dafunc(df), np.ndarray)\n np.testing.assert_array_equal(dafunc(df), npfunc(df))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py__UFUNCS_2ARG__UFUNCS_2ARG._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py__UFUNCS_2ARG__UFUNCS_2ARG._", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 239, "span_ids": ["impl:5"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_UFUNCS_2ARG = [\n \"logaddexp\",\n 
\"logaddexp2\",\n \"arctan2\",\n \"hypot\",\n \"copysign\",\n \"nextafter\",\n pytest.param(\"ldexp\", marks=[pytest.mark.filterwarnings(\"ignore::RuntimeWarning\")]),\n pytest.param(\"fmod\", marks=[pytest.mark.filterwarnings(\"ignore::RuntimeWarning\")]),\n \"logical_and\",\n \"logical_or\",\n \"logical_xor\",\n \"maximum\",\n \"minimum\",\n \"fmax\",\n \"fmin\",\n \"greater\",\n \"greater_equal\",\n \"less\",\n \"less_equal\",\n \"not_equal\",\n \"equal\",\n \"logical_or\",\n \"logical_and\",\n \"logical_xor\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_2args_test_ufunc_with_2args.assert_eq_dafunc_pandas1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_2args_test_ufunc_with_2args.assert_eq_dafunc_pandas1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 286, "span_ids": ["test_ufunc_with_2args"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", _UFUNCS_2ARG)\n@pytest.mark.parametrize(\n \"make_pandas_input\",\n [\n lambda: pd.Series(np.random.randint(1, 100, size=20)),\n lambda: pd.DataFrame(\n np.random.randint(1, 100, size=(20, 2)), columns=[\"A\", \"B\"]\n ),\n ],\n)\ndef test_ufunc_with_2args(ufunc, make_pandas_input):\n\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n pandas1 = make_pandas_input()\n pandas2 = make_pandas_input()\n\n dask1 = dd.from_pandas(pandas1, 3)\n dask2 = dd.from_pandas(pandas2, 4)\n\n pandas_type = pandas1.__class__\n dask_type = dask1.__class__\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(dask1, dask2), dask_type)\n assert_eq(dafunc(dask1, dask2), npfunc(pandas1, pandas2))\n\n # should be fine with pandas as a second arg, too\n assert isinstance(dafunc(dask1, pandas2), dask_type)\n assert_eq(dafunc(dask1, pandas2), npfunc(pandas1, pandas2))\n\n # applying NumPy ufunc is lazy\n if isinstance(npfunc, np.ufunc):\n assert isinstance(npfunc(dask1, dask2), dask_type)\n assert isinstance(npfunc(dask1, pandas2), dask_type)\n else:\n assert isinstance(npfunc(dask1, dask2), pandas_type)\n assert isinstance(npfunc(dask1, pandas2), pandas_type)\n\n assert_eq(npfunc(dask1, dask2), npfunc(pandas1, pandas2))\n assert_eq(npfunc(dask1, pandas2), npfunc(pandas1, pandas2))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(pandas1, pandas2), pandas_type)\n assert_eq(dafunc(pandas1, pandas2), npfunc(pandas1, pandas2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_clip_test_clip.assert_eq_da_clip_pandas_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_clip_test_clip.assert_eq_da_clip_pandas_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 289, "end_line": 318, "span_ids": ["test_clip"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas,min,max\",\n [\n (pd.Series(np.random.randint(1, 100, size=20)), 5, 50),\n (\n pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"A\", \"B\"]),\n 5.5,\n 40.5,\n ),\n ],\n)\ndef test_clip(pandas, min, max):\n\n dask = dd.from_pandas(pandas, 3)\n pandas_type = pandas.__class__\n dask_type = dask.__class__\n\n # clip internally calls dd.Series.clip\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(da.clip(dask, min, max), dask_type)\n assert_eq(da.clip(dask, min, max), np.clip(pandas, min, max))\n\n # applying Numpy ufunc doesn't trigger computation\n assert isinstance(np.clip(dask, min, max), dask_type)\n assert_eq(np.clip(dask, min, max), np.clip(pandas, min, max))\n\n # applying Dask ufunc to normal pandas objects triggers computation\n assert isinstance(da.clip(pandas, min, max), pandas_type)\n assert_eq(da.clip(pandas, min, max), np.clip(pandas, min, max))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_ufunc_out_test_frame_ufunc_out.None_1.assert_eq_ddf_out_np_exp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_ufunc_out_test_frame_ufunc_out.None_1.assert_eq_ddf_out_np_exp", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 321, "end_line": 341, "span_ids": ["test_frame_ufunc_out"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", _BASE_UFUNCS)\ndef test_frame_ufunc_out(ufunc):\n npfunc = getattr(np, ufunc)\n dafunc = getattr(da, ufunc)\n\n input_matrix = np.random.randint(1, 100, size=(20, 2))\n\n df = pd.DataFrame(input_matrix, columns=[\"A\", \"B\"])\n ddf = dd.from_pandas(df, 3)\n df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"Y\", \"Z\"])\n ddf_out_np = dd.from_pandas(df_out, 3)\n ddf_out_da = dd.from_pandas(df_out, 3)\n\n with 
pytest.warns(None):\n npfunc(ddf, out=ddf_out_np)\n dafunc(ddf, out=ddf_out_da)\n assert_eq(ddf_out_np, ddf_out_da)\n\n with pytest.warns(None):\n expected = pd.DataFrame(npfunc(input_matrix), columns=[\"A\", \"B\"])\n assert_eq(ddf_out_np, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_2ufunc_out_test_frame_2ufunc_out.assert_eq_ddf_out_expect": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_frame_2ufunc_out_test_frame_2ufunc_out.assert_eq_ddf_out_expect", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 344, "end_line": 372, "span_ids": ["test_frame_2ufunc_out"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frame_2ufunc_out():\n input_matrix = np.random.randint(1, 100, size=(20, 2))\n\n df = pd.DataFrame(input_matrix, columns=[\"A\", \"B\"])\n ddf = dd.from_pandas(df, 3)\n\n # column number mismatch\n df_out = pd.DataFrame(\n np.random.randint(1, 100, size=(20, 3)), columns=[\"X\", \"Y\", \"Z\"]\n )\n ddf_out = dd.from_pandas(df_out, 3)\n\n with pytest.raises(ValueError):\n np.sin(ddf, out=ddf_out)\n\n # types mismatch\n ddf_out = dd.from_pandas(pd.Series([0]), 1)\n with pytest.raises(TypeError):\n np.sin(ddf, out=ddf_out)\n\n df_out = pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"X\", \"Y\"])\n ddf_out = dd.from_pandas(df_out, 3)\n\n np.sin(ddf, out=ddf_out)\n np.add(ddf_out, 10, out=ddf_out)\n\n expected = pd.DataFrame(np.sin(input_matrix) + 10, columns=[\"A\", \"B\"])\n\n assert_eq(ddf_out, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_mixed_types_test_mixed_types.assert_eq_dafunc_arg2_ar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_mixed_types_test_mixed_types.assert_eq_dafunc_arg2_ar", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 375, "end_line": 427, "span_ids": ["test_mixed_types"], "tokens": 498}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"arg1\",\n [\n pd.Series(np.abs(np.random.randn(100))),\n pd.DataFrame(\n {\n 
\"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n ],\n)\n@pytest.mark.parametrize(\"arg2\", [2, dd.from_pandas(pd.Series([0]), 1).sum()])\n@pytest.mark.parametrize(\"ufunc\", _UFUNCS_2ARG)\ndef test_mixed_types(ufunc, arg1, arg2):\n npfunc = getattr(np, ufunc)\n dafunc = getattr(da, ufunc)\n\n dask = dd.from_pandas(arg1, 3)\n\n pandas_type = arg1.__class__\n dask_type = dask.__class__\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(dask, arg2), dask_type)\n assert_eq(dafunc(dask, arg2), npfunc(dask, arg2))\n\n # applying NumPy ufunc is lazy\n assert isinstance(npfunc(dask, arg2), dask_type)\n assert_eq(npfunc(dask, arg2), npfunc(arg1, arg2))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(arg1, arg2), pandas_type)\n assert_eq(dafunc(arg1, arg2), npfunc(arg1, arg2))\n\n # swapping arguments\n\n # first parameter of ldexp should be array-like\n if ufunc == \"ldexp\":\n return\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(arg2, dask), dask_type)\n assert_eq(dafunc(arg2, dask), npfunc(arg2, dask))\n\n # applying NumPy ufunc is lazy\n assert isinstance(npfunc(arg2, dask), dask_type)\n assert_eq(npfunc(arg2, dask), npfunc(arg2, dask))\n\n # applying Dask ufunc to normal Series triggers computation\n assert isinstance(dafunc(arg2, arg1), pandas_type)\n assert_eq(dafunc(arg2, arg1), npfunc(arg2, arg1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_2args_with_array_test_2args_with_array.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_2args_with_array_test_2args_with_array.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 430, "end_line": 468, "span_ids": ["test_2args_with_array"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ufunc\", _UFUNCS_2ARG)\n@pytest.mark.parametrize(\n \"pandas,darray\",\n [\n (\n pd.Series(np.random.randint(1, 100, size=(100,))),\n da.from_array(np.random.randint(1, 100, size=(100,)), chunks=(50,)),\n ),\n (\n pd.DataFrame(np.random.randint(1, 100, size=(20, 2)), columns=[\"A\", \"B\"]),\n da.from_array(np.random.randint(1, 100, size=(20, 2)), chunks=(10, 2)),\n ),\n ],\n)\ndef test_2args_with_array(ufunc, pandas, darray):\n dafunc = getattr(da, ufunc)\n npfunc = getattr(np, ufunc)\n\n dask = dd.from_pandas(pandas, 2)\n dask_type = dask.__class__\n\n # applying Dask ufunc doesn't trigger computation\n assert isinstance(dafunc(dask, darray), dask_type)\n assert isinstance(dafunc(darray, dask), dask_type)\n\n np.testing.assert_array_equal(\n dafunc(dask, darray).compute().values, npfunc(pandas.values, darray).compute()\n )\n\n # applying NumPy 
ufunc is lazy\n assert isinstance(npfunc(dask, darray), dask_type)\n assert isinstance(npfunc(darray, dask), dask_type)\n\n np.testing.assert_array_equal(\n npfunc(dask, darray).compute().values, npfunc(pandas.values, darray.compute())\n )\n np.testing.assert_array_equal(\n npfunc(darray, dask).compute().values, npfunc(darray.compute(), pandas.values)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_reduction_test_ufunc_with_reduction.with_pytest_warns_None_.assert_eq_np_redfunc_np_u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_with_reduction_test_ufunc_with_reduction.with_pytest_warns_None_.assert_eq_np_redfunc_np_u", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 472, "end_line": 507, "span_ids": ["test_ufunc_with_reduction"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"redfunc\", [\"sum\", \"prod\", \"min\", \"max\", \"mean\"])\n@pytest.mark.parametrize(\"ufunc\", _BASE_UFUNCS)\n@pytest.mark.parametrize(\n \"pandas\",\n [\n pd.Series(np.abs(np.random.randn(100))),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n ],\n)\ndef test_ufunc_with_reduction(redfunc, ufunc, pandas):\n dask = dd.from_pandas(pandas, 3)\n\n np_redfunc = getattr(np, redfunc)\n np_ufunc = getattr(np, ufunc)\n\n if (\n PANDAS_GT_120\n and (redfunc == \"prod\")\n and ufunc in [\"conj\", \"square\", \"negative\", \"absolute\"]\n and isinstance(pandas, pd.DataFrame)\n ):\n # TODO(pandas) follow pandas behaviour?\n # starting with pandas 1.2.0, the ufunc is applied column-wise, and therefore\n # applied on the integer columns separately, overflowing for those columns\n # (instead of being applied on 2D ndarray that was converted to float)\n pytest.xfail(\"'prod' overflowing with integer columns in pandas 1.2.0\")\n\n with pytest.warns(None):\n assert isinstance(np_redfunc(dask), (dd.DataFrame, dd.Series, dd.core.Scalar))\n assert_eq(np_redfunc(np_ufunc(dask)), np_redfunc(np_ufunc(pandas)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_numpy_scalar_comparison_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_ufunc.py_test_ufunc_numpy_scalar_comparison_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": 
"test", "start_line": 497, "end_line": 518, "span_ids": ["test_ufunc_numpy_scalar_comparison"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"pandas\",\n [\n pd.Series(np.random.randint(1, 100, size=100)),\n pd.DataFrame(\n {\n \"A\": np.random.randint(1, 100, size=20),\n \"B\": np.random.randint(1, 100, size=20),\n \"C\": np.abs(np.random.randn(20)),\n }\n ),\n ],\n)\n@pytest.mark.parametrize(\"scalar\", [15, 16.4, np.int64(15), np.float64(16.4)])\ndef test_ufunc_numpy_scalar_comparison(pandas, scalar):\n # Regression test for issue #3392\n\n dask_compare = scalar >= dd.from_pandas(pandas, npartitions=3)\n pandas_compare = scalar >= pandas\n\n assert_eq(dask_compare, pandas_compare)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_shard_df_on_index_test_shard_df_on_index.assert_list_result_2_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_shard_df_on_index_test_shard_df_on_index.assert_list_result_2_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 34, "span_ids": ["test_shard_df_on_index"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shard_df_on_index():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n\n result = list(shard_df_on_index(df, [20, 50]))\n assert list(result[0].index) == [10]\n assert list(result[1].index) == [20, 30, 40]\n assert list(result[2].index) == [50, 60]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_test_meta_nonempty.assert_df3_A_s_al": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_test_meta_nonempty.assert_df3_A_s_al", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 123, "end_line": 159, "span_ids": ["test_meta_nonempty"], "tokens": 465}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_nonempty():\n df1 = pd.DataFrame(\n {\n \"A\": pd.Categorical([\"Alice\", \"Bob\", \"Carol\"]),\n \"B\": list(\"abc\"),\n \"C\": \"bar\",\n \"D\": np.float32(1),\n \"E\": np.int32(1),\n \"F\": pd.Timestamp(\"2016-01-01\"),\n \"G\": pd.date_range(\"2016-01-01\", periods=3, tz=\"America/New_York\"),\n \"H\": pd.Timedelta(\"1 hours\"),\n \"I\": np.void(b\" \"),\n \"J\": pd.Categorical([UNKNOWN_CATEGORIES] * 3),\n \"K\": pd.Categorical([None, None, None]),\n },\n columns=list(\"DCBAHGFEIJK\"),\n )\n df2 = df1.iloc[0:0]\n df3 = meta_nonempty(df2)\n assert (df3.dtypes == df2.dtypes).all()\n assert df3[\"A\"][0] == \"Alice\"\n assert df3[\"B\"][0] == \"foo\"\n assert df3[\"C\"][0] == \"foo\"\n assert df3[\"D\"][0] == np.float32(1)\n assert df3[\"D\"][0].dtype == \"f4\"\n assert df3[\"E\"][0] == np.int32(1)\n assert df3[\"E\"][0].dtype == \"i4\"\n assert df3[\"F\"][0] == pd.Timestamp(\"1970-01-01 00:00:00\")\n assert df3[\"G\"][0] == pd.Timestamp(\"1970-01-01 00:00:00\", tz=\"America/New_York\")\n assert df3[\"H\"][0] == pd.Timedelta(\"1\")\n assert df3[\"I\"][0] == \"foo\"\n assert df3[\"J\"][0] == UNKNOWN_CATEGORIES\n assert len(df3[\"K\"].cat.categories) == 0\n\n s = meta_nonempty(df2[\"A\"])\n assert s.dtype == df2[\"A\"].dtype\n assert (df3[\"A\"] == s).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_duplicated_test_meta_nonempty_empty_categories.for_dtype_in_O_f8_.assert_res_name_s_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_duplicated_test_meta_nonempty_empty_categories.for_dtype_in_O_f8_.assert_res_name_s_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 160, "end_line": 190, "span_ids": ["test_meta_nonempty_empty_categories", "test_meta_duplicated"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_duplicated():\n df = pd.DataFrame(columns=[\"A\", \"A\", \"B\"])\n res = meta_nonempty(df)\n\n exp = pd.DataFrame(\n [[\"foo\", \"foo\", \"foo\"], [\"foo\", \"foo\", \"foo\"]],\n index=[\"a\", \"b\"],\n columns=[\"A\", \"A\", \"B\"],\n )\n tm.assert_frame_equal(res, exp)\n\n\ndef test_meta_nonempty_empty_categories():\n for dtype in [\"O\", \"f8\", \"M8[ns]\"]:\n # Index\n idx = pd.CategoricalIndex(\n [], pd.Index([], dtype=dtype), ordered=True, name=\"foo\"\n )\n res = meta_nonempty(idx)\n assert type(res) is pd.CategoricalIndex\n assert type(res.categories) is type(idx.categories)\n assert res.ordered == idx.ordered\n assert res.name == idx.name\n # Series\n s = 
idx.to_series()\n res = meta_nonempty(s)\n assert res.dtype == \"category\"\n assert s.dtype == \"category\"\n assert type(res.cat.categories) is type(s.cat.categories)\n assert res.cat.ordered == s.cat.ordered\n assert res.name == s.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_matching_columns_raises_appropriate_errors_test_check_meta_typename.assert_pandas_in_str_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_matching_columns_raises_appropriate_errors_test_check_meta_typename.assert_pandas_in_str_in", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 373, "end_line": 397, "span_ids": ["test_check_matching_columns_raises_appropriate_errors", "test_check_meta_typename"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_check_matching_columns_raises_appropriate_errors():\n df = pd.DataFrame(columns=[\"a\", \"b\", \"c\"])\n\n meta = pd.DataFrame(columns=[\"b\", \"a\", \"c\"])\n with pytest.raises(ValueError, match=\"Order of columns does not match\"):\n assert check_matching_columns(meta, df)\n\n meta = pd.DataFrame(columns=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(ValueError, match=\"Missing: \\\\['d'\\\\]\"):\n assert check_matching_columns(meta, df)\n\n meta = pd.DataFrame(columns=[\"a\", \"b\"])\n with pytest.raises(ValueError, match=\"Extra: \\\\['c'\\\\]\"):\n assert check_matching_columns(meta, df)\n\n\ndef test_check_meta_typename():\n df = pd.DataFrame({\"x\": []})\n ddf = dd.from_pandas(df, npartitions=1)\n check_meta(df, df)\n with pytest.raises(Exception) as info:\n check_meta(ddf, df)\n\n assert \"dask\" in str(info.value)\n assert \"pandas\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_bin_and_out_divs__resample_bin_and_out_divs.return.tuple_map_pd_Timestamp_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_bin_and_out_divs__resample_bin_and_out_divs.return.tuple_map_pd_Timestamp_n", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 59, "end_line": 96, "span_ids": ["_resample_bin_and_out_divs"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _resample_bin_and_out_divs(divisions, rule, closed=\"left\", label=\"left\"):\n rule = pd.tseries.frequencies.to_offset(rule)\n g = pd.Grouper(freq=rule, how=\"count\", closed=closed, label=label)\n\n # Determine bins to apply `how` to. Disregard labeling scheme.\n divs = pd.Series(range(len(divisions)), index=divisions)\n temp = divs.resample(rule, closed=closed, label=\"left\").count()\n tempdivs = temp.loc[temp > 0].index\n\n # Cleanup closed == 'right' and label == 'right'\n res = pd.offsets.Nano() if hasattr(rule, \"delta\") else pd.offsets.Day()\n if g.closed == \"right\":\n newdivs = tempdivs + res\n else:\n newdivs = tempdivs\n if g.label == \"right\":\n outdivs = tempdivs + rule\n else:\n outdivs = tempdivs\n\n newdivs = methods.tolist(newdivs)\n outdivs = methods.tolist(outdivs)\n\n # Adjust ends\n if newdivs[0] < divisions[0]:\n newdivs[0] = divisions[0]\n if newdivs[-1] < divisions[-1]:\n if len(newdivs) < len(divs):\n setter = lambda a, val: a.append(val)\n else:\n setter = lambda a, val: a.__setitem__(-1, val)\n setter(newdivs, divisions[-1] + res)\n if outdivs[-1] > divisions[-1]:\n setter(outdivs, outdivs[-1])\n elif outdivs[-1] < divisions[-1]:\n setter(outdivs, temp.index[-1])\n\n return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler_Resampler.__init__.self._kwargs.kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler_Resampler.__init__.self._kwargs.kwargs", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 123, "span_ids": ["Resampler.__init__", "Resampler"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Resampler:\n \"\"\"Class for resampling timeseries data.\n\n This class is commonly encountered when using ``obj.resample(...)`` which\n return ``Resampler`` objects.\n\n Parameters\n ----------\n obj : Dask DataFrame or Series\n Data to be resampled.\n rule : str, tuple, datetime.timedelta, DateOffset or None\n The offset string or object representing the target conversion.\n kwargs : optional\n Keyword arguments passed to underlying pandas resampling function.\n\n Returns\n -------\n Resampler instance of the appropriate type\n \"\"\"\n\n def __init__(self, obj, rule, **kwargs):\n if not obj.known_divisions:\n msg = (\n \"Can only resample dataframes with known divisions\\n\"\n \"See https://docs.dask.org/en/latest/dataframe-design.html#partitions\\n\"\n \"for more information.\"\n )\n raise ValueError(msg)\n self.obj = obj\n self._rule = pd.tseries.frequencies.to_offset(rule)\n self._kwargs = 
kwargs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler._agg_Resampler._agg.return.Series_graph_name_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler._agg_Resampler._agg.return.Series_graph_name_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 125, "end_line": 184, "span_ids": ["Resampler._agg"], "tokens": 453}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Resampler:\n\n def _agg(self, how, meta=None, fill_value=np.nan, how_args=(), how_kwargs={}):\n \"\"\"Aggregate using one or more operations\n\n Parameters\n ----------\n how : str\n Name of aggregation operation\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling.\n Default is NaN.\n how_args : optional\n Positional arguments for aggregation operation.\n how_kwargs : optional\n Keyword arguments for aggregation operation.\n\n Returns\n -------\n Dask DataFrame or Series\n \"\"\"\n rule = self._rule\n kwargs = self._kwargs\n name = \"resample-\" + tokenize(\n self.obj, rule, kwargs, how, *how_args, **how_kwargs\n )\n\n # Create a grouper to determine closed and label conventions\n newdivs, outdivs = _resample_bin_and_out_divs(\n self.obj.divisions, rule, **kwargs\n )\n\n # Repartition divs into bins. 
These won't match labels after mapping\n partitioned = self.obj.repartition(newdivs, force=True)\n\n keys = partitioned.__dask_keys__()\n dsk = {}\n\n args = zip(keys, outdivs, outdivs[1:], [\"left\"] * (len(keys) - 1) + [None])\n for i, (k, s, e, c) in enumerate(args):\n dsk[(name, i)] = (\n _resample_series,\n k,\n s,\n e,\n c,\n rule,\n kwargs,\n how,\n fill_value,\n list(how_args),\n how_kwargs,\n )\n\n # Infer output metadata\n meta_r = self.obj._meta_nonempty.resample(self._rule, **self._kwargs)\n meta = getattr(meta_r, how)(*how_args, **how_kwargs)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[partitioned])\n if isinstance(meta, pd.DataFrame):\n return DataFrame(graph, name, meta, outdivs)\n return Series(graph, name, meta, outdivs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler.agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_Resampler.agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 186, "end_line": 253, "span_ids": ["Resampler.sum", "Resampler.agg", "Resampler.min", "Resampler.std", "Resampler.quantile", "Resampler.size", "Resampler.count", "Resampler.max", "Resampler.ohlc", "Resampler.sem", "Resampler.var", "Resampler.mean", "Resampler.first", "Resampler.nunique", "Resampler.prod", "Resampler.last", "Resampler.median"], "tokens": 410}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Resampler:\n\n @derived_from(pd_Resampler)\n def agg(self, agg_funcs, *args, **kwargs):\n return self._agg(\"agg\", how_args=(agg_funcs,) + args, how_kwargs=kwargs)\n\n @derived_from(pd_Resampler)\n def count(self):\n return self._agg(\"count\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def first(self):\n return self._agg(\"first\")\n\n @derived_from(pd_Resampler)\n def last(self):\n return self._agg(\"last\")\n\n @derived_from(pd_Resampler)\n def mean(self):\n return self._agg(\"mean\")\n\n @derived_from(pd_Resampler)\n def min(self):\n return self._agg(\"min\")\n\n @derived_from(pd_Resampler)\n def median(self):\n return self._agg(\"median\")\n\n @derived_from(pd_Resampler)\n def max(self):\n return self._agg(\"max\")\n\n @derived_from(pd_Resampler)\n def nunique(self):\n return self._agg(\"nunique\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def ohlc(self):\n return self._agg(\"ohlc\")\n\n @derived_from(pd_Resampler)\n def prod(self):\n return self._agg(\"prod\")\n\n @derived_from(pd_Resampler)\n def sem(self):\n return self._agg(\"sem\")\n\n @derived_from(pd_Resampler)\n def std(self):\n return self._agg(\"std\")\n\n @derived_from(pd_Resampler)\n def size(self):\n return self._agg(\"size\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def sum(self):\n return self._agg(\"sum\", fill_value=0)\n\n @derived_from(pd_Resampler)\n def var(self):\n return 
self._agg(\"var\")\n\n @derived_from(pd_Resampler)\n def quantile(self):\n return self._agg(\"quantile\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_from_itertools_import_pro_test_series_resample.assert_expected_index_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_from_itertools_import_pro_test_series_resample.assert_expected_index_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 50, "span_ids": ["imports", "test_series_resample", "resample"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from itertools import product\n\nimport pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\nfrom dask.dataframe.tseries.resample import getnanos\nfrom dask.dataframe.utils import assert_eq\n\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False\n\n\ndef resample(df, freq, how=\"mean\", **kwargs):\n return getattr(df.resample(freq, **kwargs), how)()\n\n\n@pytest.mark.parametrize(\n [\"obj\", \"method\", \"npartitions\", \"freq\", \"closed\", \"label\"],\n list(\n product(\n [\"series\", \"frame\"],\n [\"count\", \"mean\", \"ohlc\"],\n [2, 5],\n [\"30T\", \"h\", \"d\", \"w\", \"M\"],\n [\"right\", \"left\"],\n [\"right\", \"left\"],\n )\n ),\n)\ndef test_series_resample(obj, method, npartitions, freq, closed, label):\n index = pd.date_range(\"1-1-2000\", \"2-15-2000\", freq=\"h\")\n index = index.union(pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"h\"))\n if obj == \"series\":\n ps = pd.Series(range(len(index)), index=index)\n elif obj == \"frame\":\n ps = pd.DataFrame({\"a\": range(len(index))}, index=index)\n ds = dd.from_pandas(ps, npartitions=npartitions)\n # Series output\n\n result = resample(ds, freq, how=method, closed=closed, label=label)\n expected = resample(ps, freq, how=method, closed=closed, label=label)\n\n assert_eq(result, expected, check_dtype=False)\n\n divisions = result.divisions\n\n assert expected.index[0] == divisions[0]\n assert expected.index[-1] == divisions[-1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_has_correct_fill_value_test_resample_has_correct_fill_value.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_has_correct_fill_value_test_resample_has_correct_fill_value.assert_eq_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 53, "end_line": 62, "span_ids": ["test_resample_has_correct_fill_value"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"count\", \"nunique\", \"size\", \"sum\"])\ndef test_resample_has_correct_fill_value(method):\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n index = index.union(pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"h\"))\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n assert_eq(\n getattr(ds.resample(\"30min\"), method)(), getattr(ps.resample(\"30min\"), method)()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_test_resample_agg.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_test_resample_agg.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 74, "span_ids": ["test_resample_agg"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_agg():\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n assert_eq(ds.resample(\"10min\").agg(\"mean\"), ps.resample(\"10min\").agg(\"mean\"))\n assert_eq(\n ds.resample(\"10min\").agg([\"mean\", \"min\"]),\n ps.resample(\"10min\").agg([\"mean\", \"min\"]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_passes_kwargs_test_resample_agg_passes_kwargs.assert_ds_resample_2h_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_agg_passes_kwargs_test_resample_agg_passes_kwargs.assert_ds_resample_2h_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 86, "span_ids": ["test_resample_agg_passes_kwargs"], "tokens": 137}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_agg_passes_kwargs():\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n def foo(series, bar=1, *args, **kwargs):\n return bar\n\n assert_eq(ds.resample(\"2h\").agg(foo, bar=2), ps.resample(\"2h\").agg(foo, bar=2))\n assert (ds.resample(\"2h\").agg(foo, bar=2) == 2).compute().all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_throws_error_when_parition_index_does_not_match_index_test_resample_throws_error_when_parition_index_does_not_match_index.with_pytest_raises_ValueE.ds_resample_2M_count_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_throws_error_when_parition_index_does_not_match_index_test_resample_throws_error_when_parition_index_does_not_match_index.with_pytest_raises_ValueE.ds_resample_2M_count_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 89, "end_line": 95, "span_ids": ["test_resample_throws_error_when_parition_index_does_not_match_index"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_throws_error_when_parition_index_does_not_match_index():\n index = pd.date_range(\"1-1-2000\", \"2-15-2000\", freq=\"D\")\n index = index.union(pd.date_range(\"4-15-2000\", \"5-15-2000\", freq=\"D\"))\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=5)\n with pytest.raises(ValueError, match=\"Index is not contained within new index.\"):\n ds.resample(\"2M\").count().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_pads_last_division_to_avoid_off_by_one_test_resample_pads_last_division_to_avoid_off_by_one.assert_eq_actual_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_pads_last_division_to_avoid_off_by_one_test_resample_pads_last_division_to_avoid_off_by_one.assert_eq_actual_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", 
"file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 132, "span_ids": ["test_resample_pads_last_division_to_avoid_off_by_one"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_pads_last_division_to_avoid_off_by_one():\n # https://github.com/dask/dask/issues/6230\n times = [\n 1545362463409128000,\n 1545362504369352000,\n 1545362545326966000,\n 1545363118769636000,\n 1545363159726490000,\n 1545363200687178000,\n 1545363241648824000,\n 1573318190393973000,\n 1573318231353350000,\n 1573318272313774000,\n 1573318313275299000,\n 1573318354233962000,\n 1573318395195456000,\n 1573318436154609000,\n 1580687544437145000,\n 1580687585394881000,\n 1580687667316809000,\n 1580687708275414000,\n 1580687790195742000,\n 1580687831154951000,\n 1580687872115363000,\n 1580687954035133000,\n 1559127673402811000,\n ]\n\n df = pd.DataFrame({\"Time\": times, \"Counts\": range(len(times))})\n df[\"Time\"] = pd.to_datetime(df[\"Time\"], utc=True)\n expected = df.set_index(\"Time\").resample(\"1Q\").size()\n\n ddf = dd.from_pandas(df, npartitions=2).set_index(\"Time\")\n actual = ddf.resample(\"1Q\").size().compute()\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_does_not_evenly_divide_day_test_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_resample_does_not_evenly_divide_day_test_resample_does_not_evenly_divide_day.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 135, "end_line": 146, "span_ids": ["test_resample_does_not_evenly_divide_day"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_resample_does_not_evenly_divide_day():\n import numpy as np\n\n index = pd.date_range(\"2012-01-02\", \"2012-02-02\", freq=\"H\")\n index = index.union(pd.date_range(\"2012-03-02\", \"2012-04-02\", freq=\"H\"))\n df = pd.DataFrame({\"p\": np.random.random(len(index))}, index=index)\n ddf = dd.from_pandas(df, npartitions=5)\n # Frequency doesn't evenly divide day\n expected = df.resample(\"2D\").count()\n result = ddf.resample(\"2D\").count().compute()\n\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_does_not_evenly_divide_day_test_series_resample_does_not_evenly_divide_day.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_does_not_evenly_divide_day_test_series_resample_does_not_evenly_divide_day.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 149, "end_line": 160, "span_ids": ["test_series_resample_does_not_evenly_divide_day"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_resample_does_not_evenly_divide_day():\n index = pd.date_range(\"2012-01-02 00:00:00\", \"2012-01-02 01:00:00\", freq=\"T\")\n index = index.union(\n pd.date_range(\"2012-01-02 06:00:00\", \"2012-01-02 08:00:00\", freq=\"T\")\n )\n s = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(s, npartitions=5)\n # Frequency doesn't evenly divide day\n expected = s.resample(\"57T\").mean()\n result = ds.resample(\"57T\").mean().compute()\n\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_unknown_divisions_error_test_resample_index_name.assert_ddf_resample_D_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_unknown_divisions_error_test_resample_index_name.assert_ddf_resample_D_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 162, "end_line": 186, "span_ids": ["test_unknown_divisions_error", "test_resample_index_name"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unknown_divisions_error():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=2, sort=False)\n try:\n ddf.x.resample(\"1m\").mean()\n assert False\n except ValueError as e:\n assert \"divisions\" in str(e)\n\n\ndef test_resample_index_name():\n from datetime import datetime, timedelta\n\n import numpy as np\n\n date_today = datetime.now()\n days = pd.date_range(date_today, date_today + timedelta(20), freq=\"D\")\n data = np.random.randint(1, high=100, size=len(days))\n\n df = pd.DataFrame({\"date\": days, \"values\": data})\n df = df.set_index(\"date\")\n\n ddf = dd.from_pandas(df, 
npartitions=4)\n\n assert ddf.resample(\"D\").mean().head().index.name == \"date\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_non_existent_datetime_test_series_resample_non_existent_datetime.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_series_resample_non_existent_datetime_test_series_resample_non_existent_datetime.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 188, "end_line": 200, "span_ids": ["test_series_resample_non_existent_datetime"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_series_resample_non_existent_datetime():\n index = [\n pd.Timestamp(\"2016-10-15 00:00:00\"),\n pd.Timestamp(\"2016-10-16 10:00:00\"),\n pd.Timestamp(\"2016-10-17 00:00:00\"),\n ]\n df = pd.DataFrame([[1], [2], [3]], index=index)\n df.index = df.index.tz_localize(\"America/Sao_Paulo\")\n ddf = dd.from_pandas(df, npartitions=1)\n result = ddf.resample(\"1D\").mean()\n expected = df.resample(\"1D\").mean()\n\n assert_eq(result, expected, **CHECK_FREQ)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_common_aggs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/tests/test_resample.py_test_common_aggs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/tests/test_resample.py", "file_name": "test_resample.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 222, "span_ids": ["test_getnanos_deprecated", "test_common_aggs"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"agg\", [\"nunique\", \"mean\", \"count\", \"size\", \"quantile\"])\ndef test_common_aggs(agg):\n index = pd.date_range(\"2000-01-01\", \"2000-02-15\", freq=\"h\")\n ps = pd.Series(range(len(index)), index=index)\n ds = dd.from_pandas(ps, npartitions=2)\n\n f = lambda df: getattr(df, agg)()\n\n res = f(ps.resample(\"1d\"))\n expected = f(ds.resample(\"1d\"))\n\n assert_eq(res, expected, check_dtype=False)\n\n\ndef test_getnanos_deprecated():\n with pytest.warns(FutureWarning, match=\"getnanos was 
deprecated\"):\n getnanos(None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_shard_df_on_index_shard_df_on_index.if_not_len_divisions_.else_.yield_df_iloc_indices_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_shard_df_on_index_shard_df_on_index.if_not_len_divisions_.else_.yield_df_iloc_indices_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 65, "end_line": 118, "span_ids": ["shard_df_on_index"], "tokens": 423}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def shard_df_on_index(df, divisions):\n \"\"\"Shard a DataFrame by ranges on its index\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})\n >>> df\n a b\n 0 0 5\n 1 10 4\n 2 20 3\n 3 30 2\n 4 40 1\n\n >>> shards = list(shard_df_on_index(df, [2, 4]))\n >>> shards[0]\n a b\n 0 0 5\n 1 10 4\n\n >>> shards[1]\n a b\n 2 20 3\n 3 30 2\n\n >>> shards[2]\n a b\n 4 40 1\n\n >>> list(shard_df_on_index(df, []))[0] # empty case\n a b\n 0 0 5\n 1 10 4\n 2 20 3\n 3 30 2\n 4 40 1\n \"\"\"\n\n if isinstance(divisions, Iterator):\n divisions = list(divisions)\n if not len(divisions):\n yield df\n else:\n divisions = np.array(divisions)\n df = df.sort_index()\n index = df.index\n if is_categorical_dtype(index):\n index = index.as_ordered()\n indices = index.searchsorted(divisions)\n yield df.iloc[: indices[0]]\n for i in range(len(indices) - 1):\n yield df.iloc[indices[i] : indices[i + 1]]\n yield df.iloc[indices[-1] :]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__META_TYPES__META_DESCRIPTION._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__META_TYPES__META_DESCRIPTION._", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 123, "end_line": 135, "span_ids": ["impl:10"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_META_TYPES = \"meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional\"\n_META_DESCRIPTION = \"\"\"\\\nAn empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and\ncolumn names of the output. This metadata is necessary for many algorithms\nin dask dataframe to work. 
For ease of use, some alternative inputs are\nalso available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``\nor iterable of ``(name, dtype)`` can be provided (note that the order of\nthe names should match the order of the columns). Instead of a series, a\ntuple of ``(name, dtype)`` can be used. If not provided, dask will try to\ninfer the metadata. This may lead to unexpected results, so providing\n``meta`` is recommended. For more information, see\n``dask.dataframe.utils.make_meta``.\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_insert_meta_param_description_insert_meta_param_description.return.f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_insert_meta_param_description_insert_meta_param_description.return.f", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 137, "end_line": 161, "span_ids": ["insert_meta_param_description"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def insert_meta_param_description(*args, **kwargs):\n \"\"\"Replace `$META` in docstring with param description.\n\n If pad keyword is provided, will pad description by that number of\n spaces (default is 8).\"\"\"\n if not args:\n return lambda f: insert_meta_param_description(f, **kwargs)\n f = args[0]\n indent = \" \" * kwargs.get(\"pad\", 8)\n body = textwrap.wrap(\n _META_DESCRIPTION, initial_indent=indent, subsequent_indent=indent, width=78\n )\n descr = \"{}\\n{}\".format(_META_TYPES, \"\\n\".join(body))\n if f.__doc__:\n if \"$META\" in f.__doc__:\n f.__doc__ = f.__doc__.replace(\"$META\", descr)\n else:\n # Put it at the end of the parameters section\n parameter_header = \"Parameters\\n%s----------\" % indent[4:]\n first, last = re.split(\"Parameters\\\\n[ ]*----------\", f.__doc__)\n parameters, rest = last.split(\"\\n\\n\", 1)\n f.__doc__ = \"{}{}{}\\n{}{}\\n\\n{}\".format(\n first, parameter_header, parameters, indent[4:], descr, rest\n )\n return f", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_raise_on_meta_error_raise_on_meta_error.try_.except_Exception_as_e_.raise_ValueError_msg_fro": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_raise_on_meta_error_raise_on_meta_error.try_.except_Exception_as_e_.raise_ValueError_msg_fro", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 164, "end_line": 196, "span_ids": ["raise_on_meta_error"], "tokens": 279}, 
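A short sketch of the ``meta`` alternatives documented in the ``_META_DESCRIPTION`` text above; ``map_partitions`` is used purely as one illustration of an API that accepts ``meta``, under the assumption that pandas and dask[dataframe] are available:

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"a": [1, 2, 3]}), npartitions=1)

# meta may be an empty DataFrame, a {name: dtype} dict, or an iterable
# of (name, dtype) pairs; the order must match the output columns.
res = ddf.map_partitions(
    lambda df: df.assign(b=df.a * 2.0),
    meta={"a": "int64", "b": "float64"},
)
print(res.dtypes)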
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef raise_on_meta_error(funcname=None, udf=False):\n \"\"\"Reraise errors in this block to show metadata inference failure.\n\n Parameters\n ----------\n funcname : str, optional\n If provided, will be added to the error message to indicate the\n name of the method that failed.\n \"\"\"\n try:\n yield\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n msg = \"Metadata inference failed{0}.\\n\\n\"\n if udf:\n msg += (\n \"You have supplied a custom function and Dask is unable to \\n\"\n \"determine the type of output that that function returns. \\n\\n\"\n \"To resolve this please provide a meta= keyword.\\n\"\n \"The docstring of the Dask function you ran should have more information.\\n\\n\"\n )\n msg += (\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{1}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{2}\"\n )\n msg = msg.format(f\" in `{funcname}`\" if funcname else \"\", repr(e), tb)\n raise ValueError(msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_UNKNOWN_CATEGORIES_has_known_categories.raise_TypeError_Expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_UNKNOWN_CATEGORIES_has_known_categories.raise_TypeError_Expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 200, "end_line": 215, "span_ids": ["has_known_categories", "impl:14"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "UNKNOWN_CATEGORIES = \"__UNKNOWN_CATEGORIES__\"\n\n\ndef has_known_categories(x):\n \"\"\"Returns whether the categories in `x` are known.\n\n Parameters\n ----------\n x : Series or CategoricalIndex\n \"\"\"\n x = getattr(x, \"_meta\", x)\n if is_series_like(x):\n return UNKNOWN_CATEGORIES not in x.cat.categories\n elif is_index_like(x) and hasattr(x, \"categories\"):\n return UNKNOWN_CATEGORIES not in x.categories\n raise TypeError(\"Expected Series or CategoricalIndex\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_strip_unknown_categories_strip_unknown_categories.return.x": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_strip_unknown_categories_strip_unknown_categories.return.x", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 217, "end_line": 243, "span_ids": ["strip_unknown_categories"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def strip_unknown_categories(x, just_drop_unknown=False):\n \"\"\"Replace any unknown categoricals with empty categoricals.\n\n Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.\n \"\"\"\n if isinstance(x, (pd.Series, pd.DataFrame)):\n x = x.copy()\n if isinstance(x, pd.DataFrame):\n cat_mask = x.dtypes == \"category\"\n if cat_mask.any():\n cats = cat_mask[cat_mask].index\n for c in cats:\n if not has_known_categories(x[c]):\n if just_drop_unknown:\n x[c].cat.remove_categories(UNKNOWN_CATEGORIES, inplace=True)\n else:\n x[c] = x[c].cat.set_categories([])\n elif isinstance(x, pd.Series):\n if is_categorical_dtype(x.dtype) and not has_known_categories(x):\n x = x.cat.set_categories([])\n if isinstance(x.index, pd.CategoricalIndex) and not has_known_categories(\n x.index\n ):\n x.index = x.index.set_categories([])\n elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):\n x = x.set_categories([])\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_clear_known_categories_clear_known_categories.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_clear_known_categories_clear_known_categories.return.x", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 246, "end_line": 276, "span_ids": ["clear_known_categories"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def clear_known_categories(x, cols=None, index=True):\n \"\"\"Set categories to be unknown.\n\n Parameters\n ----------\n x : DataFrame, Series, Index\n cols : iterable, optional\n If x is a DataFrame, set only categoricals in these columns to unknown.\n By default, all categorical columns are set to unknown categoricals\n index : bool, optional\n If True and x is a Series or DataFrame, set the clear known categories\n in the index as well.\n \"\"\"\n if isinstance(x, (pd.Series, pd.DataFrame)):\n x = x.copy()\n if isinstance(x, pd.DataFrame):\n mask = x.dtypes == \"category\"\n if cols is None:\n cols = mask[mask].index\n elif not mask.loc[cols].all():\n raise ValueError(\"Not all columns are 
categoricals\")\n for c in cols:\n x[c] = x[c].cat.set_categories([UNKNOWN_CATEGORIES])\n elif isinstance(x, pd.Series):\n if is_categorical_dtype(x.dtype):\n x = x.cat.set_categories([UNKNOWN_CATEGORIES])\n if index and isinstance(x.index, pd.CategoricalIndex):\n x.index = x.index.set_categories([UNKNOWN_CATEGORIES])\n elif isinstance(x, pd.CategoricalIndex):\n x = x.set_categories([UNKNOWN_CATEGORIES])\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_meta_check_meta.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_meta_check_meta.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 334, "end_line": 408, "span_ids": ["check_meta"], "tokens": 694}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_meta(x, meta, funcname=None, numeric_equal=True):\n \"\"\"Check that the dask metadata matches the result.\n\n If metadata matches, ``x`` is passed through unchanged. A nice error is\n raised if metadata doesn't match.\n\n Parameters\n ----------\n x : DataFrame, Series, or Index\n meta : DataFrame, Series, or Index\n The expected metadata that ``x`` should match\n funcname : str, optional\n The name of the function in which the metadata was specified. If\n provided, the function name will be included in the error message to be\n more helpful to users.\n numeric_equal : bool, optionl\n If True, integer and floating dtypes compare equal. 
This is useful due\n to pandas' implicit conversion of integer to floating upon encountering\n missingness, which is hard to infer statically.\n \"\"\"\n eq_types = {\"i\", \"f\", \"u\"} if numeric_equal else set()\n\n def equal_dtypes(a, b):\n if is_categorical_dtype(a) != is_categorical_dtype(b):\n return False\n if isinstance(a, str) and a == \"-\" or isinstance(b, str) and b == \"-\":\n return False\n if is_categorical_dtype(a) and is_categorical_dtype(b):\n if UNKNOWN_CATEGORIES in a.categories or UNKNOWN_CATEGORIES in b.categories:\n return True\n return a == b\n return (a.kind in eq_types and b.kind in eq_types) or is_dtype_equal(a, b)\n\n if not (\n is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta)\n ) or is_dask_collection(meta):\n raise TypeError(\n \"Expected partition to be DataFrame, Series, or \"\n \"Index, got `%s`\" % typename(type(meta))\n )\n\n # Notice, we use .__class__ as opposed to type() in order to support\n # object proxies see \n if x.__class__ != meta.__class__:\n errmsg = \"Expected partition of type `{}` but got `{}`\".format(\n typename(type(meta)),\n typename(type(x)),\n )\n elif is_dataframe_like(meta):\n dtypes = pd.concat([x.dtypes, meta.dtypes], axis=1, sort=True)\n bad_dtypes = [\n (repr(col), a, b)\n for col, a, b in dtypes.fillna(\"-\").itertuples()\n if not equal_dtypes(a, b)\n ]\n if bad_dtypes:\n errmsg = \"Partition type: `{}`\\n{}\".format(\n typename(type(meta)),\n asciitable([\"Column\", \"Found\", \"Expected\"], bad_dtypes),\n )\n else:\n check_matching_columns(meta, x)\n return x\n else:\n if equal_dtypes(x.dtype, meta.dtype):\n return x\n errmsg = \"Partition type: `{}`\\n{}\".format(\n typename(type(meta)),\n asciitable([\"\", \"dtype\"], [(\"Found\", x.dtype), (\"Expected\", meta.dtype)]),\n )\n\n raise ValueError(\n \"Metadata mismatch found%s.\\n\\n\"\n \"%s\" % ((\" in `%s`\" % funcname if funcname else \"\"), errmsg)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py____check_dask.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py____check_dask.return.dsk", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 442, "end_line": 505, "span_ids": ["index_summary", "_check_dask"], "tokens": 543}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Testing\n###############################################################\n\n\ndef _check_dask(dsk, check_names=True, check_dtypes=True, result=None, scheduler=None):\n import dask.dataframe as dd\n\n if hasattr(dsk, \"__dask_graph__\"):\n graph = dsk.__dask_graph__()\n if hasattr(graph, \"validate\"):\n graph.validate()\n if result is None:\n result = dsk.compute(scheduler=scheduler)\n if isinstance(dsk, dd.Index):\n assert \"Index\" in type(result).__name__, type(result)\n 
# assert type(dsk._meta) == type(result), type(dsk._meta)\n if check_names:\n assert dsk.name == result.name\n assert dsk._meta.name == result.name\n if isinstance(result, pd.MultiIndex):\n assert result.names == dsk._meta.names\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n elif isinstance(dsk, dd.Series):\n assert \"Series\" in type(result).__name__, type(result)\n assert type(dsk._meta) == type(result), type(dsk._meta)\n if check_names:\n assert dsk.name == result.name, (dsk.name, result.name)\n assert dsk._meta.name == result.name\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n _check_dask(\n dsk.index,\n check_names=check_names,\n check_dtypes=check_dtypes,\n result=result.index,\n )\n elif isinstance(dsk, dd.DataFrame):\n assert \"DataFrame\" in type(result).__name__, type(result)\n assert isinstance(dsk.columns, pd.Index), type(dsk.columns)\n assert type(dsk._meta) == type(result), type(dsk._meta)\n if check_names:\n tm.assert_index_equal(dsk.columns, result.columns)\n tm.assert_index_equal(dsk._meta.columns, result.columns)\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n _check_dask(\n dsk.index,\n check_names=check_names,\n check_dtypes=check_dtypes,\n result=result.index,\n )\n elif isinstance(dsk, dd.core.Scalar):\n assert np.isscalar(result) or isinstance(\n result, (pd.Timestamp, pd.Timedelta)\n )\n if check_dtypes:\n assert_dask_dtypes(dsk, result)\n else:\n msg = f\"Unsupported dask instance {type(dsk)} found\"\n raise AssertionError(msg)\n return result\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_graph_assert_sane_keynames.for_k_in_ddf_dask_keys_.assert_k_split_0_is": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_graph_assert_sane_keynames.for_k_in_ddf_dask_keys_.assert_k_split_0_is", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 580, "end_line": 630, "span_ids": ["assert_dask_graph", "assert_divisions", "assert_sane_keynames"], "tokens": 382}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_dask_graph(dask, label):\n if hasattr(dask, \"dask\"):\n dask = dask.dask\n assert isinstance(dask, Mapping)\n for k in dask:\n if isinstance(k, tuple):\n k = k[0]\n if k.startswith(label):\n return True\n raise AssertionError(f\"given dask graph doesn't contain label: {label}\")\n\n\ndef assert_divisions(ddf, scheduler=None):\n if not hasattr(ddf, \"divisions\"):\n return\n\n assert isinstance(ddf.divisions, tuple)\n\n if not getattr(ddf, \"known_divisions\", False):\n return\n\n def index(x):\n if is_index_like(x):\n return x\n try:\n return x.index.get_level_values(0)\n except AttributeError:\n return x.index\n\n get = get_scheduler(scheduler=scheduler, collections=[type(ddf)])\n results = get(ddf.dask, ddf.__dask_keys__())\n for i, df in enumerate(results[:-1]):\n if 
len(df):\n assert index(df).min() >= ddf.divisions[i]\n assert index(df).max() < ddf.divisions[i + 1]\n\n if len(results[-1]):\n assert index(results[-1]).min() >= ddf.divisions[-2]\n assert index(results[-1]).max() <= ddf.divisions[-1]\n\n\ndef assert_sane_keynames(ddf):\n if not hasattr(ddf, \"dask\"):\n return\n for k in ddf.dask.keys():\n while isinstance(k, tuple):\n k = k[0]\n assert isinstance(k, (str, bytes))\n assert len(k) < 100\n assert \" \" not in k\n assert k.split(\"-\")[0].isidentifier(), k", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_dtypes_assert_dask_dtypes.if_not_is_dask_collection.else_.if_hasattr_ddf__meta_dt.else_.assert_type_ddf__meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_dask_dtypes_assert_dask_dtypes.if_not_is_dask_collection.else_.if_hasattr_ddf__meta_dt.else_.assert_type_ddf__meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 891, "end_line": 924, "span_ids": ["assert_dask_dtypes"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_dask_dtypes(ddf, res, numeric_equal=True):\n \"\"\"Check that the dask metadata matches the result.\n\n If `numeric_equal`, integer and floating dtypes compare equal. 
This is\n useful due to the implicit conversion of integer to floating upon\n encountering missingness, which is hard to infer statically.\"\"\"\n\n eq_type_sets = [{\"O\", \"S\", \"U\", \"a\"}] # treat object and strings alike\n if numeric_equal:\n eq_type_sets.append({\"i\", \"f\", \"u\"})\n\n def eq_dtypes(a, b):\n return any(\n a.kind in eq_types and b.kind in eq_types for eq_types in eq_type_sets\n ) or (a == b)\n\n if not is_dask_collection(res) and is_dataframe_like(res):\n for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples():\n assert eq_dtypes(a, b)\n elif not is_dask_collection(res) and (is_index_like(res) or is_series_like(res)):\n a = ddf._meta.dtype\n b = res.dtype\n assert eq_dtypes(a, b)\n else:\n if hasattr(ddf._meta, \"dtype\"):\n a = ddf._meta.dtype\n if not hasattr(res, \"dtype\"):\n assert np.isscalar(res)\n b = np.dtype(type(res))\n else:\n b = res.dtype\n assert eq_dtypes(a, b)\n else:\n assert type(ddf._meta) == type(res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_max_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_max_deps_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 671, "end_line": 723, "span_ids": ["valid_divisions", "drop_by_shallow_copy", "assert_max_deps"], "tokens": 345}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_max_deps(x, n, eq=True):\n dependencies, dependents = get_deps(x.dask)\n if eq:\n assert max(map(len, dependencies.values())) == n\n else:\n assert max(map(len, dependencies.values())) <= n\n\n\ndef valid_divisions(divisions):\n \"\"\"Are the provided divisions valid?\n\n Examples\n --------\n >>> valid_divisions([1, 2, 3])\n True\n >>> valid_divisions([3, 2, 1])\n False\n >>> valid_divisions([1, 1, 1])\n False\n >>> valid_divisions([0, 1, 1])\n True\n >>> valid_divisions(123)\n False\n >>> valid_divisions([0, float('nan'), 1])\n False\n \"\"\"\n if not isinstance(divisions, (tuple, list)):\n return False\n\n for i, x in enumerate(divisions[:-2]):\n if x >= divisions[i + 1]:\n return False\n if isinstance(x, Number) and math.isnan(x):\n return False\n\n for x in divisions[-2:]:\n if isinstance(x, Number) and math.isnan(x):\n return False\n\n if divisions[-2] > divisions[-1]:\n return False\n\n return True\n\n\ndef drop_by_shallow_copy(df, columns, errors=\"raise\"):\n \"\"\"Use shallow copy to drop columns in place\"\"\"\n df2 = df.copy(deep=False)\n if not pd.api.types.is_list_like(columns):\n columns = [columns]\n df2.drop(columns=columns, inplace=True, errors=errors)\n return df2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
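The shallow-copy trick behind drop_by_shallow_copy can be demonstrated with plain pandas; a small sketch of the same pattern:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df2 = df.copy(deep=False)              # copies only the frame's metadata
df2.drop(columns=["b"], inplace=True)  # drops the column on the copy alone
assert list(df.columns) == ["a", "b"]  # the original is left untouched
assert list(df2.columns) == ["a"]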
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_random_timeseries.return.make_timeseries_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_random_timeseries.return.make_timeseries_", "embedding": null, "metadata": {"file_path": "dask/datasets.py", "file_name": "datasets.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 64, "span_ids": ["imports", "timeseries"], "tokens": 602}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\n\nfrom .utils import import_required\n\n\ndef timeseries(\n start=\"2000-01-01\",\n end=\"2000-01-31\",\n freq=\"1s\",\n partition_freq=\"1d\",\n dtypes={\"name\": str, \"id\": int, \"x\": float, \"y\": float},\n seed=None,\n **kwargs,\n):\n \"\"\"Create timeseries dataframe with random data\n\n Parameters\n ----------\n start : datetime (or datetime-like string)\n Start of time series\n end : datetime (or datetime-like string)\n End of time series\n dtypes : dict\n Mapping of column names to types.\n Valid types include {float, int, str, 'category'}\n freq : string\n String like '2s' or '1H' or '12W' for the time series frequency\n partition_freq : string\n String like '1M' or '2Y' to divide the dataframe into partitions\n seed : int (optional)\n Randomstate seed\n kwargs:\n Keywords to pass down to individual column creation functions.\n Keywords should be prefixed by the column name and then an underscore.\n\n Examples\n --------\n >>> import dask\n >>> df = dask.datasets.timeseries()\n >>> df.head() # doctest: +SKIP\n timestamp id name x y\n 2000-01-01 00:00:00 967 Jerry -0.031348 -0.040633\n 2000-01-01 00:00:01 1066 Michael -0.262136 0.307107\n 2000-01-01 00:00:02 988 Wendy -0.526331 0.128641\n 2000-01-01 00:00:03 1016 Yvonne 0.620456 0.767270\n 2000-01-01 00:00:04 998 Ursula 0.684902 -0.463278\n >>> df = dask.datasets.timeseries(\n ... '2000', '2010',\n ... freq='2H', partition_freq='1D', seed=1, # data frequency\n ... dtypes={'value': float, 'name': str, 'id': int}, # data types\n ... id_lam=1000 # control number of items in id column\n ... 
)\n \"\"\"\n from dask.dataframe.io.demo import make_timeseries\n\n return make_timeseries(\n start=start,\n end=end,\n freq=freq,\n partition_freq=partition_freq,\n seed=seed,\n dtypes=dtypes,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py__generate_mimesis__make_mimesis.return.db_Bag_dsk_name_npartit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py__generate_mimesis__make_mimesis.return.db_Bag_dsk_name_npartit", "embedding": null, "metadata": {"file_path": "dask/datasets.py", "file_name": "datasets.py", "file_type": "text/x-python", "category": "implementation", "start_line": 67, "end_line": 120, "span_ids": ["_generate_mimesis", "_make_mimesis"], "tokens": 368}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _generate_mimesis(field, schema_description, records_per_partition, seed):\n \"\"\"Generate data for a single partition of a dask bag\n\n See Also\n --------\n _make_mimesis\n \"\"\"\n from mimesis.schema import Field, Schema\n\n field = Field(seed=seed, **field)\n schema = Schema(schema=lambda: schema_description(field))\n return [schema.create(iterations=1)[0] for i in range(records_per_partition)]\n\n\ndef _make_mimesis(field, schema, npartitions, records_per_partition, seed=None):\n \"\"\"\n Make a Dask Bag filled with data randomly generated by the mimesis project\n\n Parameters\n ----------\n field: dict\n keyword arguments to pass to ``mimesis.Field``\n schema: Callable[Field] -> dict\n The schema to use to generate the data\n npartitions: int\n records_per_partition: int\n seed: int, None\n Seed for random data\n\n Returns\n -------\n Dask Bag\n\n See Also\n --------\n make_people\n \"\"\"\n import dask.bag as db\n from dask.base import tokenize\n\n field = field or {}\n\n random_state = random.Random(seed)\n seeds = [random_state.randint(0, 1 << 32) for _ in range(npartitions)]\n\n name = \"mimesis-\" + tokenize(\n field, schema, npartitions, records_per_partition, seed\n )\n dsk = {\n (name, i): (_generate_mimesis, field, schema, records_per_partition, seed)\n for i, seed in enumerate(seeds)\n }\n\n return db.Bag(dsk, name, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_make_people_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/datasets.py_make_people_", "embedding": null, "metadata": {"file_path": "dask/datasets.py", "file_name": "datasets.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 166, "span_ids": ["make_people"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
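The per-partition seeding scheme used by _make_mimesis, reduced to a standalone sketch (the helper name partition_seeds is illustrative):

import random

def partition_seeds(seed, npartitions):
    # A single root RNG fans out one child seed per partition, so every
    # partition is reproducible on its own yet differs from its siblings.
    root = random.Random(seed)
    return [root.randint(0, 1 << 32) for _ in range(npartitions)]

assert partition_seeds(42, 3) == partition_seeds(42, 3)  # deterministic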
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_people(npartitions=10, records_per_partition=1000, seed=None, locale=\"en\"):\n \"\"\"Make a dataset of random people\n\n This makes a Dask Bag with dictionary records of randomly generated people.\n This requires the optional library ``mimesis`` to generate records.\n\n Parameters\n ----------\n npartitions : int\n Number of partitions\n records_per_partition : int\n Number of records in each partition\n seed : int, (optional)\n Random seed\n locale : str\n Language locale, like 'en', 'fr', 'zh', or 'ru'\n\n Returns\n -------\n b: Dask Bag\n \"\"\"\n import_required(\n \"mimesis\",\n \"The mimesis module is required for this function. Try:\\n\"\n \" python -m pip install mimesis\",\n )\n\n schema = lambda field: {\n \"age\": field(\"person.age\"),\n \"name\": (field(\"person.name\"), field(\"person.surname\")),\n \"occupation\": field(\"person.occupation\"),\n \"telephone\": field(\"person.telephone\"),\n \"address\": {\"address\": field(\"address.address\"), \"city\": field(\"address.city\")},\n \"credit-card\": {\n \"number\": field(\"payment.credit_card_number\"),\n \"expiration-date\": field(\"payment.credit_card_expiration_date\"),\n },\n }\n\n return _make_mimesis(\n {\"locale\": locale}, schema, npartitions, records_per_partition, seed\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_operator_finalize.return.Delayed_name_graph_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_operator_finalize.return.Delayed_name_graph_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["finalize", "imports", "unzip"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\nimport types\nimport uuid\nimport warnings\nfrom collections.abc import Iterator\nfrom dataclasses import fields, is_dataclass\n\nfrom tlz import concat, curry, merge, unique\n\nfrom . 
import config, threaded\nfrom .base import (\n DaskMethodsMixin,\n dont_optimize,\n is_dask_collection,\n replace_name_in_key,\n)\nfrom .base import tokenize as _tokenize\nfrom .context import globalmethod\nfrom .core import flatten, quote\nfrom .highlevelgraph import HighLevelGraph\nfrom .utils import OperatorMethodMixin, apply, funcname, methodcaller\n\n__all__ = [\"Delayed\", \"delayed\"]\n\n\ndef unzip(ls, nout):\n \"\"\"Unzip a list of lists into ``nout`` outputs.\"\"\"\n out = list(zip(*ls))\n if not out:\n out = [()] * nout\n return out\n\n\ndef finalize(collection):\n assert is_dask_collection(collection)\n\n name = \"finalize-\" + tokenize(collection)\n keys = collection.__dask_keys__()\n finalize, args = collection.__dask_postcompute__()\n layer = {name: (finalize, keys) + args}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[collection])\n return Delayed(name, graph)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_unpack_collections_unpack_collections.return.expr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_unpack_collections_unpack_collections.return.expr_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 120, "span_ids": ["unpack_collections"], "tokens": 534}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unpack_collections(expr):\n \"\"\"Normalize a python object and merge all sub-graphs.\n\n - Replace ``Delayed`` with their keys\n - Convert literals to things the schedulers can handle\n - Extract dask graphs from all enclosed values\n\n Parameters\n ----------\n expr : object\n The object to be normalized. 
This function knows how to handle\n dask collections, as well as most builtin python types.\n\n Returns\n -------\n task : normalized task to be run\n collections : a tuple of collections\n\n Examples\n --------\n >>> import dask\n >>> a = delayed(1, 'a')\n >>> b = delayed(2, 'b')\n >>> task, collections = unpack_collections([a, b, 3])\n >>> task\n ['a', 'b', 3]\n >>> collections\n (Delayed('a'), Delayed('b'))\n\n >>> task, collections = unpack_collections({a: 1, b: 2})\n >>> task\n (<class 'dict'>, [['a', 1], ['b', 2]])\n >>> collections\n (Delayed('a'), Delayed('b'))\n \"\"\"\n if isinstance(expr, Delayed):\n return expr._key, (expr,)\n\n if is_dask_collection(expr):\n finalized = finalize(expr)\n return finalized._key, (finalized,)\n\n if isinstance(expr, Iterator):\n expr = tuple(expr)\n\n typ = type(expr)\n\n if typ in (list, tuple, set):\n args, collections = unzip((unpack_collections(e) for e in expr), 2)\n args = list(args)\n collections = tuple(unique(concat(collections), key=id))\n # Ensure output type matches input type\n if typ is not list:\n args = (typ, args)\n return args, collections\n\n if typ is dict:\n args, collections = unpack_collections([[k, v] for k, v in expr.items()])\n return (dict, args), collections\n\n if typ is slice:\n args, collections = unpack_collections([expr.start, expr.stop, expr.step])\n return (slice,) + tuple(args), collections\n\n if is_dataclass(expr):\n args, collections = unpack_collections(\n [\n [f.name, getattr(expr, f.name)]\n for f in fields(expr)\n if hasattr(expr, f.name) # if init=False, field might not exist\n ]\n )\n\n return (apply, typ, (), (dict, args)), collections\n\n return expr, ()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_to_task_dask_to_task_dask.return.expr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_to_task_dask_to_task_dask.return.expr_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 207, "span_ids": ["to_task_dask"], "tokens": 720}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_task_dask(expr):\n \"\"\"Normalize a python object and merge all sub-graphs.\n\n - Replace ``Delayed`` with their keys\n - Convert literals to things the schedulers can handle\n - Extract dask graphs from all enclosed values\n\n Parameters\n ----------\n expr : object\n The object to be normalized. 
This function knows how to handle\n ``Delayed``s, as well as most builtin python types.\n\n Returns\n -------\n task : normalized task to be run\n dask : a merged dask graph that forms the dag for this task\n\n Examples\n --------\n >>> import dask\n >>> a = delayed(1, 'a')\n >>> b = delayed(2, 'b')\n >>> task, dask = to_task_dask([a, b, 3]) # doctest: +SKIP\n >>> task # doctest: +SKIP\n ['a', 'b', 3]\n >>> dict(dask) # doctest: +SKIP\n {'a': 1, 'b': 2}\n\n >>> task, dasks = to_task_dask({a: 1, b: 2}) # doctest: +SKIP\n >>> task # doctest: +SKIP\n (dict, [['a', 1], ['b', 2]])\n >>> dict(dask) # doctest: +SKIP\n {'a': 1, 'b': 2}\n \"\"\"\n warnings.warn(\n \"The dask.delayed.to_task_dask function has been \"\n \"deprecated in favor of unpack_collections\",\n stacklevel=2,\n )\n\n if isinstance(expr, Delayed):\n return expr.key, expr.dask\n\n if is_dask_collection(expr):\n name = \"finalize-\" + tokenize(expr, pure=True)\n keys = expr.__dask_keys__()\n opt = getattr(expr, \"__dask_optimize__\", dont_optimize)\n finalize, args = expr.__dask_postcompute__()\n dsk = {name: (finalize, keys) + args}\n dsk.update(opt(expr.__dask_graph__(), keys))\n return name, dsk\n\n if isinstance(expr, Iterator):\n expr = list(expr)\n typ = type(expr)\n\n if typ in (list, tuple, set):\n args, dasks = unzip((to_task_dask(e) for e in expr), 2)\n args = list(args)\n dsk = merge(dasks)\n # Ensure output type matches input type\n return (args, dsk) if typ is list else ((typ, args), dsk)\n\n if typ is dict:\n args, dsk = to_task_dask([[k, v] for k, v in expr.items()])\n return (dict, args), dsk\n\n if is_dataclass(expr):\n args, dsk = to_task_dask(\n [\n [f.name, getattr(expr, f.name)]\n for f in fields(expr)\n if hasattr(expr, f.name) # if init=False, field might not exist\n ]\n )\n\n return (apply, typ, (), (dict, args)), dsk\n\n if typ is slice:\n args, dsk = to_task_dask([expr.start, expr.stop, expr.step])\n return (slice,) + tuple(args), dsk\n\n return expr, {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_tokenize_delayed": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_tokenize_delayed", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 209, "end_line": 460, "span_ids": ["tokenize", "delayed"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def tokenize(*args, pure=None, **kwargs):\n \"\"\"Mapping function from task -> consistent name.\n\n Parameters\n ----------\n args : object\n Python objects that summarize the task.\n pure : boolean, optional\n If True, a consistent hash function is tried on the input. If this\n fails, then a unique identifier is used. 
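What unpack_collections does from the caller's side, assuming the public dask.delayed API of this snapshot:

from dask import delayed
from dask.delayed import unpack_collections

a = delayed(1, name="a")
task, collections = unpack_collections([a, 2])
assert task == ["a", 2]     # the Delayed is replaced by its key
assert collections[0] is a  # and surfaced in the collections tuple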
If False (default), then a\n unique identifier is always used.\n \"\"\"\n if pure is None:\n pure = config.get(\"delayed_pure\", False)\n\n if pure:\n return _tokenize(*args, **kwargs)\n else:\n return str(uuid.uuid4())\n\n\n@curry\ndef delayed(obj, name=None, pure=None, nout=None, traverse=True):\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed._Wraps_a_function_or_ob_delayed._Wraps_a_function_or_ob": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed._Wraps_a_function_or_ob_delayed._Wraps_a_function_or_ob", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 232, "end_line": 434, "span_ids": ["delayed"], "tokens": 1951}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@curry\ndef delayed(obj, name=None, pure=None, nout=None, traverse=True):\n \"\"\"Wraps a function or object to produce a ``Delayed``.\n\n ``Delayed`` objects act as proxies for the object they wrap, but all\n operations on them are done lazily by building up a dask graph internally.\n\n Parameters\n ----------\n obj : object\n The function or object to wrap\n name : string or hashable, optional\n The key to use in the underlying graph for the wrapped object. Defaults\n to hashing content. Note that this only affects the name of the object\n wrapped by this call to delayed, and *not* the output of delayed\n function calls - for that use ``dask_key_name=`` as described below.\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies ``obj``. If you'd like to provide\n a descriptive name that is still unique, combine the descriptive name\n with :func:`dask.base.tokenize` of the ``array_like``. See\n :ref:`graphs` for more.\n\n pure : bool, optional\n Indicates whether calling the resulting ``Delayed`` object is a pure\n operation. If True, arguments to the call are hashed to produce\n deterministic keys. If not provided, the default is to check the global\n ``delayed_pure`` setting, and fallback to ``False`` if unset.\n nout : int, optional\n The number of outputs returned from calling the resulting ``Delayed``\n object. If provided, the ``Delayed`` output of the call can be iterated\n into ``nout`` objects, allowing for unpacking of results. By default\n iteration over ``Delayed`` objects will error. Note, that ``nout=1``\n expects ``obj`` to return a tuple of length 1, and consequently for\n ``nout=0``, ``obj`` should return an empty tuple.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``delayed``. For large collections this can be\n expensive. 
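The pure/impure split in tokenize, observed directly (assumes the same dask snapshot):

from dask.delayed import tokenize

# pure=True hashes the arguments into a stable token, while pure=False
# falls back to a fresh uuid4, so repeated calls never collide.
assert tokenize(1, 2, pure=True) == tokenize(1, 2, pure=True)
assert tokenize(1, 2, pure=False) != tokenize(1, 2, pure=False)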
If ``obj`` doesn't contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n\n Examples\n --------\n Apply to functions to delay execution:\n\n >>> from dask import delayed\n >>> def inc(x):\n ... return x + 1\n\n >>> inc(10)\n 11\n\n >>> x = delayed(inc, pure=True)(10)\n >>> type(x) == Delayed\n True\n >>> x.compute()\n 11\n\n Can be used as a decorator:\n\n >>> @delayed(pure=True)\n ... def add(a, b):\n ... return a + b\n >>> add(1, 2).compute()\n 3\n\n ``delayed`` also accepts an optional keyword ``pure``. If False, then\n subsequent calls will always produce a different ``Delayed``. This is\n useful for non-pure functions (such as ``time`` or ``random``).\n\n >>> from random import random\n >>> out1 = delayed(random, pure=False)()\n >>> out2 = delayed(random, pure=False)()\n >>> out1.key == out2.key\n False\n\n If you know a function is pure (output only depends on the input, with no\n global state), then you can set ``pure=True``. This will attempt to apply a\n consistent name to the output, but will fallback on the same behavior of\n ``pure=False`` if this fails.\n\n >>> @delayed(pure=True)\n ... def add(a, b):\n ... return a + b\n >>> out1 = add(1, 2)\n >>> out2 = add(1, 2)\n >>> out1.key == out2.key\n True\n\n Instead of setting ``pure`` as a property of the callable, you can also set\n it contextually using the ``delayed_pure`` setting. Note that this\n influences the *call* and not the *creation* of the callable:\n\n >>> @delayed\n ... def mul(a, b):\n ... return a * b\n >>> import dask\n >>> with dask.config.set(delayed_pure=True):\n ... print(mul(1, 2).key == mul(1, 2).key)\n True\n >>> with dask.config.set(delayed_pure=False):\n ... print(mul(1, 2).key == mul(1, 2).key)\n False\n\n The key name of the result of calling a delayed object is determined by\n hashing the arguments by default. To explicitly set the name, you can use\n the ``dask_key_name`` keyword when calling the function:\n\n >>> add(1, 2) # doctest: +SKIP\n Delayed('add-3dce7c56edd1ac2614add714086e950f')\n >>> add(1, 2, dask_key_name='three')\n Delayed('three')\n\n Note that objects with the same key name are assumed to have the same\n result. If you set the names explicitly you should make sure your key names\n are different for different results.\n\n >>> add(1, 2, dask_key_name='three')\n Delayed('three')\n >>> add(2, 1, dask_key_name='three')\n Delayed('three')\n >>> add(2, 2, dask_key_name='four')\n Delayed('four')\n\n ``delayed`` can also be applied to objects to make operations on them lazy:\n\n >>> a = delayed([1, 2, 3])\n >>> isinstance(a, Delayed)\n True\n >>> a.compute()\n [1, 2, 3]\n\n The key name of a delayed object is hashed by default if ``pure=True`` or\n is generated randomly if ``pure=False`` (default). To explicitly set the\n name, you can use the ``name`` keyword. To ensure that the key is unique\n you should include the tokenized value as well, or otherwise ensure that\n it's unique:\n\n >>> from dask.base import tokenize\n >>> data = [1, 2, 3]\n >>> a = delayed(data, name='mylist-' + tokenize(data))\n >>> a # doctest: +SKIP\n Delayed('mylist-55af65871cb378a4fa6de1660c3e8fb7')\n\n Delayed results act as a proxy to the underlying object. 
Many operators\n are supported:\n\n >>> (a + [1, 2]).compute()\n [1, 2, 3, 1, 2]\n >>> a[1].compute()\n 2\n\n Method and attribute access also works:\n\n >>> a.count(2).compute()\n 1\n\n Note that if a method doesn't exist, no error will be thrown until runtime:\n\n >>> res = a.not_a_real_method() # doctest: +SKIP\n >>> res.compute() # doctest: +SKIP\n AttributeError(\"'list' object has no attribute 'not_a_real_method'\")\n\n \"Magic\" methods (e.g. operators and attribute access) are assumed to be\n pure, meaning that subsequent calls must return the same results. This\n behavior is not overrideable through the ``delayed`` call, but can be\n modified using other ways as described below.\n\n To invoke an impure attribute or operator, you'd need to use it in a\n delayed function with ``pure=False``:\n\n >>> class Incrementer:\n ... def __init__(self):\n ... self._n = 0\n ... @property\n ... def n(self):\n ... self._n += 1\n ... return self._n\n ...\n >>> x = delayed(Incrementer())\n >>> x.n.key == x.n.key\n True\n >>> get_n = delayed(lambda x: x.n, pure=False)\n >>> get_n(x).key == get_n(x).key\n False\n\n In contrast, methods are assumed to be impure by default, meaning that\n subsequent calls may return different results. To assume purity, set\n ``pure=True``. This allows sharing of any intermediate values.\n\n >>> a.count(2, pure=True).key == a.count(2, pure=True).key\n True\n\n As with function calls, method calls also respect the global\n ``delayed_pure`` setting and support the ``dask_key_name`` keyword:\n\n >>> a.count(2, dask_key_name=\"count_2\")\n Delayed('count_2')\n >>> import dask\n >>> with dask.config.set(delayed_pure=True):\n ... print(a.count(2).key == a.count(2).key)\n True\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_call_function_call_function.return.Delayed_name_graph_leng": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_call_function_call_function.return.Delayed_name_graph_leng", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 614, "end_line": 640, "span_ids": ["call_function"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def call_function(func, func_token, args, kwargs, pure=None, nout=None):\n dask_key_name = kwargs.pop(\"dask_key_name\", None)\n pure = kwargs.pop(\"pure\", pure)\n\n if dask_key_name is None:\n name = \"{}-{}\".format(\n funcname(func),\n tokenize(func_token, *args, pure=pure, **kwargs),\n )\n else:\n name = dask_key_name\n\n args2, collections = unzip(map(unpack_collections, args), 2)\n collections = list(concat(collections))\n\n if kwargs:\n dask_kwargs, collections2 = unpack_collections(kwargs)\n collections.extend(collections2)\n task = (apply, func, list(args2), dask_kwargs)\n else:\n task = (func,) + args2\n\n graph = HighLevelGraph.from_collections(\n name, {name: task}, 
dependencies=collections\n )\n nout = nout if nout is not None else None\n return Delayed(name, graph, length=nout)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedLeaf_DelayedLeaf.__call__.return.call_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedLeaf_DelayedLeaf.__call__.return.call_function_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 624, "end_line": 642, "span_ids": ["DelayedLeaf.dask", "DelayedLeaf.__call__", "DelayedLeaf.__init__", "DelayedLeaf"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DelayedLeaf(Delayed):\n __slots__ = (\"_obj\", \"_pure\", \"_nout\")\n\n def __init__(self, obj, key, pure=None, nout=None):\n super().__init__(key, None)\n self._obj = obj\n self._pure = pure\n self._nout = nout\n\n @property\n def dask(self):\n return HighLevelGraph.from_collections(\n self._key, {self._key: self._obj}, dependencies=()\n )\n\n def __call__(self, *args, **kwargs):\n return call_function(\n self._obj, self._key, args, kwargs, pure=self._pure, nout=self._nout\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedAttr_DelayedAttr.__call__.return.call_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_DelayedAttr_DelayedAttr.__call__.return.call_function_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 645, "end_line": 674, "span_ids": ["DelayedAttr.dask", "DelayedAttr.__init__", "DelayedAttr.__call__", "DelayedAttr.__getattr__", "DelayedAttr"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DelayedAttr(Delayed):\n __slots__ = (\"_obj\", \"_attr\")\n\n def __init__(self, obj, attr):\n key = \"getattr-%s\" % tokenize(obj, attr, pure=True)\n super().__init__(key, None)\n self._obj = obj\n self._attr = attr\n\n def __getattr__(self, attr):\n # Calling np.dtype(dask.delayed(...)) used to result in a segfault, as\n # numpy recursively tries to get `dtype` from the object. This is\n # likely a bug in numpy. For now, we can do a dumb check for if\n # `x.dtype().dtype()` is called (which shouldn't ever show up in real\n # code). 
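call_function's two naming branches, seen from user code; a sketch assuming the public dask.delayed decorator:

import dask

@dask.delayed
def add(a, b):
    return a + b

# An explicit dask_key_name is used verbatim as the graph key ...
assert add(1, 2, dask_key_name="three").key == "three"
# ... otherwise the key is "<funcname>-<token>", stable under pure=True.
assert add(1, 2, pure=True).key == add(1, 2, pure=True).key
assert add(1, 2, pure=True).key.startswith("add-")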
See https://github.com/dask/dask/pull/4374#issuecomment-454381465\n if attr == \"dtype\" and self._attr == \"dtype\":\n raise AttributeError(\"Attribute dtype not found\")\n return super().__getattr__(attr)\n\n @property\n def dask(self):\n layer = {self._key: (getattr, self._obj._key, self._attr)}\n return HighLevelGraph.from_collections(\n self._key, layer, dependencies=[self._obj]\n )\n\n def __call__(self, *args, **kwargs):\n return call_function(\n methodcaller(self._attr), self._attr, (self._obj,) + args, kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_for_op_in__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_for_op_in__", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 687, "end_line": 724, "span_ids": ["impl:4", "single_key"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "for op in [\n operator.abs,\n operator.neg,\n operator.pos,\n operator.invert,\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n operator.mod,\n operator.pow,\n operator.and_,\n operator.or_,\n operator.xor,\n operator.lshift,\n operator.rshift,\n operator.eq,\n operator.ge,\n operator.gt,\n operator.ne,\n operator.le,\n operator.lt,\n operator.getitem,\n]:\n Delayed._bind_operator(op)\n\n\ntry:\n Delayed._bind_operator(operator.matmul)\nexcept AttributeError:\n pass\n\n\ndef single_key(seq):\n \"\"\"Pick out the only element of this list, a list of keys\"\"\"\n return seq[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/diagnostics/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 5, "span_ids": ["imports"], "tokens": 32}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from ..callbacks import Callback\nfrom .profile import CacheProfiler, Profiler, ResourceProfiler\nfrom .profile_visualize import visualize\nfrom .progress import ProgressBar", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
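DelayedAttr and the operator bindings above, exercised together: attribute access, method calls, and every bound operator all build lazy graph nodes rather than evaluating eagerly. A short sketch:

import dask

lst = dask.delayed([1, 2, 2])
n = lst.count(2)                      # DelayedAttr -> methodcaller("count")
assert n.compute() == 2
assert (lst[0] + n).compute() == 3    # getitem and add are bound operators
x = dask.delayed(10)
assert ((x + 5) * 2).compute() == 30  # chained lazy arithmetic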
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_from_collections_import_n_Profiler.clear.self._dsk._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_from_collections_import_n_Profiler.clear.self._dsk._", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 103, "span_ids": ["Profiler._posttask", "Profiler.visualize", "Profiler._finish", "imports", "Profiler.__enter__", "Profiler._pretask", "Profiler.clear", "Profiler", "Profiler._plot", "Profiler._start", "Profiler.__init__"], "tokens": 724}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import namedtuple\nfrom itertools import starmap\nfrom multiprocessing import Pipe, Process, current_process\nfrom time import sleep\nfrom timeit import default_timer\n\nfrom ..callbacks import Callback\nfrom ..utils import import_required\n\n# Stores execution data for each task\nTaskData = namedtuple(\n \"TaskData\", (\"key\", \"task\", \"start_time\", \"end_time\", \"worker_id\")\n)\n\n\nclass Profiler(Callback):\n \"\"\"A profiler for dask execution at the task level.\n\n Records the following information for each task:\n 1. Key\n 2. Task\n 3. Start time in seconds since the epoch\n 4. Finish time in seconds since the epoch\n 5. Worker id\n\n Examples\n --------\n\n >>> from operator import add, mul\n >>> from dask.threaded import get\n >>> from dask.diagnostics import Profiler\n >>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}\n >>> with Profiler() as prof:\n ... get(dsk, 'z')\n 22\n\n >>> prof.results # doctest: +SKIP\n [TaskData(key='y', task=(add, 'x', 10), start_time=..., end_time=..., worker_id=...),\n TaskData(key='z', task=(mul, 'y', 2), start_time=..., end_time=..., worker_id=...)]\n\n These results can be visualized in a bokeh plot using the ``visualize``\n method. 
Note that this requires bokeh to be installed.\n\n >>> prof.visualize() # doctest: +SKIP\n\n You can activate the profiler globally\n\n >>> prof.register()\n\n If you use the profiler globally you will need to clear out old results\n manually.\n\n >>> prof.clear()\n >>> prof.unregister()\n\n \"\"\"\n\n def __init__(self):\n self._results = {}\n self.results = []\n self._dsk = {}\n\n def __enter__(self):\n self.clear()\n return super().__enter__()\n\n def _start(self, dsk):\n self._dsk.update(dsk)\n\n def _pretask(self, key, dsk, state):\n start = default_timer()\n self._results[key] = (key, dsk[key], start)\n\n def _posttask(self, key, value, dsk, state, id):\n end = default_timer()\n self._results[key] += (end, id)\n\n def _finish(self, dsk, state, failed):\n results = {k: v for k, v in self._results.items() if len(v) == 5}\n self.results += list(starmap(TaskData, results.values()))\n self._results.clear()\n\n def _plot(self, **kwargs):\n from .profile_visualize import plot_tasks\n\n return plot_tasks(self.results, self._dsk, **kwargs)\n\n def visualize(self, **kwargs):\n \"\"\"Visualize the profiling run in a bokeh plot.\n\n See also\n --------\n dask.diagnostics.profile_visualize.visualize\n \"\"\"\n from .profile_visualize import visualize\n\n return visualize(self, **kwargs)\n\n def clear(self):\n \"\"\"Clear out old results from profiler\"\"\"\n self._results.clear()\n del self.results[:]\n self._dsk = {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_ResourceData_ResourceProfiler.visualize.return.visualize_self_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_ResourceData_ResourceProfiler.visualize.return.visualize_self_kwargs_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 106, "end_line": 212, "span_ids": ["impl:3", "ResourceProfiler", "ResourceProfiler._is_running", "ResourceProfiler.__enter__", "ResourceProfiler.__exit__", "ResourceProfiler._start_collect", "ResourceProfiler._stop_collect", "ResourceProfiler:3", "ResourceProfiler.clear", "ResourceProfiler.visualize", "ResourceProfiler._finish", "ResourceProfiler.__init__", "ResourceProfiler.close", "ResourceProfiler._plot", "ResourceProfiler._start"], "tokens": 666}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "ResourceData = namedtuple(\"ResourceData\", (\"time\", \"mem\", \"cpu\"))\n\n\nclass ResourceProfiler(Callback):\n \"\"\"A profiler for resource use.\n\n Records the following each timestep\n 1. Time in seconds since the epoch\n 2. Memory usage in MB\n 3. % CPU usage\n\n Examples\n --------\n\n >>> from operator import add, mul\n >>> from dask.threaded import get\n >>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}\n >>> with ResourceProfiler() as prof:\n ... 
get(dsk, 'z')\n 22\n\n These results can be visualized in a bokeh plot using the ``visualize``\n method. Note that this requires bokeh to be installed.\n\n >>> prof.visualize() # doctest: +SKIP\n\n You can activate the profiler globally\n\n >>> prof.register()\n\n If you use the profiler globally you will need to clear out old results\n manually.\n\n >>> prof.clear()\n\n Note that when used as a context manager data will be collected throughout\n the duration of the enclosed block. In contrast, when registered globally\n data will only be collected while a dask scheduler is active.\n\n >>> prof.unregister()\n \"\"\"\n\n def __init__(self, dt=1):\n self._dt = dt\n self._entered = False\n self._tracker = None\n self.results = []\n\n def _is_running(self):\n return self._tracker is not None and self._tracker.is_alive()\n\n def _start_collect(self):\n if not self._is_running():\n self._tracker = _Tracker(self._dt)\n self._tracker.start()\n self._tracker.parent_conn.send(\"collect\")\n\n def _stop_collect(self):\n if self._is_running():\n self._tracker.parent_conn.send(\"send_data\")\n self.results.extend(starmap(ResourceData, self._tracker.parent_conn.recv()))\n\n def __enter__(self):\n self._entered = True\n self.clear()\n self._start_collect()\n return super().__enter__()\n\n def __exit__(self, *args):\n self._entered = False\n self._stop_collect()\n self.close()\n super().__exit__(*args)\n\n def _start(self, dsk):\n self._start_collect()\n\n def _finish(self, dsk, state, failed):\n if not self._entered:\n self._stop_collect()\n\n def close(self):\n \"\"\"Shutdown the resource tracker process\"\"\"\n if self._is_running():\n self._tracker.shutdown()\n self._tracker = None\n\n __del__ = close\n\n def clear(self):\n self.results = []\n\n def _plot(self, **kwargs):\n from .profile_visualize import plot_resources\n\n return plot_resources(self.results, **kwargs)\n\n def visualize(self, **kwargs):\n \"\"\"Visualize the profiling run in a bokeh plot.\n\n See also\n --------\n dask.diagnostics.profile_visualize.visualize\n \"\"\"\n from .profile_visualize import visualize\n\n return visualize(self, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker__Tracker._update_pids.return._self_parent_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker__Tracker._update_pids.return._self_parent_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 213, "end_line": 232, "span_ids": ["_Tracker._update_pids", "_Tracker.shutdown", "_Tracker.__init__", "_Tracker"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Tracker(Process):\n \"\"\"Background process for tracking resource usage\"\"\"\n\n def __init__(self, dt=1):\n super().__init__()\n self.daemon = True\n self.dt = dt\n self.parent_pid = current_process().pid\n 
self.parent_conn, self.child_conn = Pipe()\n\n def shutdown(self):\n if not self.parent_conn.closed:\n self.parent_conn.send(\"shutdown\")\n self.parent_conn.close()\n self.join()\n\n def _update_pids(self, pid):\n return [self.parent] + [\n p for p in self.parent.children() if p.pid != pid and p.status() != \"zombie\"\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker.run_CacheData.namedtuple_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py__Tracker.run_CacheData.namedtuple_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 274, "span_ids": ["_Tracker.run", "impl:5"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Tracker(Process):\n\n def run(self):\n\n psutil = import_required(\n \"psutil\", \"Tracking resource usage requires `psutil` to be installed\"\n )\n self.parent = psutil.Process(self.parent_pid)\n\n pid = current_process()\n data = []\n while True:\n try:\n msg = self.child_conn.recv()\n except KeyboardInterrupt:\n continue\n if msg == \"shutdown\":\n break\n elif msg == \"collect\":\n ps = self._update_pids(pid)\n while not data or not self.child_conn.poll():\n tic = default_timer()\n mem = cpu = 0\n for p in ps:\n try:\n mem2 = p.memory_info().rss\n cpu2 = p.cpu_percent()\n except Exception: # could be a few different exceptions\n pass\n else:\n # Only increment if both were successful\n mem += mem2\n cpu += cpu2\n data.append((tic, mem / 1e6, cpu))\n sleep(self.dt)\n elif msg == \"send_data\":\n self.child_conn.send(data)\n data = []\n self.child_conn.close()\n\n\nCacheData = namedtuple(\n \"CacheData\", (\"key\", \"task\", \"metric\", \"cache_time\", \"free_time\")\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_CacheProfiler_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile.py_CacheProfiler_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile.py", "file_name": "profile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 280, "end_line": 388, "span_ids": ["CacheProfiler", "CacheProfiler._posttask", "CacheProfiler._finish", "CacheProfiler._plot", "CacheProfiler.__init__", "CacheProfiler.visualize", "CacheProfiler.clear", "CacheProfiler.__enter__", "CacheProfiler._start"], "tokens": 860}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
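A reduced, runnable sketch of the Pipe message protocol that ResourceProfiler and _Tracker speak ("collect" / "send_data" / "shutdown"); the sampling loop of the run() method that follows is replaced here by a one-sample stand-in:

from multiprocessing import Pipe, Process

def tracker_loop(conn):
    data = []
    while True:
        msg = conn.recv()
        if msg == "shutdown":
            break
        elif msg == "collect":
            data.append(("time", "mem", "cpu"))  # stand-in for one sample
        elif msg == "send_data":
            conn.send(data)
            data = []
    conn.close()

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=tracker_loop, args=(child_conn,), daemon=True)
    p.start()
    parent_conn.send("collect")
    parent_conn.send("send_data")
    print(parent_conn.recv())   # [('time', 'mem', 'cpu')]
    parent_conn.send("shutdown")
    p.join()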
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CacheProfiler(Callback):\n \"\"\"A profiler for dask execution at the scheduler cache level.\n\n Records the following information for each task:\n 1. Key\n 2. Task\n 3. Size metric\n 4. Cache entry time in seconds since the epoch\n 5. Cache exit time in seconds since the epoch\n\n Examples\n --------\n\n >>> from operator import add, mul\n >>> from dask.threaded import get\n >>> from dask.diagnostics import CacheProfiler\n >>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}\n >>> with CacheProfiler() as prof:\n ... get(dsk, 'z')\n 22\n\n >>> prof.results # doctest: +SKIP\n [CacheData(key='y', task=(add, 'x', 10), metric=1, cache_time=..., free_time=...),\n CacheData(key='z', task=(mul, 'y', 2), metric=1, cache_time=..., free_time=...)]\n\n The default is to count each task (``metric`` is 1 for all tasks). Other\n functions may used as a metric instead through the ``metric`` keyword. For\n example, the ``nbytes`` function found in ``cachey`` can be used to measure\n the number of bytes in the cache.\n\n >>> from cachey import nbytes # doctest: +SKIP\n >>> with CacheProfiler(metric=nbytes) as prof: # doctest: +SKIP\n ... get(dsk, 'z')\n 22\n\n The profiling results can be visualized in a bokeh plot using the\n ``visualize`` method. Note that this requires bokeh to be installed.\n\n >>> prof.visualize() # doctest: +SKIP\n\n You can activate the profiler globally\n\n >>> prof.register()\n\n If you use the profiler globally you will need to clear out old results\n manually.\n\n >>> prof.clear()\n >>> prof.unregister()\n\n \"\"\"\n\n def __init__(self, metric=None, metric_name=None):\n self.clear()\n self._metric = metric if metric else lambda value: 1\n if metric_name:\n self._metric_name = metric_name\n elif metric:\n self._metric_name = metric.__name__\n else:\n self._metric_name = \"count\"\n\n def __enter__(self):\n self.clear()\n return super().__enter__()\n\n def _start(self, dsk):\n self._dsk.update(dsk)\n if not self._start_time:\n self._start_time = default_timer()\n\n def _posttask(self, key, value, dsk, state, id):\n t = default_timer()\n self._cache[key] = (self._metric(value), t)\n for k in state[\"released\"] & self._cache.keys():\n metric, start = self._cache.pop(k)\n self.results.append(CacheData(k, dsk[k], metric, start, t))\n\n def _finish(self, dsk, state, failed):\n t = default_timer()\n for k, (metric, start) in self._cache.items():\n self.results.append(CacheData(k, dsk[k], metric, start, t))\n self._cache.clear()\n\n def _plot(self, **kwargs):\n from .profile_visualize import plot_cache\n\n return plot_cache(\n self.results, self._dsk, self._start_time, self._metric_name, **kwargs\n )\n\n def visualize(self, **kwargs):\n \"\"\"Visualize the profiling run in a bokeh plot.\n\n See also\n --------\n dask.diagnostics.profile_visualize.visualize\n \"\"\"\n from .profile_visualize import visualize\n\n return visualize(self, **kwargs)\n\n def clear(self):\n \"\"\"Clear out old results from profiler\"\"\"\n self.results = []\n self._cache = {}\n self._dsk = {}\n self._start_time = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_random_unquote.return.expr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_random_unquote.return.expr", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 33, "span_ids": ["imports", "BOKEH_VERSION", "impl", "unquote"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\nimport warnings\nfrom bisect import bisect_left\nfrom itertools import cycle\nfrom operator import add, itemgetter\n\nfrom tlz import accumulate, groupby, pluck, unique\n\nfrom ..core import istask\nfrom ..utils import apply, funcname, import_required\n\n\ndef BOKEH_VERSION():\n import bokeh\n from packaging.version import parse as parse_version\n\n return parse_version(bokeh.__version__)\n\n\n_BOKEH_MISSING_MSG = \"Diagnostics plots require `bokeh` to be installed\"\n\n\ndef unquote(expr):\n if istask(expr):\n if expr[0] in (tuple, list, set):\n return expr[0](map(unquote, expr[1]))\n elif (\n expr[0] == dict\n and isinstance(expr[1], list)\n and isinstance(expr[1][0], list)\n ):\n return dict(map(unquote, expr[1]))\n return expr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_pprint_task_pprint_task.if_istask_task_.else_.try_.except_TypeError_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_pprint_task_pprint_task.if_istask_task_.else_.try_.except_TypeError_.return._", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 36, "end_line": 125, "span_ids": ["pprint_task"], "tokens": 715}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pprint_task(task, keys, label_size=60):\n \"\"\"Return a nicely formatted string for a task.\n\n Parameters\n ----------\n task:\n Value within dask graph to render as text\n keys: iterable\n List of keys within dask graph\n label_size: int (optional)\n Maximum size of output label, defaults to 60\n\n Examples\n --------\n >>> from operator import add, mul\n >>> dsk = {'a': 1,\n ... 'b': 2,\n ... 'c': (add, 'a', 'b'),\n ... 'd': (add, (mul, 'a', 'b'), 'c'),\n ... 'e': (sum, ['a', 'b', 5]),\n ... 'f': (add,),\n ... 
'g': []}\n\n >>> pprint_task(dsk['c'], dsk)\n 'add(_, _)'\n >>> pprint_task(dsk['d'], dsk)\n 'add(mul(_, _), _)'\n >>> pprint_task(dsk['e'], dsk)\n 'sum([_, _, *])'\n >>> pprint_task(dsk['f'], dsk)\n 'add()'\n >>> pprint_task(dsk['g'], dsk)\n '[]'\n \"\"\"\n if istask(task):\n func = task[0]\n if func is apply:\n head = funcname(task[1])\n tail = \")\"\n args = unquote(task[2]) if len(task) > 2 else ()\n kwargs = unquote(task[3]) if len(task) > 3 else {}\n else:\n if hasattr(func, \"funcs\"):\n head = \"(\".join(funcname(f) for f in func.funcs)\n tail = \")\" * len(func.funcs)\n else:\n head = funcname(task[0])\n tail = \")\"\n args = task[1:]\n kwargs = {}\n if args or kwargs:\n label_size2 = int(\n (label_size - len(head) - len(tail)) // (len(args) + len(kwargs))\n )\n pprint = lambda t: pprint_task(t, keys, label_size2)\n if args:\n if label_size2 > 5:\n args = \", \".join(pprint(t) for t in args)\n else:\n args = \"...\"\n else:\n args = \"\"\n if kwargs:\n if label_size2 > 5:\n kwargs = \", \" + \", \".join(\n f\"{k}={pprint(v)}\" for k, v in sorted(kwargs.items())\n )\n else:\n kwargs = \", ...\"\n else:\n kwargs = \"\"\n return f\"{head}({args}{kwargs}{tail}\"\n elif isinstance(task, list):\n if not task:\n return \"[]\"\n elif len(task) > 3:\n result = pprint_task(task[:3], keys, label_size)\n return result[:-1] + \", ...]\"\n else:\n label_size2 = int((label_size - 2 - 2 * len(task)) // len(task))\n args = \", \".join(pprint_task(t, keys, label_size2) for t in task)\n return f\"[{args}]\"\n else:\n try:\n if task in keys:\n return \"_\"\n else:\n return \"*\"\n except TypeError:\n return \"*\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_get_colors_get_colors.return._color_lookup_n_for_n_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_get_colors_get_colors.return._color_lookup_n_for_n_in", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 120, "end_line": 145, "span_ids": ["get_colors"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_colors(palette, funcs):\n \"\"\"Get a dict mapping funcs to colors from palette.\n\n Parameters\n ----------\n palette : string\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n funcs : iterable\n Iterable of function names\n \"\"\"\n palettes = import_required(\"bokeh.palettes\", _BOKEH_MISSING_MSG)\n\n unique_funcs = sorted(unique(funcs))\n n_funcs = len(unique_funcs)\n palette_lookup = palettes.all_palettes[palette]\n keys = list(sorted(palette_lookup.keys()))\n index = keys[min(bisect_left(keys, n_funcs), len(keys) - 1)]\n palette = palette_lookup[index]\n # Some bokeh palettes repeat colors, we want just the unique set\n palette = list(unique(palette))\n if len(palette) > n_funcs:\n # 
Consistently shuffle palette - prevents just using low-range\n random.Random(42).shuffle(palette)\n color_lookup = dict(zip(unique_funcs, cycle(palette)))\n return [color_lookup[n] for n in funcs]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks_plot_tasks.hover.p_select_HoverTool_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks_plot_tasks.hover.p_select_HoverTool_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 234, "end_line": 323, "span_ids": ["plot_tasks"], "tokens": 729}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_tasks(results, dsk, palette=\"Viridis\", label_size=60, **kwargs):\n \"\"\"Visualize the results of profiling in a bokeh plot.\n\n Parameters\n ----------\n results : sequence\n Output of Profiler.results\n dsk : dict\n The dask graph being profiled.\n palette : string, optional\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n label_size: int (optional)\n Maximum size of output labels in plot, defaults to 60\n **kwargs\n Other keyword arguments, passed to bokeh.figure. 
These will override\n all defaults set by visualize.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n from bokeh.models import HoverTool\n\n defaults = dict(\n title=\"Profile Results\",\n tools=\"hover,save,reset,xwheel_zoom,xpan\",\n toolbar_location=\"above\",\n width=800,\n height=300,\n )\n # Support plot_width and plot_height for backwards compatibility\n if \"plot_width\" in kwargs:\n kwargs[\"width\"] = kwargs.pop(\"plot_width\")\n if \"plot_height\" in kwargs:\n kwargs[\"height\"] = kwargs.pop(\"plot_height\")\n defaults.update(**kwargs)\n\n if results:\n keys, tasks, starts, ends, ids = zip(*results)\n\n id_group = groupby(itemgetter(4), results)\n timings = {\n k: [i.end_time - i.start_time for i in v] for (k, v) in id_group.items()\n }\n id_lk = {\n t[0]: n\n for (n, t) in enumerate(\n sorted(timings.items(), key=itemgetter(1), reverse=True)\n )\n }\n\n left = min(starts)\n right = max(ends)\n\n p = bp.figure(\n y_range=[str(i) for i in range(len(id_lk))],\n x_range=[0, right - left],\n **defaults,\n )\n\n data = {}\n data[\"width\"] = width = [e - s for (s, e) in zip(starts, ends)]\n data[\"x\"] = [w / 2 + s - left for (w, s) in zip(width, starts)]\n data[\"y\"] = [id_lk[i] + 1 for i in ids]\n data[\"function\"] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]\n data[\"color\"] = get_colors(palette, funcs)\n data[\"key\"] = [str(i) for i in keys]\n\n source = bp.ColumnDataSource(data=data)\n\n p.rect(\n source=source,\n x=\"x\",\n y=\"y\",\n height=1,\n width=\"width\",\n color=\"color\",\n line_color=\"gray\",\n )\n else:\n p = bp.figure(y_range=[str(i) for i in range(8)], x_range=[0, 10], **defaults)\n p.grid.grid_line_color = None\n p.axis.axis_line_color = None\n p.axis.major_tick_line_color = None\n p.yaxis.axis_label = \"Worker ID\"\n p.xaxis.axis_label = \"Time (s)\"\n\n hover = p.select(HoverTool)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks.hover.tooltips_plot_tasks.return.p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_tasks.hover.tooltips_plot_tasks.return.p", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 310, "end_line": 322, "span_ids": ["plot_tasks"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_tasks(results, dsk, palette=\"Viridis\", label_size=60, **kwargs):\n # ... other code\n hover.tooltips = \"\"\"\n
<div>\n <span style=\"font-size: 14px; font-weight: bold;\">Key:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@key</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Task:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@function</span>\n </div>\n
\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n\n return p", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_resources_fix_bounds.return.start_max_end_start_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_resources_fix_bounds.return.start_max_end_start_m", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 339, "end_line": 425, "span_ids": ["fix_bounds", "plot_resources"], "tokens": 678}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_resources(results, palette=\"Viridis\", **kwargs):\n \"\"\"Plot resource usage in a bokeh plot.\n\n Parameters\n ----------\n results : sequence\n Output of ResourceProfiler.results\n palette : string, optional\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n **kwargs\n Other keyword arguments, passed to bokeh.figure. These will override\n all defaults set by plot_resources.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n from bokeh import palettes\n from bokeh.models import LinearAxis, Range1d\n\n defaults = dict(\n title=\"Profile Results\",\n tools=\"save,reset,xwheel_zoom,xpan\",\n toolbar_location=\"above\",\n width=800,\n height=300,\n )\n # Support plot_width and plot_height for backwards compatibility\n if \"plot_width\" in kwargs:\n kwargs[\"width\"] = kwargs.pop(\"plot_width\")\n if BOKEH_VERSION().major >= 3:\n warnings.warn(\"Use width instead of plot_width with Bokeh >= 3\")\n if \"plot_height\" in kwargs:\n kwargs[\"height\"] = kwargs.pop(\"plot_height\")\n if BOKEH_VERSION().major >= 3:\n warnings.warn(\"Use height instead of plot_height with Bokeh >= 3\")\n\n # Drop `label_size` to match `plot_cache` and `plot_tasks` kwargs\n if \"label_size\" in kwargs:\n kwargs.pop(\"label_size\")\n\n defaults.update(**kwargs)\n\n if results:\n t, mem, cpu = zip(*results)\n left, right = min(t), max(t)\n t = [i - left for i in t]\n p = bp.figure(\n y_range=fix_bounds(0, max(cpu), 100),\n x_range=fix_bounds(0, right - left, 1),\n **defaults,\n )\n else:\n t = mem = cpu = []\n p = bp.figure(y_range=(0, 100), x_range=(0, 1), **defaults)\n colors = palettes.all_palettes[palette][6]\n p.line(\n t,\n cpu,\n color=colors[0],\n line_width=4,\n legend_label=\"% CPU\",\n )\n p.yaxis.axis_label = \"% CPU\"\n p.extra_y_ranges = {\n \"memory\": Range1d(\n *fix_bounds(min(mem) if mem else 0, max(mem) if mem else 100, 100)\n )\n }\n p.line(\n t,\n mem,\n color=colors[2],\n y_range_name=\"memory\",\n line_width=4,\n legend_label=\"Memory\",\n )\n p.add_layout(LinearAxis(y_range_name=\"memory\", axis_label=\"Memory (MB)\"), \"right\")\n p.xaxis.axis_label = \"Time (s)\"\n return p\n\n\ndef fix_bounds(start, end, min_span):\n \"\"\"Adjust end 
point to ensure span of at least `min_span`\"\"\"\n return start, max(end, start + min_span)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_cache_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_plot_cache_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 428, "end_line": 516, "span_ids": ["plot_cache"], "tokens": 757}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def plot_cache(\n results, dsk, start_time, metric_name, palette=\"Viridis\", label_size=60, **kwargs\n):\n \"\"\"Visualize the results of profiling in a bokeh plot.\n\n Parameters\n ----------\n results : sequence\n Output of CacheProfiler.results\n dsk : dict\n The dask graph being profiled.\n start_time : float\n Start time of the profile.\n metric_name : string\n Metric used to measure cache size\n palette : string, optional\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n label_size: int (optional)\n Maximum size of output labels in plot, defaults to 60\n **kwargs\n Other keyword arguments, passed to bokeh.figure. 
These will override\n all defaults set by visualize.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n from bokeh.models import HoverTool\n\n defaults = dict(\n title=\"Profile Results\",\n tools=\"hover,save,reset,wheel_zoom,xpan\",\n toolbar_location=\"above\",\n width=800,\n height=300,\n )\n # Support plot_width and plot_height for backwards compatibility\n if \"plot_width\" in kwargs:\n kwargs[\"width\"] = kwargs.pop(\"plot_width\")\n if BOKEH_VERSION().major >= 3:\n warnings.warn(\"Use width instead of plot_width with Bokeh >= 3\")\n if \"plot_height\" in kwargs:\n kwargs[\"height\"] = kwargs.pop(\"plot_height\")\n if BOKEH_VERSION().major >= 3:\n warnings.warn(\"Use height instead of plot_height with Bokeh >= 3\")\n defaults.update(**kwargs)\n\n if results:\n starts, ends = list(zip(*results))[3:]\n tics = sorted(unique(starts + ends))\n groups = groupby(lambda d: pprint_task(d[1], dsk, label_size), results)\n data = {}\n for k, vals in groups.items():\n cnts = dict.fromkeys(tics, 0)\n for v in vals:\n cnts[v.cache_time] += v.metric\n cnts[v.free_time] -= v.metric\n data[k] = [0] + list(accumulate(add, pluck(1, sorted(cnts.items()))))\n\n tics = [0] + [i - start_time for i in tics]\n p = bp.figure(x_range=[0, max(tics)], **defaults)\n\n for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):\n p.line(\n \"x\",\n \"y\",\n line_color=color,\n line_width=3,\n source=bp.ColumnDataSource(\n {\"x\": tics, \"y\": val, \"label\": [key for i in val]}\n ),\n )\n\n else:\n p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)\n p.yaxis.axis_label = f\"Cache Size ({metric_name})\"\n p.xaxis.axis_label = \"Time (s)\"\n\n hover = p.select(HoverTool)\n hover.tooltips = \"\"\"\n
<div>\n <span style=\"font-size: 14px; font-weight: bold;\">Task:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@label</span>\n </div>\n
\n \"\"\"\n return p", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_test_profiler.assert_prof_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_test_profiler.assert_prof_results_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 33, "end_line": 43, "span_ids": ["test_profiler"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_profiler():\n with prof:\n out = get(dsk, \"e\")\n assert out == 6\n prof_data = sorted(prof.results, key=lambda d: d.key)\n keys = [i.key for i in prof_data]\n assert keys == [\"c\", \"d\", \"e\"]\n tasks = [i.task for i in prof_data]\n assert tasks == [(add, \"a\", \"b\"), (mul, \"a\", \"b\"), (mul, \"c\", \"d\")]\n prof.clear()\n assert prof.results == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_test_resource_profiler.assert_len_rprof_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_test_resource_profiler.assert_len_rprof_results_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 98, "span_ids": ["test_resource_profiler"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not psutil\")\ndef test_resource_profiler():\n with ResourceProfiler(dt=0.01) as rprof:\n get(dsk2, \"c\")\n results = rprof.results\n assert len(results) > 0\n assert all(isinstance(i, tuple) and len(i) == 3 for i in results)\n\n # Tracker stopped on exit\n assert not rprof._is_running()\n\n rprof.clear()\n assert rprof.results == []\n\n # Close is idempotent\n rprof.close()\n assert not rprof._is_running()\n\n # Restarts tracker if already closed\n with rprof:\n get(dsk2, \"c\")\n assert len(rprof.results) > 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_multiple_gets_test_resource_profiler_multiple_gets.assert_not_rprof__is_runn": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_multiple_gets_test_resource_profiler_multiple_gets.assert_not_rprof__is_runn", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 121, "span_ids": ["test_resource_profiler_multiple_gets"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not psutil\")\ndef test_resource_profiler_multiple_gets():\n with ResourceProfiler(dt=0.01) as rprof:\n get(dsk2, \"c\")\n assert len(rprof.results) == 0\n get(dsk2, \"c\")\n results = rprof.results\n assert all(isinstance(i, tuple) and len(i) == 3 for i in results)\n\n rprof.clear()\n rprof.register()\n get(dsk2, \"c\")\n assert len(rprof.results) > 0\n get(dsk2, \"c\")\n rprof.unregister()\n\n results = rprof.results\n assert all(isinstance(i, tuple) and len(i) == 3 for i in results)\n\n rprof.close()\n assert not rprof._is_running()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_test_cache_profiler.assert_CacheProfiler_metr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_test_cache_profiler.assert_CacheProfiler_metr", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 146, "span_ids": ["test_cache_profiler"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cache_profiler():\n with CacheProfiler() as cprof:\n get(dsk2, \"c\")\n results = cprof.results\n assert all(isinstance(i, tuple) and len(i) == 5 for i in results)\n\n cprof.clear()\n assert cprof.results == []\n\n tics = [0]\n\n def nbytes(res):\n tics[0] += 1\n return tics[0]\n\n with CacheProfiler(nbytes) as cprof:\n get(dsk2, \"c\")\n\n results = cprof.results\n assert tics[-1] == len(results)\n assert tics[-1] == results[-1].metric\n assert cprof._metric_name == \"nbytes\"\n assert CacheProfiler(metric=nbytes, metric_name=\"foo\")._metric_name == \"foo\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_register_test_register.try_.finally_.prof_unregister_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_register_test_register.try_.finally_.prof_unregister_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 149, "end_line": 169, "span_ids": ["test_register"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"profiler\",\n [\n Profiler,\n pytest.param(\n lambda: ResourceProfiler(dt=0.01), marks=pytest.mark.skipif(\"not psutil\")\n ),\n CacheProfiler,\n ],\n)\ndef test_register(profiler):\n prof = profiler()\n try:\n prof.register()\n get(dsk2, \"c\")\n n = len(prof.results)\n assert n > 0\n get(dsk2, \"c\")\n assert len(prof.results) > n\n finally:\n prof.unregister()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_unquote_test_unquote.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_unquote_test_unquote.None_2", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 169, "end_line": 183, "span_ids": ["test_unquote"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\ndef test_unquote():\n from dask.diagnostics.profile_visualize import unquote\n\n t = {\"a\": 1, \"b\": 2, \"c\": 3}\n task_dask = (dict, [[\"a\", 1], [\"b\", 2], [\"c\", 3]])\n assert unquote(task_dask) == t\n\n t = {\"a\": [1, 2, 3], \"b\": 2, \"c\": 3}\n task_dask = (dict, [[\"a\", [1, 2, 3]], [\"b\", 2], [\"c\", 3]])\n assert unquote(task_dask) == t\n\n t = [1, 2, 3]\n task_dask = [1, 2, 3]\n assert unquote(task_dask) == t", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_pprint_task_test_pprint_task.None_9": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_pprint_task_test_pprint_task.None_9", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 187, "end_line": 216, "span_ids": ["test_pprint_task"], "tokens": 476}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\ndef test_pprint_task():\n from dask.diagnostics.profile_visualize import pprint_task\n\n keys = {\"a\", \"b\", \"c\", \"d\", \"e\"}\n assert pprint_task((add, \"a\", 1), keys) == \"add(_, *)\"\n assert pprint_task((add, (add, \"a\", 1)), keys) == \"add(add(_, *))\"\n res = \"sum([*, _, add(_, *)])\"\n assert pprint_task((sum, [1, \"b\", (add, \"a\", 1)]), keys) == res\n assert pprint_task((sum, (1, 2, 3, 4, 5, 6, 7)), keys) == \"sum(*)\"\n\n assert len(pprint_task((sum, list(keys) * 100), keys)) < 100\n assert pprint_task((sum, list(keys) * 100), keys) == \"sum([_, _, _, ...])\"\n assert (\n pprint_task((sum, [1, 2, (sum, [\"a\", 4]), 5, 6] * 100), keys)\n == \"sum([*, *, sum([_, *]), ...])\"\n )\n assert (\n pprint_task((sum, [1, 2, (sum, [\"a\", (sum, [1, 2, 3])]), 5, 6]), keys)\n == \"sum([*, *, sum([_, sum(...)]), ...])\"\n )\n\n # With kwargs\n def foo(w, x, y=(), z=3):\n return w + x + sum(y) + z\n\n task = (apply, foo, (tuple, [\"a\", \"b\"]), (dict, [[\"y\", [\"a\", \"b\"]], [\"z\", \"c\"]]))\n assert pprint_task(task, keys) == \"foo(_, _, y=[_, _], z=_)\"\n task = (apply, foo, (tuple, [\"a\", \"b\"]), (dict, [[\"y\", [\"a\", 1]], [\"z\", 1]]))\n assert pprint_task(task, keys) == \"foo(_, _, y=[_, *], z=*)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_plot_test_resource_profiler_plot.for_results_in_1_0.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_resource_profiler_plot_test_resource_profiler_plot.for_results_in_1_0.None_6", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 246, "end_line": 282, "span_ids": ["test_resource_profiler_plot"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\n@pytest.mark.skipif(\"not psutil\")\ndef test_resource_profiler_plot():\n with ResourceProfiler(dt=0.01) as rprof:\n get(dsk2, \"c\")\n p = rprof.visualize(\n width=500,\n height=300,\n tools=\"hover\",\n title=\"Not the default\",\n show=False,\n 
save=False,\n )\n if BOKEH_VERSION().major < 3:\n assert p.plot_width == 500\n assert p.plot_height == 300\n else:\n assert p.width == 500\n assert p.height == 300\n assert len(p.tools) == 1\n assert isinstance(p.tools[0], bokeh.models.HoverTool)\n assert p.title.text == \"Not the default\"\n\n # Test with empty and one point, checking for errors\n rprof.clear()\n for results in [[], [(1.0, 0, 0)]]:\n rprof.results = results\n with warnings.catch_warnings(record=True) as record:\n p = rprof.visualize(show=False, save=False)\n assert not record\n # Check bounds are valid\n assert p.x_range.start == 0\n assert p.x_range.end == 1\n assert p.y_range.start == 0\n assert p.y_range.end == 100\n assert p.extra_y_ranges[\"memory\"].start == 0\n assert p.extra_y_ranges[\"memory\"].end == 100", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_from_operator_import_add__test_no_tasks.check_bar_completed_capsy": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_from_operator_import_add__test_no_tasks.check_bar_completed_capsy", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_progress.py", "file_name": "test_progress.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 90, "span_ids": ["test_register", "imports", "test_minimum_time", "test_array_compute", "test_progressbar", "test_clean_exit", "test_no_tasks", "check_bar_completed", "test_format_time"], "tokens": 664}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from operator import add, mul\n\nimport pytest\n\nfrom dask.callbacks import Callback\nfrom dask.diagnostics import ProgressBar\nfrom dask.diagnostics.progress import format_time\nfrom dask.local import get_sync\nfrom dask.threaded import get as get_threaded\n\ndsk = {\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\"), \"d\": (mul, \"a\", \"b\"), \"e\": (mul, \"c\", \"d\")}\n\n\ndef check_bar_completed(capsys, width=40):\n out, err = capsys.readouterr()\n assert out.count(\"100% Completed\") == 1\n bar, percent, time = (i.strip() for i in out.split(\"\\r\")[-1].split(\"|\"))\n assert bar == \"[\" + \"#\" * width + \"]\"\n assert percent == \"100% Completed\"\n\n\ndef test_array_compute(capsys):\n da = pytest.importorskip(\"dask.array\")\n\n data = da.ones((100, 100), dtype=\"f4\", chunks=(100, 100))\n with ProgressBar():\n out = data.sum().compute()\n assert out == 10000\n check_bar_completed(capsys)\n\n\ndef test_progressbar(capsys):\n with ProgressBar():\n out = get_threaded(dsk, \"e\")\n assert out == 6\n check_bar_completed(capsys)\n with ProgressBar(width=20):\n out = get_threaded(dsk, \"e\")\n check_bar_completed(capsys, 20)\n\n\ndef test_minimum_time(capsys):\n with ProgressBar(10.0):\n out = get_threaded(dsk, \"e\")\n out, err = capsys.readouterr()\n assert out == \"\" and err == \"\"\n\n\n@pytest.mark.parametrize(\"get\", [get_threaded, get_sync])\ndef test_clean_exit(get):\n dsk = {\"a\": 
(lambda: 1 / 0,)}\n try:\n with ProgressBar() as pbar:\n get_threaded(dsk, \"a\")\n except ZeroDivisionError:\n pass\n assert not pbar._running\n assert not pbar._timer.is_alive()\n\n\ndef test_format_time():\n assert format_time(1.4) == \" 1.4s\"\n assert format_time(10.4) == \"10.4s\"\n assert format_time(100.4) == \" 1min 40.4s\"\n assert format_time(1000.4) == \"16min 40.4s\"\n assert format_time(10000.4) == \" 2hr 46min 40.4s\"\n\n\ndef test_register(capsys):\n try:\n assert not Callback.active\n p = ProgressBar()\n p.register()\n\n assert Callback.active\n\n get_threaded(dsk, \"e\")\n check_bar_completed(capsys)\n\n p.unregister()\n\n assert not Callback.active\n finally:\n Callback.active.clear()\n\n\ndef test_no_tasks(capsys):\n with ProgressBar():\n get_threaded({\"x\": 1}, \"x\")\n check_bar_completed(capsys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_cache_test_with_cache.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_cache_test_with_cache.None_1", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_progress.py", "file_name": "test_progress.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 109, "span_ids": ["test_with_cache"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_with_cache(capsys):\n cachey = pytest.importorskip(\"cachey\")\n from dask.cache import Cache\n\n c = cachey.Cache(10000)\n cc = Cache(c)\n\n with cc:\n with ProgressBar():\n assert get_threaded({\"x\": (mul, 1, 2)}, \"x\") == 2\n check_bar_completed(capsys)\n assert c.data[\"x\"] == 2\n\n with cc:\n with ProgressBar():\n assert get_threaded({\"x\": (mul, 1, 2), \"y\": (mul, \"x\", 3)}, \"y\") == 6\n check_bar_completed(capsys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_alias_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_progress.py_test_with_alias_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_progress.py", "file_name": "test_progress.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 132, "span_ids": ["test_store_time", "test_with_alias"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_with_alias(capsys):\n dsk = {\n \"a\": 1,\n \"b\": 2,\n \"c\": (add, \"a\", \"b\"),\n \"d\": (add, 1, 2),\n \"e\": \"d\",\n \"f\": (mul, \"e\", \"c\"),\n }\n with ProgressBar():\n get_threaded(dsk, \"f\")\n check_bar_completed(capsys)\n\n\ndef test_store_time():\n p = ProgressBar()\n with p:\n get_threaded({\"x\": 1}, \"x\")\n\n assert isinstance(p.last_duration, float)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/distributed.py__flake8_noqa_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/distributed.py__flake8_noqa_", "embedding": null, "metadata": {"file_path": "dask/distributed.py", "file_name": "distributed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["__getattr__", "docstring"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# flake8: noqa\n\n_import_error_message = (\n \"dask.distributed is not installed.\\n\\n\"\n \"Please either conda or pip install distributed:\\n\\n\"\n \" conda install dask distributed # either conda install\\n\"\n ' python -m pip install \"dask[distributed]\" --upgrade # or pip install'\n)\n\ntry:\n from distributed import *\nexcept ImportError as e:\n if e.msg == \"No module named 'distributed'\":\n raise ImportError(_import_error_message) from e\n else:\n raise\n\n\ndef __getattr__(value):\n try:\n import distributed\n except ImportError as e:\n raise ImportError(_import_error_message) from e\n return getattr(distributed, value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_has_sub_tasks__UUIDPAT.re_compile_0_9a_z_8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_has_sub_tasks__UUIDPAT.re_compile_0_9a_z_8_", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 63, "span_ids": ["name", "has_sub_tasks", "impl:3"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile(\"([0-9a-z]{32})\")\n_UUIDPAT = 
re.compile(\"([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_label_label.return.s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_label_label.return.s", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 104, "span_ids": ["label"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n\n >>> from uuid import uuid1\n >>> x = 'x-%s-hello' % uuid1()\n >>> x # doctest: +SKIP\n 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n s = str(x)\n for pattern in (_HASHPAT, _UUIDPAT):\n m = re.search(pattern, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = f\"#{n}\"\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = \"#\"\n s = s.replace(h, label)\n return s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_box_label_box_label.if_isinstance_key_tuple_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_box_label_box_label.if_isinstance_key_tuple_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 125, "span_ids": ["box_label"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def box_label(key, verbose=False):\n \"\"\"Label boxes in graph by chunk index\n\n >>> box_label(('x', 1, 2, 3))\n '(1, 2, 3)'\n >>> box_label(('x', 123))\n '123'\n >>> box_label('x')\n ''\n \"\"\"\n if isinstance(key, tuple):\n key = key[1:]\n if len(key) == 1:\n [key] = key\n return str(key)\n elif verbose:\n return str(key)\n else:\n return \"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_to_graphviz_to_graphviz.return.g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_to_graphviz_to_graphviz.return.g", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 196, "span_ids": ["to_graphviz"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_graphviz(\n dsk,\n data_attributes=None,\n function_attributes=None,\n rankdir=\"BT\",\n graph_attr=None,\n node_attr=None,\n edge_attr=None,\n collapse_outputs=False,\n verbose=False,\n **kwargs,\n):\n data_attributes = data_attributes or {}\n function_attributes = function_attributes or {}\n graph_attr = graph_attr or {}\n node_attr = node_attr or {}\n edge_attr = edge_attr or {}\n\n graph_attr[\"rankdir\"] = rankdir\n node_attr[\"fontname\"] = \"helvetica\"\n\n graph_attr.update(kwargs)\n g = graphviz.Digraph(\n graph_attr=graph_attr, node_attr=node_attr, edge_attr=edge_attr\n )\n\n seen = set()\n connected = set()\n\n for k, v in dsk.items():\n k_name = name(k)\n if istask(v):\n func_name = name((k, \"function\")) if not collapse_outputs else k_name\n if collapse_outputs or func_name not in seen:\n seen.add(func_name)\n attrs = function_attributes.get(k, {}).copy()\n attrs.setdefault(\"label\", key_split(k))\n attrs.setdefault(\"shape\", \"circle\")\n g.node(func_name, **attrs)\n if not collapse_outputs:\n g.edge(func_name, k_name)\n connected.add(func_name)\n connected.add(k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n attrs = data_attributes.get(dep, {}).copy()\n attrs.setdefault(\"label\", box_label(dep, verbose))\n attrs.setdefault(\"shape\", \"box\")\n g.node(dep_name, **attrs)\n g.edge(dep_name, func_name)\n connected.add(dep_name)\n connected.add(func_name)\n\n elif ishashable(v) and v in dsk:\n v_name = name(v)\n g.edge(v_name, k_name)\n connected.add(v_name)\n connected.add(k_name)\n\n if (not collapse_outputs or k_name in connected) and k_name not in seen:\n seen.add(k_name)\n attrs = data_attributes.get(k, {}).copy()\n attrs.setdefault(\"label\", box_label(k, verbose))\n attrs.setdefault(\"shape\", \"box\")\n g.node(k_name, **attrs)\n return g", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_IPYTHON_IMAGE_FORMATS__get_display_cls.if_format_in_IPYTHON_NO_D.else_.raise_ValueError_Unknown": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_IPYTHON_IMAGE_FORMATS__get_display_cls.if_format_in_IPYTHON_NO_D.else_.raise_ValueError_Unknown", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 199, "end_line": 230, "span_ids": ["_get_display_cls", "impl:7"], "tokens": 263}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "IPYTHON_IMAGE_FORMATS = frozenset([\"jpeg\", \"png\"])\nIPYTHON_NO_DISPLAY_FORMATS = frozenset([\"dot\", \"pdf\"])\n\n\ndef _get_display_cls(format):\n \"\"\"\n Get the appropriate IPython display class for `format`.\n\n Returns `IPython.display.SVG` if format=='svg', otherwise\n `IPython.display.Image`.\n\n If IPython is not importable, return dummy function that swallows its\n arguments and returns None.\n \"\"\"\n dummy = lambda *args, **kwargs: None\n try:\n import IPython.display as display\n except ImportError:\n # Can't return a display object if no IPython.\n return dummy\n\n if format in IPYTHON_NO_DISPLAY_FORMATS:\n # IPython can't display this format natively, so just return None.\n return dummy\n elif format in IPYTHON_IMAGE_FORMATS:\n # Partially apply `format` so that `Image` and `SVG` supply a uniform\n # interface to the caller.\n return partial(display.Image, format=format)\n elif format == \"svg\":\n return display.SVG\n else:\n raise ValueError(\"Unknown format '%s' passed to `dot_graph`\" % format)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_dot_graph_dot_graph.return.graphviz_to_file_g_filen": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_dot_graph_dot_graph.return.graphviz_to_file_g_filen", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 272, "span_ids": ["dot_graph"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dot_graph(dsk, filename=\"mydask\", format=None, **kwargs):\n \"\"\"\n Render a task graph using dot.\n\n If `filename` is not None, write a file to disk with the specified name and extension.\n If no extension is specified, '.png' will be used by default.\n\n Parameters\n ----------\n dsk : dict\n The graph to display.\n filename : str or None, optional\n The name of the file to write to disk. If the provided `filename`\n doesn't include an extension, '.png' will be used by default.\n If `filename` is None, no file will be written, and we communicate\n with dot using only pipes. Default is 'mydask'.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n **kwargs\n Additional keyword arguments to forward to `to_graphviz`.\n\n Returns\n -------\n result : None or IPython.display.Image or IPython.display.SVG (See below.)\n\n Notes\n -----\n If IPython is installed, we return an IPython.display object in the\n requested format. 
If IPython is not installed, we just return None.\n\n We always return None if format is 'pdf' or 'dot', because IPython can't\n display these formats natively. Passing these formats with filename=None\n will not produce any useful output.\n\n See Also\n --------\n dask.dot.to_graphviz\n \"\"\"\n g = to_graphviz(dsk, **kwargs)\n return graphviz_to_file(g, filename, format)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_graphviz_to_file_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_graphviz_to_file_", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 308, "span_ids": ["graphviz_to_file"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def graphviz_to_file(g, filename, format):\n fmts = [\".png\", \".pdf\", \".dot\", \".svg\", \".jpeg\", \".jpg\"]\n\n if (\n format is None\n and filename is not None\n and any(filename.lower().endswith(fmt) for fmt in fmts)\n ):\n filename, format = os.path.splitext(filename)\n format = format[1:].lower()\n\n if format is None:\n format = \"png\"\n\n data = g.pipe(format=format)\n if not data:\n raise RuntimeError(\n \"Graphviz failed to properly produce an image. \"\n \"This probably means your installation of graphviz \"\n \"is missing png support. 
See: \"\n \"https://github.com/ContinuumIO/anaconda-issues/\"\n \"issues/485 for more information.\"\n )\n\n display_cls = _get_display_cls(format)\n\n if filename is None:\n return display_cls(data=data)\n\n full_filename = \".\".join([filename, format])\n with open(full_filename, \"wb\") as f:\n f.write(data)\n\n return display_cls(filename=full_filename)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_binascii_hashers_append__hash_sha1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_binascii_hashers_append__hash_sha1", "embedding": null, "metadata": {"file_path": "dask/hashing.py", "file_name": "hashing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 68, "span_ids": ["_hash_sha1", "imports", "impl:19"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import binascii\nimport hashlib\n\nhashers = [] # In decreasing performance order\n\n\n# Timings on a largish array:\n# - CityHash is 2x faster than MurmurHash\n# - xxHash is slightly slower than CityHash\n# - MurmurHash is 8x faster than SHA1\n# - SHA1 is significantly faster than all other hashlib algorithms\n\ntry:\n import cityhash # `python -m pip install cityhash`\nexcept ImportError:\n pass\nelse:\n # CityHash disabled unless the reference leak in\n # https://github.com/escherba/python-cityhash/pull/16\n # is fixed.\n if cityhash.__version__ >= \"0.2.2\":\n\n def _hash_cityhash(buf):\n \"\"\"\n Produce a 16-byte hash of *buf* using CityHash.\n \"\"\"\n h = cityhash.CityHash128(buf)\n return h.to_bytes(16, \"little\")\n\n hashers.append(_hash_cityhash)\n\ntry:\n import xxhash # `python -m pip install xxhash`\nexcept ImportError:\n pass\nelse:\n\n def _hash_xxhash(buf):\n \"\"\"\n Produce an 8-byte hash of *buf* using xxHash.\n \"\"\"\n return xxhash.xxh64(buf).digest()\n\n hashers.append(_hash_xxhash)\n\ntry:\n import mmh3 # `python -m pip install mmh3`\nexcept ImportError:\n pass\nelse:\n\n def _hash_murmurhash(buf):\n \"\"\"\n Produce a 16-byte hash of *buf* using MurmurHash.\n \"\"\"\n return mmh3.hash_bytes(buf)\n\n hashers.append(_hash_murmurhash)\n\n\ndef _hash_sha1(buf):\n \"\"\"\n Produce a 20-byte hash of *buf* using SHA1.\n \"\"\"\n return hashlib.sha1(buf).digest()\n\n\nhashers.append(_hash_sha1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_hash_buffer_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/hashing.py_hash_buffer_", "embedding": null, "metadata": {"file_path": "dask/hashing.py", "file_name": "hashing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 71, "end_line": 99, "span_ids": ["hash_buffer", 
"hash_buffer_hex"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def hash_buffer(buf, hasher=None):\n \"\"\"\n Hash a bytes-like (buffer-compatible) object. This function returns\n a good quality hash but is not cryptographically secure. The fastest\n available algorithm is selected. A fixed-length bytes object is returned.\n \"\"\"\n if hasher is not None:\n try:\n return hasher(buf)\n except (TypeError, OverflowError):\n # Some hash libraries may have overly-strict type checking,\n # not accepting all buffers\n pass\n for hasher in hashers:\n try:\n return hasher(buf)\n except (TypeError, OverflowError):\n pass\n raise TypeError(f\"unsupported type for hashing: {type(buf)}\")\n\n\ndef hash_buffer_hex(buf, hasher=None):\n \"\"\"\n Same as hash_buffer, but returns its result in hex-encoded form.\n \"\"\"\n h = hash_buffer(buf, hasher)\n s = binascii.b2a_hex(h)\n return s.decode()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._from_collection_HighLevelGraph._from_collection.return.cls_layers_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._from_collection_HighLevelGraph._from_collection.return.cls_layers_deps_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 644, "end_line": 661, "span_ids": ["HighLevelGraph._from_collection"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n @classmethod\n def _from_collection(cls, name, layer, collection):\n \"\"\"`from_collections` optimized for a single collection\"\"\"\n if not is_dask_collection(collection):\n raise TypeError(type(collection))\n\n graph = collection.__dask_graph__()\n if isinstance(graph, HighLevelGraph):\n layers = ensure_dict(graph.layers, copy=True)\n layers[name] = layer\n deps = ensure_dict(graph.dependencies, copy=True)\n deps[name] = set(collection.__dask_layers__())\n else:\n key = _get_some_layer_name(collection)\n layers = {name: layer, key: graph}\n deps = {name: {key}, key: set()}\n\n return cls(layers, deps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.from_collections_HighLevelGraph.from_collections.return.cls_layers_deps_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.from_collections_HighLevelGraph.from_collections.return.cls_layers_deps_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 663, "end_line": 717, "span_ids": ["HighLevelGraph.from_collections"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n @classmethod\n def from_collections(cls, name, layer, dependencies=()):\n \"\"\"Construct a HighLevelGraph from a new layer and a set of collections\n\n This constructs a HighLevelGraph in the common case where we have a single\n new layer and a set of old collections on which we want to depend.\n\n This pulls out the ``__dask_layers__()`` method of the collections if\n they exist, and adds them to the dependencies for this new layer. It\n also merges all of the layers from all of the dependent collections\n together into the new layers for this graph.\n\n Parameters\n ----------\n name : str\n The name of the new layer\n layer : Mapping\n The graph layer itself\n dependencies : List of Dask collections\n A list of other dask collections (like arrays or dataframes) that\n have graphs themselves\n\n Examples\n --------\n\n In typical usage we make a new task layer, and then pass that layer\n along with all dependent collections to this method.\n\n >>> def add(self, other):\n ... name = 'add-' + tokenize(self, other)\n ... layer = {(name, i): (add, input_key, other)\n ... for i, input_key in enumerate(self.__dask_keys__())}\n ... graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n ... 
return new_collection(name, graph)\n \"\"\"\n if len(dependencies) == 1:\n return cls._from_collection(name, layer, dependencies[0])\n layers = {name: layer}\n deps = {name: set()}\n for collection in toolz.unique(dependencies, key=id):\n if is_dask_collection(collection):\n graph = collection.__dask_graph__()\n if isinstance(graph, HighLevelGraph):\n layers.update(graph.layers)\n deps.update(graph.dependencies)\n deps[name] |= set(collection.__dask_layers__())\n else:\n key = _get_some_layer_name(collection)\n layers[key] = graph\n deps[name].add(key)\n deps[key] = set()\n else:\n raise TypeError(type(collection))\n\n return cls(layers, deps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.validate_HighLevelGraph.validate.for_k_in_dep_key1_.if_self_dependencies_k_.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.validate_HighLevelGraph.validate.for_k_in_dep_key1_.if_self_dependencies_k_.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 872, "end_line": 904, "span_ids": ["HighLevelGraph.validate"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def validate(self):\n # Check dependencies\n for layer_name, deps in self.dependencies.items():\n if layer_name not in self.layers:\n raise ValueError(\n f\"dependencies[{repr(layer_name)}] not found in layers\"\n )\n for dep in deps:\n if dep not in self.dependencies:\n raise ValueError(f\"{repr(dep)} not found in dependencies\")\n\n for layer in self.layers.values():\n assert hasattr(layer, \"annotations\")\n\n # Re-calculate all layer dependencies\n dependencies = compute_layer_dependencies(self.layers)\n\n # Check keys\n dep_key1 = self.dependencies.keys()\n dep_key2 = dependencies.keys()\n if dep_key1 != dep_key2:\n raise ValueError(\n f\"incorrect dependencies keys {set(dep_key1)!r} \"\n f\"expected {set(dep_key2)!r}\"\n )\n\n # Check values\n for k in dep_key1:\n if self.dependencies[k] != dependencies[k]:\n raise ValueError(\n f\"incorrect dependencies[{repr(k)}]: {repr(self.dependencies[k])} \"\n f\"expected {repr(dependencies[k])}\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py___os": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py___os", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 107, "span_ids": ["imports", "docstring"], 
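A hedged sketch of ``HighLevelGraph.from_collections`` in use, assuming dask (with dask.array) is installed; the layer name and the doubling lambda are illustrative:

import dask.array as da
from dask.highlevelgraph import HighLevelGraph

x = da.ones(10, chunks=5)
name = "double-" + x.name
# One new task per chunk of x, written as raw task tuples.
layer = {(name, i): (lambda b: 2 * b, key) for i, key in enumerate(x.__dask_keys__())}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[x])
print(graph.dependencies[name])   # {x.name}: the new layer depends only on x's layer
graph.validate()                  # should agree with the re-derived dependencies above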
"tokens": 880}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nAsynchronous Shared-Memory Scheduler for Dask Graphs.\n\nThis scheduler coordinates several workers to execute tasks in a dask graph in\nparallel. It depends on a ``concurrent.futures.Executor``\nand a corresponding Queue for worker-to-scheduler communication.\n\nIt tries to execute tasks in an order which maintains a small memory footprint\nthroughout execution. It does this by running tasks that allow us to release\ndata resources.\n\n\nTask Selection Policy\n=====================\n\nWhen we complete a task we add more data in to our set of available data; this\nnew data makes new tasks available. We preferentially choose tasks that were\njust made available in a last-in-first-out fashion. We implement this as a\nsimple stack. This results in more depth-first rather than breadth first\nbehavior which encourages us to process batches of data to completion before\nstarting in on new data when possible.\n\nWhen the addition of new data readies multiple tasks simultaneously we add\ntasks to the stack in sorted order so that tasks with greater keynames are run\nfirst. This can be handy to break ties in a predictable fashion.\n\n\nState\n=====\n\nMany functions pass around a ``state`` variable that holds the current state of\nthe computation. This variable consists of several other dictionaries and\nsets, explained below.\n\nConstant state\n--------------\n\n1. dependencies: {x: [a, b ,c]} a,b,c, must be run before x\n2. dependents: {a: [x, y]} a must run before x or y\n\nChanging state\n--------------\n\n### Data\n\n1. cache: available concrete data. {key: actual-data}\n2. released: data that we've seen, used, and released because it is no longer\n needed\n\n### Jobs\n\n1. ready: A fifo stack of ready-to-run tasks\n2. running: A set of tasks currently in execution\n3. finished: A set of finished tasks\n4. waiting: which tasks are still waiting on others :: {key: {keys}}\n Real-time equivalent of dependencies\n5. waiting_data: available data to yet-to-be-run-tasks :: {key: {keys}}\n Real-time equivalent of dependents\n\n\nExamples\n--------\n\n>>> import pprint # doctest: +SKIP\n>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')} # doctest: +SKIP\n>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +SKIP\n{'cache': {'x': 1, 'y': 2},\n 'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}},\n 'dependents': defaultdict(None, {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}}),\n 'finished': set(),\n 'ready': ['z'],\n 'released': set(),\n 'running': set(),\n 'waiting': {'w': {'z'}},\n 'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}}\n\nOptimizations\n=============\n\nWe build this scheduler with out-of-core array operations in mind. To this end\nwe have encoded some particular optimizations.\n\nCompute to release data\n-----------------------\n\nWhen we choose a new task to execute we often have many options. Policies at\nthis stage are cheap and can significantly impact performance. 
One could\nimagine policies that expose parallelism, drive towards a particular output,\netc.\n\nOur current policy is to run tasks that were most recently made available.\n\n\nInlining computations\n---------------------\n\nWe hold on to intermediate computations either in memory or on disk.\n\nFor very cheap computations that may emit new copies of the data, like\n``np.transpose`` or possibly even ``x + 1``, we choose not to store these as\nseparate pieces of data / tasks. Instead we combine them with the computations\nthat require them. This may result in repeated computation but saves\nsignificantly on space and computation complexity.\n\nSee the function ``inline_functions`` for more information.\n\"\"\"\nimport os", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_start_state_from_dask_start_state_from_dask.return.state": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_start_state_from_dask_start_state_from_dask.return.state", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 137, "end_line": 196, "span_ids": ["start_state_from_dask"], "tokens": 567}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def start_state_from_dask(dsk, cache=None, sortkey=None):\n \"\"\"Start state from a dask\n\n Examples\n --------\n\n >>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')} # doctest: +SKIP\n >>> from pprint import pprint # doctest: +SKIP\n >>> pprint(start_state_from_dask(dsk)) # doctest: +SKIP\n {'cache': {'x': 1, 'y': 2},\n 'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}},\n 'dependents': defaultdict(None, {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}}),\n 'finished': set(),\n 'ready': ['z'],\n 'released': set(),\n 'running': set(),\n 'waiting': {'w': {'z'}},\n 'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}}\n \"\"\"\n if sortkey is None:\n sortkey = order(dsk).get\n if cache is None:\n cache = config.get(\"cache\", None)\n if cache is None:\n cache = dict()\n data_keys = set()\n for k, v in dsk.items():\n if not has_tasks(dsk, v):\n cache[k] = v\n data_keys.add(k)\n\n dsk2 = dsk.copy()\n dsk2.update(cache)\n\n dependencies = {k: get_dependencies(dsk2, k) for k in dsk}\n waiting = {k: v.copy() for k, v in dependencies.items() if k not in data_keys}\n\n dependents = reverse_dict(dependencies)\n for a in cache:\n for b in dependents.get(a, ()):\n waiting[b].remove(a)\n waiting_data = {k: v.copy() for k, v in dependents.items() if v}\n\n ready_set = {k for k, v in waiting.items() if not v}\n ready = sorted(ready_set, key=sortkey, reverse=True)\n waiting = {k: v for k, v in waiting.items() if v}\n\n state = {\n \"dependencies\": dependencies,\n \"dependents\": dependents,\n \"waiting\": waiting,\n \"waiting_data\": waiting_data,\n \"cache\": cache,\n \"ready\": ready,\n \"running\": set(),\n \"finished\": set(),\n \"released\": set(),\n }\n\n return 
state", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_finish_task_finish_task.return.state": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_finish_task_finish_task.return.state", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 254, "end_line": 281, "span_ids": ["finish_task"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def finish_task(\n dsk, key, state, results, sortkey, delete=True, release_data=release_data\n):\n \"\"\"\n Update execution state after a task finishes\n\n Mutates. This should run atomically (with a lock).\n \"\"\"\n for dep in sorted(state[\"dependents\"][key], key=sortkey, reverse=True):\n s = state[\"waiting\"][dep]\n s.remove(key)\n if not s:\n del state[\"waiting\"][dep]\n state[\"ready\"].append(dep)\n\n for dep in state[\"dependencies\"][key]:\n if dep in state[\"waiting_data\"]:\n s = state[\"waiting_data\"][dep]\n s.remove(key)\n if not s and dep not in results:\n release_data(dep, state, delete=delete)\n elif delete and dep not in results:\n release_data(dep, state, delete=delete)\n\n state[\"finished\"].add(key)\n state[\"running\"].remove(key)\n\n return state", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_nested_get_identity.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_nested_get_identity.return.x", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 324, "span_ids": ["nested_get", "default_get_id", "identity", "reraise", "default_pack_exception"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def nested_get(ind, coll):\n \"\"\"Get nested index from collection\n\n Examples\n --------\n\n >>> nested_get(1, 'abc')\n 'b'\n >>> nested_get([1, 0], 'abc')\n ('b', 'a')\n >>> nested_get([[1, 0], [0, 1]], 'abc')\n (('b', 'a'), ('a', 'b'))\n \"\"\"\n if isinstance(ind, list):\n return tuple(nested_get(i, coll) for i in ind)\n else:\n return coll[ind]\n\n\ndef default_get_id():\n \"\"\"Default get_id\"\"\"\n return None\n\n\ndef default_pack_exception(e, dumps):\n raise\n\n\ndef reraise(exc, tb=None):\n if exc.__traceback__ is not tb:\n raise exc.with_traceback(tb)\n raise exc\n\n\ndef 
identity(x):\n \"\"\"Identity function. Returns x.\n\n >>> identity(3)\n 3\n \"\"\"\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_3_get_async.dsk.dict_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_3_get_async.dsk.dict_dsk_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 327, "end_line": 420, "span_ids": ["identity", "get_async"], "tokens": 674}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nTask Selection\n--------------\n\nWe often have a choice among many tasks to run next. This choice is both\ncheap and can significantly impact performance.\n\nWe currently select tasks that have recently been made ready. We hope that\nthis last-in-first-out policy reduces memory footprint.\n\"\"\"\n\n\"\"\"\n`get`\n-----\n\nThe main function of the scheduler. ``get`` is the main entry point.\n\"\"\"\n\n\ndef get_async(\n submit,\n num_workers,\n dsk,\n result,\n cache=None,\n get_id=default_get_id,\n rerun_exceptions_locally=None,\n pack_exception=default_pack_exception,\n raise_exception=reraise,\n callbacks=None,\n dumps=identity,\n loads=identity,\n chunksize=None,\n **kwargs,\n):\n \"\"\"Asynchronous get function\n\n This is a general version of various asynchronous schedulers for dask. It\n takes a ``concurrent.futures.Executor.submit`` function to form a more\n specific ``get`` method that walks through the dask graph with parallel\n workers, avoiding repeat computation and minimizing memory use.\n\n Parameters\n ----------\n submit : function\n A ``concurrent.futures.Executor.submit`` function\n num_workers : int\n The number of workers that task submissions can be spread over\n dsk : dict\n A dask dictionary specifying a workflow\n result : key or list of keys\n Keys corresponding to desired data\n cache : dict-like, optional\n Temporary storage of results\n get_id : callable, optional\n Function to return the worker id, takes no arguments. Examples are\n `threading.current_thread` and `multiprocessing.current_process`.\n rerun_exceptions_locally : bool, optional\n Whether to rerun failing tasks in local process to enable debugging\n (False by default)\n pack_exception : callable, optional\n Function to take an exception and ``dumps`` method, and return a\n serialized tuple of ``(exception, traceback)`` to send back to the\n scheduler. Default is to just raise the exception.\n raise_exception : callable, optional\n Function that takes an exception and a traceback, and raises an error.\n callbacks : tuple or list of tuples, optional\n Callbacks are passed in as tuples of length 5. Multiple sets of\n callbacks may be passed in as a list of tuples. For more information,\n see the dask.diagnostics documentation.\n dumps: callable, optional\n Function to serialize task data and results to communicate between\n worker and parent. 
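To make the scheduler state machine above concrete, here is a hedged sketch that drives ``start_state_from_dask`` and ``finish_task`` by hand, assuming dask is installed (``inc`` and ``add`` come from ``dask.utils_test``, as in the optimization.py imports later in this file):

from dask.local import finish_task, sortkey, start_state_from_dask
from dask.utils_test import add, inc

dsk = {"x": 1, "y": 2, "z": (inc, "x"), "w": (add, "z", "y")}
state = start_state_from_dask(dsk, sortkey=sortkey)
print(state["ready"])              # ['z'] -- the only task whose inputs are all in cache

# Pretend a worker executed 'z', record the result, then update the state.
key = state["ready"].pop()
state["running"].add(key)
state["cache"][key] = inc(state["cache"]["x"])
finish_task(dsk, key, state, results={"w"}, sortkey=sortkey)
print(state["ready"], state["finished"])   # ['w'] {'z'}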
Defaults to identity.\n loads: callable, optional\n Inverse function of `dumps`. Defaults to identity.\n chunksize: int, optional\n Size of chunks to use when dispatching work. Defaults to 1.\n If -1, will be computed to evenly divide ready work across workers.\n\n See Also\n --------\n threaded.get\n \"\"\"\n chunksize = chunksize or config.get(\"chunksize\", 1)\n\n queue = Queue()\n\n if isinstance(result, list):\n result_flat = set(flatten(result))\n else:\n result_flat = {result}\n results = set(result_flat)\n\n dsk = dict(dsk)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_get_async.with_local_callbacks_call_get_async.return.nested_get_result_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_get_async.with_local_callbacks_call_get_async.return.nested_get_result_state_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 521, "span_ids": ["get_async"], "tokens": 844}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_async(\n submit,\n num_workers,\n dsk,\n result,\n cache=None,\n get_id=default_get_id,\n rerun_exceptions_locally=None,\n pack_exception=default_pack_exception,\n raise_exception=reraise,\n callbacks=None,\n dumps=identity,\n loads=identity,\n chunksize=None,\n **kwargs,\n):\n # ... 
other code\n with local_callbacks(callbacks) as callbacks:\n _, _, pretask_cbs, posttask_cbs, _ = unpack_callbacks(callbacks)\n started_cbs = []\n succeeded = False\n # if start_state_from_dask fails, we will have something\n # to pass to the final block.\n state = {}\n try:\n for cb in callbacks:\n if cb[0]:\n cb[0](dsk)\n started_cbs.append(cb)\n\n keyorder = order(dsk)\n\n state = start_state_from_dask(dsk, cache=cache, sortkey=keyorder.get)\n\n for _, start_state, _, _, _ in callbacks:\n if start_state:\n start_state(dsk, state)\n\n if rerun_exceptions_locally is None:\n rerun_exceptions_locally = config.get(\"rerun_exceptions_locally\", False)\n\n if state[\"waiting\"] and not state[\"ready\"]:\n raise ValueError(\"Found no accessible jobs in dask\")\n\n def fire_tasks(chunksize):\n \"\"\"Fire off a task to the thread pool\"\"\"\n # Determine chunksize and/or number of tasks to submit\n nready = len(state[\"ready\"])\n if chunksize == -1:\n ntasks = nready\n chunksize = -(ntasks // -num_workers)\n else:\n used_workers = -(len(state[\"running\"]) // -chunksize)\n avail_workers = max(num_workers - used_workers, 0)\n ntasks = min(nready, chunksize * avail_workers)\n\n # Prep all ready tasks for submission\n args = []\n for _ in range(ntasks):\n # Get the next task to compute (most recently added)\n key = state[\"ready\"].pop()\n # Notify task is running\n state[\"running\"].add(key)\n for f in pretask_cbs:\n f(key, dsk, state)\n\n # Prep args to send\n data = {\n dep: state[\"cache\"][dep] for dep in get_dependencies(dsk, key)\n }\n args.append(\n (\n key,\n dumps((dsk[key], data)),\n dumps,\n loads,\n get_id,\n pack_exception,\n )\n )\n\n # Batch submit\n for i in range(-(len(args) // -chunksize)):\n each_args = args[i * chunksize : (i + 1) * chunksize]\n if not each_args:\n break\n fut = submit(batch_execute_tasks, each_args)\n fut.add_done_callback(queue.put)\n\n # Main loop, wait on tasks to finish, insert new ones\n while state[\"waiting\"] or state[\"ready\"] or state[\"running\"]:\n fire_tasks(chunksize)\n for key, res_info, failed in queue_get(queue).result():\n if failed:\n exc, tb = loads(res_info)\n if rerun_exceptions_locally:\n data = {\n dep: state[\"cache\"][dep]\n for dep in get_dependencies(dsk, key)\n }\n task = dsk[key]\n _execute_task(task, data) # Re-execute locally\n else:\n raise_exception(exc, tb)\n res, worker_id = loads(res_info)\n state[\"cache\"][key] = res\n finish_task(dsk, key, state, results, keyorder.get)\n for f in posttask_cbs:\n f(key, res, dsk, state, worker_id)\n\n succeeded = True\n\n finally:\n for _, _, _, _, finish in started_cbs:\n if finish:\n finish(dsk, state, not succeeded)\n\n return nested_get(result, state[\"cache\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_sortkey_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_sortkey_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 530, "end_line": 546, "span_ids": ["sortkey"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sortkey(item):\n \"\"\"Sorting key function that is robust to different types\n\n Both strings and tuples are common key types in dask graphs.\n However In Python 3 one can not compare strings with tuples directly.\n This function maps many types to a form where they can be compared\n\n Examples\n --------\n >>> sortkey('Hello')\n ('str', 'Hello')\n\n >>> sortkey(('x', 1))\n ('tuple', ('x', 1))\n \"\"\"\n return (type(item).__name__, item)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py__Remote_Exception_Han_RemoteException.__getattr__.try_.except_AttributeError_.return.getattr_self_exception_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py__Remote_Exception_Han_RemoteException.__getattr__.try_.except_AttributeError_.return.getattr_self_exception_k", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 78, "span_ids": ["_process_get_id", "RemoteException.__init__", "RemoteException", "RemoteException.__getattr__", "RemoteException.__dir__", "RemoteException.__str__"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -- Remote Exception Handling --\n# By default, tracebacks can't be serialized using pickle. However, the\n# `tblib` library can enable support for this. 
Since we don't mandate\n# that tblib is installed, we do the following:\n#\n# - If tblib is installed, use it to serialize the traceback and reraise\n# in the scheduler process\n# - Otherwise, use a ``RemoteException`` class to contain a serialized\n# version of the formatted traceback, which will then print in the\n# scheduler process.\n#\n# To enable testing of the ``RemoteException`` class even when tblib is\n# installed, we don't wrap the class in the try block below\nclass RemoteException(Exception):\n \"\"\"Remote Exception\n\n Contains the exception and traceback from a remotely run task\n \"\"\"\n\n def __init__(self, exception, traceback):\n self.exception = exception\n self.traceback = traceback\n\n def __str__(self):\n return str(self.exception) + \"\\n\\nTraceback\\n---------\\n\" + self.traceback\n\n def __dir__(self):\n return sorted(set(dir(type(self)) + list(self.__dict__) + dir(self.exception)))\n\n def __getattr__(self, key):\n try:\n return object.__getattribute__(self, key)\n except AttributeError:\n return getattr(self.exception, key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_exceptions_get_context.if_sys_platform_win32.else_.return.multiprocessing_get_conte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_exceptions_get_context.if_sys_platform_win32.else_.return.multiprocessing_get_conte", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 74, "end_line": 142, "span_ids": ["get_context", "impl:12", "remote_exception", "impl:8", "pack_exception", "impl:6"], "tokens": 424}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "exceptions: dict[type[Exception], type[Exception]] = {}\n\n\ndef remote_exception(exc: Exception, tb) -> Exception:\n \"\"\"Wrap an exception type in a dynamically created RemoteException subclass\"\"\"\n if type(exc) in exceptions:\n typ = exceptions[type(exc)]\n return typ(exc, tb)\n else:\n try:\n typ = type(\n exc.__class__.__name__,\n (RemoteException, type(exc)),\n {\"exception_type\": type(exc)},\n )\n exceptions[type(exc)] = typ\n return typ(exc, tb)\n except TypeError:\n return exc\n\n\ntry:\n import tblib.pickling_support\n\n tblib.pickling_support.install()\n\n def _pack_traceback(tb):\n return tb\n\nexcept ImportError:\n\n def _pack_traceback(tb):\n return \"\".join(traceback.format_tb(tb))\n\n def reraise(exc, tb=None):\n exc = remote_exception(exc, tb)\n raise exc\n\n\ndef pack_exception(e, dumps):\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = _pack_traceback(exc_traceback)\n try:\n result = dumps((e, tb))\n except BaseException as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = _pack_traceback(exc_traceback)\n result = dumps((e, tb))\n return result\n\n\n_CONTEXT_UNSUPPORTED = \"\"\"\\\nThe 'multiprocessing.context' configuration option will be ignored on Python 2\nand on 
Windows, because they each only support a single context.\n\"\"\"\n\n\ndef get_context():\n \"\"\"Return the current multiprocessing context.\"\"\"\n # fork context does fork()-without-exec(), which can lead to deadlocks,\n # so default to \"spawn\".\n context_name = config.get(\"multiprocessing.context\", \"spawn\")\n if sys.platform == \"win32\":\n if context_name != \"spawn\":\n # Only spawn is supported on Win32, can't change it:\n warn(_CONTEXT_UNSUPPORTED, UserWarning)\n return multiprocessing\n else:\n return multiprocessing.get_context(context_name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_get_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_get_", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 144, "end_line": 247, "span_ids": ["initialize_worker_process", "get"], "tokens": 803}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(\n dsk,\n keys,\n num_workers=None,\n func_loads=None,\n func_dumps=None,\n optimize_graph=True,\n pool=None,\n chunksize=None,\n **kwargs,\n):\n \"\"\"Multiprocessed get function appropriate for Bags\n\n Parameters\n ----------\n dsk : dict\n dask graph\n keys : object or list\n Desired results from graph\n num_workers : int\n Number of worker processes (defaults to number of cores)\n func_dumps : function\n Function to use for function serialization (defaults to cloudpickle.dumps)\n func_loads : function\n Function to use for function deserialization (defaults to cloudpickle.loads)\n optimize_graph : bool\n If True [default], `fuse` is applied to the graph before computation.\n pool : Executor or Pool\n Some sort of `Executor` or `Pool` to use\n chunksize: int, optional\n Size of chunks to use when dispatching work.\n Defaults to 6 as some batching is helpful.\n If -1, will be computed to evenly divide ready work across workers.\n \"\"\"\n chunksize = chunksize or config.get(\"chunksize\", 6)\n pool = pool or config.get(\"pool\", None)\n num_workers = num_workers or config.get(\"num_workers\", None) or CPU_COUNT\n if pool is None:\n # In order to get consistent hashing in subprocesses, we need to set a\n # consistent seed for the Python hash algorithm. 
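Returning to the remote-exception machinery above, a hedged sketch of the dynamically generated subclasses, assuming dask is installed; the traceback string is a stand-in:

from dask.multiprocessing import RemoteException, remote_exception

err = remote_exception(ValueError("boom"), "Traceback (most recent call last): ...")
print(isinstance(err, ValueError), isinstance(err, RemoteException))  # True True
print(type(err).__name__)  # 'ValueError' -- a generated subclass, cached in `exceptions`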
Unfortunately, there\n # is no way to specify environment variables only for the Pool\n # processes, so we have to rely on environment variables being\n # inherited.\n if os.environ.get(\"PYTHONHASHSEED\") in (None, \"0\"):\n # This number is arbitrary; it was chosen to commemorate\n # https://github.com/dask/dask/issues/6640.\n os.environ[\"PYTHONHASHSEED\"] = \"6640\"\n context = get_context()\n pool = ProcessPoolExecutor(\n num_workers, mp_context=context, initializer=initialize_worker_process\n )\n cleanup = True\n else:\n if isinstance(pool, multiprocessing.pool.Pool):\n pool = MultiprocessingPoolExecutor(pool)\n cleanup = False\n\n # Optimize Dask\n dsk = ensure_dict(dsk)\n dsk2, dependencies = cull(dsk, keys)\n if optimize_graph:\n dsk3, dependencies = fuse(dsk2, keys, dependencies)\n else:\n dsk3 = dsk2\n\n # We specify marshalling functions in order to catch serialization\n # errors and report them to the user.\n loads = func_loads or config.get(\"func_loads\", None) or _loads\n dumps = func_dumps or config.get(\"func_dumps\", None) or _dumps\n\n # Note former versions used a multiprocessing Manager to share\n # a Queue between parent and workers, but this is fragile on Windows\n # (issue #1652).\n try:\n # Run\n result = get_async(\n pool.submit,\n pool._max_workers,\n dsk3,\n keys,\n get_id=_process_get_id,\n dumps=dumps,\n loads=loads,\n pack_exception=pack_exception,\n raise_exception=reraise,\n chunksize=chunksize,\n **kwargs,\n )\n finally:\n if cleanup:\n pool.shutdown()\n return result\n\n\ndef initialize_worker_process():\n \"\"\"\n Initialize a worker process before running any tasks in it.\n \"\"\"\n # If Numpy is already imported, presumably its random state was\n # inherited from the parent => re-seed it.\n np = sys.modules.get(\"numpy\")\n if np is not None:\n np.random.seed()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_math_cull.return.out_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_math_cull.return.out_dependencies", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 62, "span_ids": ["imports", "cull"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport numbers\nimport uuid\nfrom enum import Enum\n\nfrom . 
import config, core, utils\nfrom .core import (\n flatten,\n get_dependencies,\n ishashable,\n istask,\n reverse_dict,\n subs,\n toposort,\n)\nfrom .utils_test import add, inc # noqa: F401\n\n\ndef cull(dsk, keys):\n \"\"\"Return new dask with only the tasks required to calculate keys.\n\n In other words, remove unnecessary tasks from dask.\n ``keys`` may be a single key or list of keys.\n\n Examples\n --------\n\n >>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}\n >>> dsk, dependencies = cull(d, 'out')\n >>> dsk # doctest: +ELLIPSIS\n {'out': (<function add at ...>, 'x', 10), 'x': 1}\n >>> dependencies # doctest: +ELLIPSIS\n {'out': ['x'], 'x': []}\n\n Returns\n -------\n dsk: culled dask graph\n dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate\n other optimizations, notably fuse.\n \"\"\"\n if not isinstance(keys, (list, set)):\n keys = [keys]\n\n seen = set()\n dependencies = dict()\n out = {}\n work = list(set(flatten(keys)))\n\n while work:\n new_work = []\n for k in work:\n dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists\n out[k] = dsk[k]\n dependencies[k] = dependencies_k\n for d in dependencies_k:\n if d not in seen:\n seen.add(d)\n new_work.append(d)\n\n work = new_work\n\n return out, dependencies", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_linear_keys_renamer_default_fused_linear_keys_renamer.if_typ_is_str_.else_.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_linear_keys_renamer_default_fused_linear_keys_renamer.if_typ_is_str_.else_.return.None", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 63, "end_line": 75, "span_ids": ["default_fused_linear_keys_renamer"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def default_fused_linear_keys_renamer(keys):\n \"\"\"Create new keys for fused tasks\"\"\"\n typ = type(keys[0])\n if typ is str:\n names = [utils.key_split(x) for x in keys[:0:-1]]\n names.append(keys[0])\n return \"-\".join(names)\n elif typ is tuple and len(keys[0]) > 0 and isinstance(keys[0][0], str):\n names = [utils.key_split(x) for x in keys[:0:-1]]\n names.append(keys[0][0])\n return (\"-\".join(names),) + keys[0][1:]\n else:\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear_fuse_linear.dependencies._k_set_v_for_k_v_in_de": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear_fuse_linear.dependencies._k_set_v_for_k_v_in_de", "embedding": null, "metadata": {"file_path": 
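A hedged sketch of ``cull`` on a tiny graph, assuming dask is installed:

from dask.optimization import cull
from dask.utils_test import add, inc

d = {"x": 1, "y": (inc, "x"), "out": (add, "x", 10)}
dsk, dependencies = cull(d, "out")
print(sorted(dsk))    # ['out', 'x'] -- 'y' is dropped because 'out' never needs it
print(dependencies)   # {'out': ['x'], 'x': []}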
"dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 80, "end_line": 164, "span_ids": ["fuse_linear"], "tokens": 788}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True):\n \"\"\"Return new dask graph with linear sequence of tasks fused together.\n\n If specified, the keys in ``keys`` keyword argument are *not* fused.\n Supply ``dependencies`` from output of ``cull`` if available to avoid\n recomputing dependencies.\n\n **This function is mostly superseded by ``fuse``**\n\n Parameters\n ----------\n dsk: dict\n keys: list\n dependencies: dict, optional\n {key: [list-of-keys]}. Must be a list to provide count of each key\n This optional input often comes from ``cull``\n rename_keys: bool or func, optional\n Whether to rename fused keys with ``default_fused_linear_keys_renamer``\n or not. Renaming fused keys can keep the graph more understandable\n and comprehensive, but it comes at the cost of additional processing.\n If False, then the top-most key will be used. For advanced usage, a\n func is also accepted, ``new_key = rename_keys(fused_key_list)``.\n\n Examples\n --------\n >>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dsk, dependencies = fuse(d)\n >>> dsk # doctest: +SKIP\n {'a-b-c': (inc, (inc, 1)), 'c': 'a-b-c'}\n >>> dsk, dependencies = fuse(d, rename_keys=False)\n >>> dsk # doctest: +ELLIPSIS\n {'c': (, (, 1))}\n >>> dsk, dependencies = fuse(d, keys=['b'], rename_keys=False)\n >>> dsk # doctest: +ELLIPSIS\n {'b': (, 1), 'c': (, 'b')}\n\n Returns\n -------\n dsk: output graph with keys fused\n dependencies: dict mapping dependencies after fusion. Useful side effect\n to accelerate other downstream optimizations.\n \"\"\"\n if keys is not None and not isinstance(keys, set):\n if not isinstance(keys, list):\n keys = [keys]\n keys = set(flatten(keys))\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}\n\n # locate all members of linear chains\n child2parent = {}\n unfusible = set()\n for parent in dsk:\n deps = dependencies[parent]\n has_many_children = len(deps) > 1\n for child in deps:\n if keys is not None and child in keys:\n unfusible.add(child)\n elif child in child2parent:\n del child2parent[child]\n unfusible.add(child)\n elif has_many_children:\n unfusible.add(child)\n elif child not in unfusible:\n child2parent[child] = parent\n\n # construct the chains from ancestor to descendant\n chains = []\n parent2child = dict(map(reversed, child2parent.items()))\n while child2parent:\n child, parent = child2parent.popitem()\n chain = [child, parent]\n while parent in child2parent:\n parent = child2parent.pop(parent)\n del parent2child[parent]\n chain.append(parent)\n chain.reverse()\n while child in parent2child:\n child = parent2child.pop(child)\n del child2parent[child]\n chain.append(child)\n chains.append(chain)\n\n dependencies = {k: set(v) for k, v in dependencies.items()}\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear.if_rename_keys_is_True___flat_set.return.set_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_linear.if_rename_keys_is_True___flat_set.return.set_x_", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 164, "end_line": 224, "span_ids": ["fuse_linear", "_flat_set"], "tokens": 441}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True):\n # ... other code\n\n if rename_keys is True:\n key_renamer = default_fused_linear_keys_renamer\n elif rename_keys is False:\n key_renamer = None\n else:\n key_renamer = rename_keys\n\n # create a new dask with fused chains\n rv = {}\n fused = set()\n aliases = set()\n is_renamed = False\n for chain in chains:\n if key_renamer is not None:\n new_key = key_renamer(chain)\n is_renamed = (\n new_key is not None and new_key not in dsk and new_key not in rv\n )\n child = chain.pop()\n val = dsk[child]\n while chain:\n parent = chain.pop()\n dependencies[parent].update(dependencies.pop(child))\n dependencies[parent].remove(child)\n val = subs(dsk[parent], child, val)\n fused.add(child)\n child = parent\n fused.add(child)\n if is_renamed:\n rv[new_key] = val\n rv[child] = new_key\n dependencies[new_key] = dependencies[child]\n dependencies[child] = {new_key}\n aliases.add(child)\n else:\n rv[child] = val\n for key, val in dsk.items():\n if key not in fused:\n rv[key] = val\n if aliases:\n for key, deps in dependencies.items():\n for old_key in deps & aliases:\n new_key = rv[old_key]\n deps.remove(old_key)\n deps.add(new_key)\n rv[key] = subs(rv[key], old_key, new_key)\n if keys is not None:\n for key in aliases - keys:\n del rv[key]\n del dependencies[key]\n return rv, dependencies\n\n\ndef _flat_set(x):\n if x is None:\n return set()\n elif isinstance(x, set):\n return x\n elif not isinstance(x, (list, set)):\n x = [x]\n return set(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_inline.return.dsk2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_inline.return.dsk2", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 229, "end_line": 287, "span_ids": ["inline"], "tokens": 608}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def inline(dsk, keys=None, inline_constants=True, dependencies=None):\n \"\"\"Return new dask with the given keys inlined with their values.\n\n Inlines all constants if ``inline_constants`` keyword is True. Note that\n the constant keys will remain in the graph, to remove them follow\n ``inline`` with ``cull``.\n\n Examples\n --------\n\n >>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')}\n >>> inline(d) # doctest: +ELLIPSIS\n {'x': 1, 'y': (, 1), 'z': (, 1, 'y')}\n\n >>> inline(d, keys='y') # doctest: +ELLIPSIS\n {'x': 1, 'y': (, 1), 'z': (, 1, (, 1))}\n\n >>> inline(d, keys='y', inline_constants=False) # doctest: +ELLIPSIS\n {'x': 1, 'y': (, 'x'), 'z': (, 'x', (, 'x'))}\n \"\"\"\n if dependencies and isinstance(next(iter(dependencies.values())), list):\n dependencies = {k: set(v) for k, v in dependencies.items()}\n\n keys = _flat_set(keys)\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k) for k in dsk}\n\n if inline_constants:\n keys.update(\n k\n for k, v in dsk.items()\n if (ishashable(v) and v in dsk) or (not dependencies[k] and not istask(v))\n )\n\n # Keys may depend on other keys, so determine replace order with toposort.\n # The values stored in `keysubs` do not include other keys.\n replaceorder = toposort(\n {k: dsk[k] for k in keys if k in dsk}, dependencies=dependencies\n )\n keysubs = {}\n for key in replaceorder:\n val = dsk[key]\n for dep in keys & dependencies[key]:\n if dep in keysubs:\n replace = keysubs[dep]\n else:\n replace = dsk[dep]\n val = subs(val, dep, replace)\n keysubs[key] = val\n\n # Make new dask with substitutions\n dsk2 = keysubs.copy()\n for key, val in dsk.items():\n if key not in dsk2:\n for item in keys & dependencies[key]:\n val = subs(val, item, keysubs[item])\n dsk2[key] = val\n return dsk2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_functions_inline_functions.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_inline_functions_inline_functions.return.dsk", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 287, "end_line": 340, "span_ids": ["inline_functions"], "tokens": 429}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def inline_functions(\n dsk, output, fast_functions=None, inline_constants=False, dependencies=None\n):\n \"\"\"Inline cheap functions into larger operations\n\n Examples\n --------\n >>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP\n ... 'i': (inc, 'x'),\n ... 'd': (double, 'y'),\n ... 'x': 1, 'y': 1}\n >>> inline_functions(dsk, [], [inc]) # doctest: +SKIP\n {'out': (add, (inc, 'x'), 'd'),\n 'd': (double, 'y'),\n 'x': 1, 'y': 1}\n\n Protect output keys. 
In the example below ``i`` is not inlined because it\n is marked as an output key.\n\n >>> inline_functions(dsk, ['i', 'out'], [inc, double]) # doctest: +SKIP\n {'out': (add, 'i', (double, 'y')),\n 'i': (inc, 'x'),\n 'x': 1, 'y': 1}\n \"\"\"\n if not fast_functions:\n return dsk\n\n output = set(output)\n\n fast_functions = set(fast_functions)\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k) for k in dsk}\n dependents = reverse_dict(dependencies)\n\n def inlinable(v):\n try:\n return functions_of(v).issubset(fast_functions)\n except TypeError:\n return False\n\n keys = [\n k\n for k, v in dsk.items()\n if istask(v) and dependents[k] and k not in output and inlinable(v)\n ]\n\n if keys:\n dsk = inline(\n dsk, keys, inline_constants=inline_constants, dependencies=dependencies\n )\n for k in keys:\n del dsk[k]\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_unwrap_partial_functions_of.return.funcs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_unwrap_partial_functions_of.return.funcs", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 343, "end_line": 374, "span_ids": ["unwrap_partial", "functions_of"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unwrap_partial(func):\n while hasattr(func, \"func\"):\n func = func.func\n return func\n\n\ndef functions_of(task):\n \"\"\"Set of functions contained within nested task\n\n Examples\n --------\n >>> task = (add, (mul, 1, 2), (inc, 3)) # doctest: +SKIP\n >>> functions_of(task) # doctest: +SKIP\n set([add, mul, inc])\n \"\"\"\n funcs = set()\n\n work = [task]\n sequence_types = {list, tuple}\n\n while work:\n new_work = []\n for task in work:\n if type(task) in sequence_types:\n if istask(task):\n funcs.add(unwrap_partial(task[0]))\n new_work.extend(task[1:])\n else:\n new_work.extend(task)\n work = new_work\n\n return funcs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_keys_renamer__default.Default_token": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_default_fused_keys_renamer__default.Default_token", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 377, "end_line": 423, "span_ids": ["Default", "Default.__repr__", "default_fused_keys_renamer", "impl"], "tokens": 420}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
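A hedged sketch of ``inline_functions``, assuming dask is installed; ``double`` is defined locally for illustration:

from dask.optimization import inline_functions
from dask.utils_test import add, inc

def double(x):
    return 2 * x

dsk = {"out": (add, "i", "d"), "i": (inc, "x"), "d": (double, "y"), "x": 1, "y": 1}
out = inline_functions(dsk, ["out"], fast_functions=[inc])
print(sorted(out))   # ['d', 'out', 'x', 'y'] -- 'i' was folded into 'out'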
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def default_fused_keys_renamer(keys, max_fused_key_length=120):\n \"\"\"Create new keys for ``fuse`` tasks.\n\n The optional parameter `max_fused_key_length` is used to limit the maximum string length for each renamed key.\n If this parameter is set to `None`, there is no limit.\n \"\"\"\n it = reversed(keys)\n first_key = next(it)\n typ = type(first_key)\n\n if max_fused_key_length: # Take into account size of hash suffix\n max_fused_key_length -= 5\n\n def _enforce_max_key_limit(key_name):\n if max_fused_key_length and len(key_name) > max_fused_key_length:\n name_hash = f\"{hash(key_name):x}\"[:4]\n key_name = f\"{key_name[:max_fused_key_length]}-{name_hash}\"\n return key_name\n\n if typ is str:\n first_name = utils.key_split(first_key)\n names = {utils.key_split(k) for k in it}\n names.discard(first_name)\n names = sorted(names)\n names.append(first_key)\n concatenated_name = \"-\".join(names)\n return _enforce_max_key_limit(concatenated_name)\n elif typ is tuple and len(first_key) > 0 and isinstance(first_key[0], str):\n first_name = utils.key_split(first_key)\n names = {utils.key_split(k) for k in it}\n names.discard(first_name)\n names = sorted(names)\n names.append(first_key[0])\n concatenated_name = \"-\".join(names)\n return (_enforce_max_key_limit(concatenated_name),) + first_key[1:]\n\n\n# PEP-484 compliant singleton constant\n# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions\nclass Default(Enum):\n token = 0\n\n def __repr__(self) -> str:\n return \"\"\n\n\n_default = Default.token", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.if_keys_is_not_None_and_n_fuse.children_stack_pop.children_stack_pop": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.if_keys_is_not_None_and_n_fuse.children_stack_pop.children_stack_pop", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 498, "end_line": 586, "span_ids": ["fuse"], "tokens": 859}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse(\n dsk,\n keys=None,\n dependencies=None,\n ave_width=_default,\n max_width=_default,\n max_height=_default,\n max_depth_new_edges=_default,\n rename_keys=_default,\n fuse_subgraphs=_default,\n):\n # ... 
other code\n\n if keys is not None and not isinstance(keys, set):\n if not isinstance(keys, list):\n keys = [keys]\n keys = set(flatten(keys))\n\n # Read defaults from dask.yaml and/or user-defined config file\n if ave_width is _default:\n ave_width = config.get(\"optimization.fuse.ave-width\")\n assert ave_width is not _default\n if max_height is _default:\n max_height = config.get(\"optimization.fuse.max-height\")\n assert max_height is not _default\n if max_depth_new_edges is _default:\n max_depth_new_edges = config.get(\"optimization.fuse.max-depth-new-edges\")\n assert max_depth_new_edges is not _default\n if max_depth_new_edges is None:\n max_depth_new_edges = ave_width * 1.5\n if max_width is _default:\n max_width = config.get(\"optimization.fuse.max-width\")\n assert max_width is not _default\n if max_width is None:\n max_width = 1.5 + ave_width * math.log(ave_width + 1)\n if fuse_subgraphs is _default:\n fuse_subgraphs = config.get(\"optimization.fuse.subgraphs\")\n assert fuse_subgraphs is not _default\n if fuse_subgraphs is None:\n fuse_subgraphs = False\n\n if not ave_width or not max_height:\n return dsk, dependencies\n\n if rename_keys is _default:\n rename_keys = config.get(\"optimization.fuse.rename-keys\")\n assert rename_keys is not _default\n if rename_keys is True:\n key_renamer = default_fused_keys_renamer\n elif rename_keys is False:\n key_renamer = None\n elif not callable(rename_keys):\n raise TypeError(\"rename_keys must be a boolean or callable\")\n else:\n key_renamer = rename_keys\n rename_keys = key_renamer is not None\n\n if dependencies is None:\n deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}\n else:\n deps = dict(dependencies)\n\n rdeps = {}\n for k, vals in deps.items():\n for v in vals:\n if v not in rdeps:\n rdeps[v] = [k]\n else:\n rdeps[v].append(k)\n deps[k] = set(vals)\n\n reducible = {k for k, vals in rdeps.items() if len(vals) == 1}\n if keys:\n reducible -= keys\n\n for k, v in dsk.items():\n if type(v) is not tuple and not isinstance(v, (numbers.Number, str)):\n reducible.discard(k)\n\n if not reducible and (\n not fuse_subgraphs or all(len(set(v)) != 1 for v in rdeps.values())\n ):\n # Quick return if there's nothing to do. Only progress if there's tasks\n # fusible by the main `fuse`, or by `fuse_subgraphs` if enabled.\n return dsk, deps\n\n rv = dsk.copy()\n fused_trees = {}\n # These are the stacks we use to store data as we traverse the graph\n info_stack = []\n children_stack = []\n # For speed\n deps_pop = deps.pop\n reducible_add = reducible.add\n reducible_pop = reducible.pop\n reducible_remove = reducible.remove\n fused_trees_pop = fused_trees.pop\n info_stack_append = info_stack.append\n info_stack_pop = info_stack.pop\n children_stack_append = children_stack.append\n children_stack_extend = children_stack.extend\n children_stack_pop = children_stack.pop\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.while_reducible__fuse.return.rv_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse.while_reducible__fuse.return.rv_deps", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 587, "end_line": 847, "span_ids": ["fuse"], "tokens": 1721}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse(\n dsk,\n keys=None,\n dependencies=None,\n ave_width=_default,\n max_width=_default,\n max_height=_default,\n max_depth_new_edges=_default,\n rename_keys=_default,\n fuse_subgraphs=_default,\n):\n # ... other code\n while reducible:\n parent = reducible_pop()\n reducible_add(parent)\n while parent in reducible:\n # Go to the top\n parent = rdeps[parent][0]\n children_stack_append(parent)\n children_stack_extend(reducible & deps[parent])\n while True:\n child = children_stack[-1]\n if child != parent:\n children = reducible & deps[child]\n while children:\n # Depth-first search\n children_stack_extend(children)\n parent = child\n child = children_stack[-1]\n children = reducible & deps[child]\n children_stack_pop()\n # This is a leaf node in the reduction region\n # key, task, fused_keys, height, width, number of nodes, fudge, set of edges\n info_stack_append(\n (\n child,\n rv[child],\n [child] if rename_keys else None,\n 1,\n 1,\n 1,\n 0,\n deps[child] - reducible,\n )\n )\n else:\n children_stack_pop()\n # Calculate metrics and fuse as appropriate\n deps_parent = deps[parent]\n edges = deps_parent - reducible\n children = deps_parent - edges\n num_children = len(children)\n\n if num_children == 1:\n (\n child_key,\n child_task,\n child_keys,\n height,\n width,\n num_nodes,\n fudge,\n children_edges,\n ) = info_stack_pop()\n num_children_edges = len(children_edges)\n\n if fudge > num_children_edges - 1 >= 0:\n fudge = num_children_edges - 1\n edges |= children_edges\n no_new_edges = len(edges) == num_children_edges\n if not no_new_edges:\n fudge += 1\n if (\n (num_nodes + fudge) / height <= ave_width\n and\n # Sanity check; don't go too deep if new levels introduce new edge dependencies\n (no_new_edges or height < max_depth_new_edges)\n ):\n # Perform substitutions as we go\n val = subs(dsk[parent], child_key, child_task)\n deps_parent.remove(child_key)\n deps_parent |= deps_pop(child_key)\n del rv[child_key]\n reducible_remove(child_key)\n if rename_keys:\n child_keys.append(parent)\n fused_trees[parent] = child_keys\n fused_trees_pop(child_key, None)\n\n if children_stack:\n if no_new_edges:\n # Linear fuse\n info_stack_append(\n (\n parent,\n val,\n child_keys,\n height,\n width,\n num_nodes,\n fudge,\n edges,\n )\n )\n else:\n info_stack_append(\n (\n parent,\n val,\n child_keys,\n height + 1,\n width,\n num_nodes + 1,\n fudge,\n edges,\n )\n )\n else:\n rv[parent] = val\n break\n else:\n 
rv[child_key] = child_task\n reducible_remove(child_key)\n if children_stack:\n # Allow the parent to be fused, but only under strict circumstances.\n # Ensure that linear chains may still be fused.\n if fudge > int(ave_width - 1):\n fudge = int(ave_width - 1)\n # This task *implicitly* depends on `edges`\n info_stack_append(\n (\n parent,\n rv[parent],\n [parent] if rename_keys else None,\n 1,\n width,\n 1,\n fudge,\n edges,\n )\n )\n else:\n break\n else:\n child_keys = []\n height = 1\n width = 0\n num_single_nodes = 0\n num_nodes = 0\n fudge = 0\n children_edges = set()\n max_num_edges = 0\n children_info = info_stack[-num_children:]\n del info_stack[-num_children:]\n for (\n cur_key,\n cur_task,\n cur_keys,\n cur_height,\n cur_width,\n cur_num_nodes,\n cur_fudge,\n cur_edges,\n ) in children_info:\n if cur_height == 1:\n num_single_nodes += 1\n elif cur_height > height:\n height = cur_height\n width += cur_width\n num_nodes += cur_num_nodes\n fudge += cur_fudge\n if len(cur_edges) > max_num_edges:\n max_num_edges = len(cur_edges)\n children_edges |= cur_edges\n # Fudge factor to account for possible parallelism with the boundaries\n num_children_edges = len(children_edges)\n fudge += min(\n num_children - 1, max(0, num_children_edges - max_num_edges)\n )\n\n if fudge > num_children_edges - 1 >= 0:\n fudge = num_children_edges - 1\n edges |= children_edges\n no_new_edges = len(edges) == num_children_edges\n if not no_new_edges:\n fudge += 1\n if (\n (num_nodes + fudge) / height <= ave_width\n and num_single_nodes <= ave_width\n and width <= max_width\n and height <= max_height\n and\n # Sanity check; don't go too deep if new levels introduce new edge dependencies\n (no_new_edges or height < max_depth_new_edges)\n ):\n # Perform substitutions as we go\n val = dsk[parent]\n children_deps = set()\n for child_info in children_info:\n cur_child = child_info[0]\n val = subs(val, cur_child, child_info[1])\n del rv[cur_child]\n children_deps |= deps_pop(cur_child)\n reducible_remove(cur_child)\n if rename_keys:\n fused_trees_pop(cur_child, None)\n child_keys.extend(child_info[2])\n deps_parent -= children\n deps_parent |= children_deps\n\n if rename_keys:\n child_keys.append(parent)\n fused_trees[parent] = child_keys\n\n if children_stack:\n info_stack_append(\n (\n parent,\n val,\n child_keys,\n height + 1,\n width,\n num_nodes + 1,\n fudge,\n edges,\n )\n )\n else:\n rv[parent] = val\n break\n else:\n for child_info in children_info:\n rv[child_info[0]] = child_info[1]\n reducible_remove(child_info[0])\n if children_stack:\n # Allow the parent to be fused, but only under strict circumstances.\n # Ensure that linear chains may still be fused.\n if width > max_width:\n width = max_width\n if fudge > int(ave_width - 1):\n fudge = int(ave_width - 1)\n # key, task, height, width, number of nodes, fudge, set of edges\n # This task *implicitly* depends on `edges`\n info_stack_append(\n (\n parent,\n rv[parent],\n [parent] if rename_keys else None,\n 1,\n width,\n 1,\n fudge,\n edges,\n )\n )\n else:\n break\n # Traverse upwards\n parent = rdeps[parent][0]\n\n if fuse_subgraphs:\n _inplace_fuse_subgraphs(rv, keys, deps, fused_trees, rename_keys)\n\n if key_renamer:\n for root_key, fused_keys in fused_trees.items():\n alias = key_renamer(fused_keys)\n if alias is not None and alias not in rv:\n rv[alias] = rv[root_key]\n rv[root_key] = alias\n deps[alias] = deps[root_key]\n deps[root_key] = {alias}\n\n return rv, deps", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py__inplace_fuse_subgraphs__inplace_fuse_subgraphs.for_chain_in_chains_.if_rename_keys_.fused_trees_outkey_cha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py__inplace_fuse_subgraphs__inplace_fuse_subgraphs.for_chain_in_chains_.if_rename_keys_.fused_trees_outkey_cha", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 850, "end_line": 918, "span_ids": ["_inplace_fuse_subgraphs"], "tokens": 541}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _inplace_fuse_subgraphs(dsk, keys, dependencies, fused_trees, rename_keys):\n \"\"\"Subroutine of fuse.\n\n Mutates dsk, depenencies, and fused_trees inplace\"\"\"\n # locate all members of linear chains\n child2parent = {}\n unfusible = set()\n for parent in dsk:\n deps = dependencies[parent]\n has_many_children = len(deps) > 1\n for child in deps:\n if keys is not None and child in keys:\n unfusible.add(child)\n elif child in child2parent:\n del child2parent[child]\n unfusible.add(child)\n elif has_many_children:\n unfusible.add(child)\n elif child not in unfusible:\n child2parent[child] = parent\n\n # construct the chains from ancestor to descendant\n chains = []\n parent2child = {v: k for k, v in child2parent.items()}\n while child2parent:\n child, parent = child2parent.popitem()\n chain = [child, parent]\n while parent in child2parent:\n parent = child2parent.pop(parent)\n del parent2child[parent]\n chain.append(parent)\n chain.reverse()\n while child in parent2child:\n child = parent2child.pop(child)\n del child2parent[child]\n chain.append(child)\n # Skip chains with < 2 executable tasks\n ntasks = 0\n for key in chain:\n ntasks += istask(dsk[key])\n if ntasks > 1:\n chains.append(chain)\n break\n\n # Mutate dsk fusing chains into subgraphs\n for chain in chains:\n subgraph = {k: dsk[k] for k in chain}\n outkey = chain[0]\n\n # Update dependencies and graph\n inkeys_set = dependencies[outkey] = dependencies[chain[-1]]\n for k in chain[1:]:\n del dependencies[k]\n del dsk[k]\n\n # Create new task\n inkeys = tuple(inkeys_set)\n dsk[outkey] = (SubgraphCallable(subgraph, outkey, inkeys),) + inkeys\n\n # Mutate `fused_trees` if key renaming is needed (renaming done in fuse)\n if rename_keys:\n chain2 = []\n for k in chain:\n subchain = fused_trees.pop(k, False)\n if subchain:\n chain2.extend(subchain)\n else:\n chain2.append(k)\n fused_trees[outkey] = chain2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_SubgraphCallable_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_SubgraphCallable_", 
"embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 927, "end_line": 976, "span_ids": ["SubgraphCallable.__hash__", "SubgraphCallable.__repr__", "SubgraphCallable.__eq__", "SubgraphCallable.__reduce__", "SubgraphCallable.__ne__", "SubgraphCallable.__call__", "SubgraphCallable", "SubgraphCallable.__init__"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SubgraphCallable:\n \"\"\"Create a callable object from a dask graph.\n\n Parameters\n ----------\n dsk : dict\n A dask graph\n outkey : hashable\n The output key from the graph\n inkeys : list\n A list of keys to be used as arguments to the callable.\n name : str, optional\n The name to use for the function.\n \"\"\"\n\n __slots__ = (\"dsk\", \"outkey\", \"inkeys\", \"name\")\n\n def __init__(self, dsk, outkey, inkeys, name=None):\n self.dsk = dsk\n self.outkey = outkey\n self.inkeys = inkeys\n if name is None:\n name = f\"subgraph_callable-{uuid.uuid4()}\"\n self.name = name\n\n def __repr__(self):\n return self.name\n\n def __eq__(self, other):\n return (\n type(self) is type(other)\n and self.name == other.name\n and self.outkey == other.outkey\n and set(self.inkeys) == set(other.inkeys)\n )\n\n def __ne__(self, other):\n return not (self == other)\n\n def __call__(self, *args):\n if not len(args) == len(self.inkeys):\n raise ValueError(\"Expected %d args, got %d\" % (len(self.inkeys), len(args)))\n return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))\n\n def __reduce__(self):\n return (SubgraphCallable, (self.dsk, self.outkey, self.inkeys, self.name))\n\n def __hash__(self):\n return hash(tuple((self.outkey, frozenset(self.inkeys), self.name)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_r_Static_order_of_node_add": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_r_Static_order_of_node_add", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 82, "span_ids": ["imports", "docstring"], "tokens": 700}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "r\"\"\" Static order of nodes in dask graph\n\nDask makes decisions on what tasks to prioritize both\n\n* Dynamically at runtime\n* Statically before runtime\n\nDynamically we prefer to run tasks that were just made available. 
However when\nseveral tasks become available at the same time we have an opportunity to break\nties in an intelligent way\n\n d\n |\n b c\n \\ /\n a\n\nFor example after we finish ``a`` we can choose to run either ``b`` or ``c``\nnext. Making small decisions like this can greatly affect our performance,\nespecially because the order in which we run tasks affects the order in which\nwe can release memory, which operationally we find to have a large effect on\nmany computations. We want to run tasks in such a way that we keep only a small\namount of data in memory at any given time.\n\n\nStatic Ordering\n---------------\n\nAnd so we create a total ordering over all nodes to serve as a tie breaker. We\nrepresent this ordering with a dictionary mapping keys to integer values.\nLower scores have higher priority. These scores correspond to the order in\nwhich a sequential scheduler would visit each node.\n\n {'a': 0,\n 'c': 1,\n 'd': 2,\n 'b': 3}\n\nThere are several ways in which we might order our keys. This is a nuanced\nprocess that has to take into account many different kinds of workflows, and\noperate efficiently in linear time. We strongly recommend that readers look at\nthe docstrings of tests in dask/tests/test_order.py. These tests usually have\ngraph types laid out very carefully to show the kinds of situations that often\narise, and the order we would like to be determined.\n\n\nPolicy\n------\n\nWork towards *small goals* with *big steps*.\n\n1. **Small goals**: prefer tasks that have few total dependents and whose final\n dependents have few total dependencies.\n\n We prefer to prioritize those tasks that help branches of computation that\n can terminate quickly.\n\n With more detail, we compute the total number of dependencies that each\n task depends on (both its own dependencies, and the dependencies of its\n dependencies, and so on), and then we choose those tasks that drive towards\n results with a low number of total dependencies. We choose to prioritize\n tasks that work towards finishing shorter computations first.\n\n2. **Big steps**: prefer tasks with many dependents\n\n However, many tasks work towards the same final dependents. Among those,\n we choose those tasks with the most work left to do. We want to finish\n the larger portions of a sub-computation before we start on the smaller\n ones.\n\n3. **Name comparison**: break ties with key name\n\n Often graphs are made with regular keynames. When no other structural\n difference exists between two keys, use the key name to break ties.\n This relies on the regularity of graph constructors like dask.array to be a\n good proxy for ordering. 
This is usually a good idea and a sane default.\n\"\"\"\nfrom collections import defaultdict, namedtuple\nfrom math import log\n\nfrom .core import get_dependencies, get_deps, getcycle, reverse_dict # noqa: F401\nfrom .utils_test import add, inc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics_graph_metrics.for_key_deps_in_dependen.if_not_deps_.for_child_in_dependencies.if_not_num_needed_child_.current_append_child_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics_graph_metrics.for_key_deps_in_dependen.if_not_deps_.for_child_in_dependencies.if_not_num_needed_child_.current_append_child_", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 512, "end_line": 598, "span_ids": ["graph_metrics"], "tokens": 688}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def graph_metrics(dependencies, dependents, total_dependencies):\n r\"\"\"Useful measures of a graph used by ``dask.order.order``\n\n Example DAG (a1 has no dependencies; b2 and c1 are root nodes):\n\n c1\n |\n b1 b2\n \\ /\n a1\n\n For each key we return:\n\n 1. **total_dependents**: The number of keys that can only be run\n after this key is run. The root nodes have value 1 while deep child\n nodes will have larger values.\n\n 1\n |\n 2 1\n \\ /\n 4\n\n 2. **min_dependencies**: The minimum value of the total number of\n dependencies of all final dependents (see module-level comment for more).\n In other words, the minimum of ``ndependencies`` of root\n nodes connected to the current node.\n\n 3\n |\n 3 2\n \\ /\n 2\n\n 3. **max_dependencies**: The maximum value of the total number of\n dependencies of all final dependents (see module-level comment for more).\n In other words, the maximum of ``ndependencies`` of root\n nodes connected to the current node.\n\n 3\n |\n 3 2\n \\ /\n 3\n\n 4. **min_height**: The minimum height from a root node\n\n 0\n |\n 1 0\n \\ /\n 1\n\n 5. 
**max_height**: The maximum height from a root node\n\n 0\n |\n 1 0\n \\ /\n 2\n\n Examples\n --------\n >>> dsk = {'a1': 1, 'b1': (inc, 'a1'), 'b2': (inc, 'a1'), 'c1': (inc, 'b1')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> _, total_dependencies = ndependencies(dependencies, dependents)\n >>> metrics = graph_metrics(dependencies, dependents, total_dependencies)\n >>> sorted(metrics.items())\n [('a1', (4, 2, 3, 1, 2)), ('b1', (2, 3, 3, 1, 1)), ('b2', (1, 2, 2, 0, 0)), ('c1', (1, 3, 3, 0, 0))]\n\n Returns\n -------\n metrics: Dict[key, Tuple[int, int, int, int, int]]\n \"\"\"\n result = {}\n num_needed = {k: len(v) for k, v in dependents.items() if v}\n current = []\n current_pop = current.pop\n current_append = current.append\n for key, deps in dependents.items():\n if not deps:\n val = total_dependencies[key]\n result[key] = (1, val, val, 0, 0)\n for child in dependencies[key]:\n num_needed[child] -= 1\n if not num_needed[child]:\n current_append(child)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics.while_current__graph_metrics.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_graph_metrics.while_current__graph_metrics.return.result", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 581, "end_line": 619, "span_ids": ["graph_metrics"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def graph_metrics(dependencies, dependents, total_dependencies):\n # ... 
other code\n\n while current:\n key = current_pop()\n parents = dependents[key]\n if len(parents) == 1:\n (parent,) = parents\n (\n total_dependents,\n min_dependencies,\n max_dependencies,\n min_heights,\n max_heights,\n ) = result[parent]\n result[key] = (\n 1 + total_dependents,\n min_dependencies,\n max_dependencies,\n 1 + min_heights,\n 1 + max_heights,\n )\n else:\n (\n total_dependents,\n min_dependencies,\n max_dependencies,\n min_heights,\n max_heights,\n ) = zip(*(result[parent] for parent in dependents[key]))\n result[key] = (\n 1 + sum(total_dependents),\n min(min_dependencies),\n max(max_dependencies),\n 1 + min(min_heights),\n 1 + max(max_heights),\n )\n for child in dependencies[key]:\n num_needed[child] -= 1\n if not num_needed[child]:\n current_append(child)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_ndependencies_ndependencies.return.num_dependencies_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_ndependencies_ndependencies.return.num_dependencies_result", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 622, "end_line": 665, "span_ids": ["ndependencies"], "tokens": 324}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ndependencies(dependencies, dependents):\n \"\"\"Number of total data elements on which this key depends\n\n For each key we return the number of tasks that must be run for us to run\n this task.\n\n Examples\n --------\n >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}\n >>> dependencies, dependents = get_deps(dsk)\n >>> num_dependencies, total_dependencies = ndependencies(dependencies, dependents)\n >>> sorted(total_dependencies.items())\n [('a', 1), ('b', 2), ('c', 3)]\n\n Returns\n -------\n num_dependencies: Dict[key, int]\n total_dependencies: Dict[key, int]\n \"\"\"\n num_needed = {}\n result = {}\n for k, v in dependencies.items():\n num_needed[k] = len(v)\n if not v:\n result[k] = 1\n\n num_dependencies = num_needed.copy()\n current = []\n current_pop = current.pop\n current_append = current.append\n\n for key in result:\n for parent in dependents[key]:\n num_needed[parent] -= 1\n if not num_needed[parent]:\n current_append(parent)\n while current:\n key = current_pop()\n result[key] = 1 + sum(result[child] for child in dependencies[key])\n for parent in dependents[key]:\n num_needed[parent] -= 1\n if not num_needed[parent]:\n current_append(parent)\n return num_dependencies, result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_from_collections_import_d_Traverser.skip.self.term.self__stack_pop_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_from_collections_import_d_Traverser.skip.self.term.self__stack_pop_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 85, "span_ids": ["Traverser.__init__", "Traverser.current", "imports", "Traverser.next", "Traverser.copy", "Traverser", "head", "Traverser.__iter__", "args", "Traverser.skip"], "tokens": 433}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from collections import deque\n\nfrom dask.core import istask, subs\n\n\ndef head(task):\n \"\"\"Return the top level node of a task\"\"\"\n\n if istask(task):\n return task[0]\n elif isinstance(task, list):\n return list\n else:\n return task\n\n\ndef args(task):\n \"\"\"Get the arguments for the current task\"\"\"\n\n if istask(task):\n return task[1:]\n elif isinstance(task, list):\n return task\n else:\n return ()\n\n\nclass Traverser:\n \"\"\"Traverser interface for tasks.\n\n Class for storing the state while performing a preorder-traversal of a\n task.\n\n Parameters\n ----------\n term : task\n The task to be traversed\n\n Attributes\n ----------\n term\n The current element in the traversal\n current\n The head of the current element in the traversal. This is simply `head`\n applied to the attribute `term`.\n \"\"\"\n\n def __init__(self, term, stack=None):\n self.term = term\n if not stack:\n self._stack = deque([END])\n else:\n self._stack = stack\n\n def __iter__(self):\n while self.current is not END:\n yield self.current\n self.next()\n\n def copy(self):\n \"\"\"Copy the traverser in its current state.\n\n This allows the traversal to be pushed onto a stack, for easy\n backtracking.\"\"\"\n\n return Traverser(self.term, deque(self._stack))\n\n def next(self):\n \"\"\"Proceed to the next term in the preorder traversal.\"\"\"\n\n subterms = args(self.term)\n if not subterms:\n # No subterms, pop off stack\n self.term = self._stack.pop()\n else:\n self.term = subterms[0]\n self._stack.extend(reversed(subterms[1:]))\n\n @property\n def current(self):\n return head(self.term)\n\n def skip(self):\n \"\"\"Skip over all subterms of the current level in the traversal\"\"\"\n self.term = self._stack.pop()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Token_END.Token_end_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Token_END.Token_end_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 104, "span_ids": ["Token", "Token.__init__", "Token.__repr__", "impl"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Token:\n \"\"\"A token object.\n\n Used to express certain objects in the traversal of a task or pattern.\"\"\"\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return self.name\n\n\n# A variable to represent *all* variables in a discrimination net\nVAR = Token(\"?\")\n# Represents the end of the traversal of an expression. We can't use `None`,\n# 'False', etc... here, as anything may be an argument to a function.\nEND = Token(\"end\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Node_Node.patterns.return.self_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_Node_Node.patterns.return.self_1_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 125, "span_ids": ["Node.patterns", "Node.__new__", "Node", "Node.edges"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(tuple):\n \"\"\"A Discrimination Net node.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls, edges=None, patterns=None):\n edges = edges if edges else {}\n patterns = patterns if patterns else []\n return tuple.__new__(cls, (edges, patterns))\n\n @property\n def edges(self):\n \"\"\"A dictionary, where the keys are edges, and the values are nodes\"\"\"\n return self[0]\n\n @property\n def patterns(self):\n \"\"\"A list of all patterns that currently match at this node\"\"\"\n return self[1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RewriteRule_RewriteRule.__repr__.return.str_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RewriteRule_RewriteRule.__repr__.return.str_self_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 197, "span_ids": ["RewriteRule.__repr__", "RewriteRule.__str__", "RewriteRule._apply", "RewriteRule", "RewriteRule.__init__"], "tokens": 611}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RewriteRule:\n \"\"\"A rewrite rule.\n\n Expresses `lhs` -> `rhs`, for variables `vars`.\n\n Parameters\n ----------\n lhs : task\n The left-hand-side of the 
rewrite rule.\n rhs : task or function\n The right-hand-side of the rewrite rule. If it's a task, variables in\n `rhs` will be replaced by terms in the subject that match the variables\n in `lhs`. If it's a function, the function will be called with a dict\n of such matches.\n vars: tuple, optional\n Tuple of variables found in the lhs. Variables can be represented as\n any hashable object; a good convention is to use strings. If there are\n no variables, this can be omitted.\n\n Examples\n --------\n Here's a `RewriteRule` to replace all nested calls to `list`, so that\n `(list, (list, 'x'))` is replaced with `(list, 'x')`, where `'x'` is a\n variable.\n\n >>> import dask.rewrite as dr\n >>> lhs = (list, (list, 'x'))\n >>> rhs = (list, 'x')\n >>> variables = ('x',)\n >>> rule = dr.RewriteRule(lhs, rhs, variables)\n\n Here's a more complicated rule that uses a callable right-hand-side. A\n callable `rhs` takes in a dictionary mapping variables to their matching\n values. This rule replaces all occurrences of `(list, 'x')` with `'x'` if\n `'x'` is a list itself.\n\n >>> lhs = (list, 'x')\n >>> def repl_list(sd):\n ... x = sd['x']\n ... if isinstance(x, list):\n ... return x\n ... else:\n ... return (list, x)\n >>> rule = dr.RewriteRule(lhs, repl_list, variables)\n \"\"\"\n\n def __init__(self, lhs, rhs, vars=()):\n if not isinstance(vars, tuple):\n raise TypeError(\"vars must be a tuple of variables\")\n self.lhs = lhs\n if callable(rhs):\n self.subs = rhs\n else:\n self.subs = self._apply\n self.rhs = rhs\n self._varlist = [t for t in Traverser(lhs) if t in vars]\n # Reduce vars down to just variables found in lhs\n self.vars = tuple(sorted(set(self._varlist)))\n\n def _apply(self, sub_dict):\n term = self.rhs\n for key, val in sub_dict.items():\n term = subs(term, key, val)\n return term\n\n def __str__(self):\n return f\"RewriteRule({self.lhs}, {self.rhs}, {self.vars})\"\n\n def __repr__(self):\n return str(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet_RuleSet.__init__.for_p_in_rules_.self_add_p_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet_RuleSet.__init__.for_p_in_rules_.self_add_p_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 200, "end_line": 252, "span_ids": ["RuleSet.__init__", "RuleSet"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet:\n \"\"\"A set of rewrite rules.\n\n Forms a structure for fast rewriting over a set of rewrite rules. This\n allows for syntactic matching of terms to patterns for many patterns at\n the same time.\n\n Examples\n --------\n\n >>> import dask.rewrite as dr\n >>> def f(*args): pass\n >>> def g(*args): pass\n >>> def h(*args): pass\n >>> from operator import add\n\n >>> rs = dr.RuleSet(\n ... dr.RewriteRule((add, 'x', 0), 'x', ('x',)),\n ... 
dr.RewriteRule((f, (g, 'x'), 'y'),\n ... (h, 'x', 'y'),\n ... ('x', 'y')))\n\n >>> rs.rewrite((add, 2, 0))\n 2\n\n >>> rs.rewrite((f, (g, 'a', 3))) # doctest: +ELLIPSIS\n (<function h at ...>, 'a', 3)\n\n >>> dsk = {'a': (add, 2, 0),\n ... 'b': (f, (g, 'a', 3))}\n\n >>> from toolz import valmap\n >>> valmap(rs.rewrite, dsk) # doctest: +ELLIPSIS\n {'a': 2, 'b': (<function h at ...>, 'a', 3)}\n\n Attributes\n ----------\n rules : list\n A list of `RewriteRule`s included in the `RuleSet`.\n \"\"\"\n\n def __init__(self, *rules):\n \"\"\"Create a `RuleSet` for a number of rules\n\n Parameters\n ----------\n rules\n One or more instances of RewriteRule\n \"\"\"\n self._net = Node()\n self.rules = []\n for p in rules:\n self.add(p)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.add_RuleSet.add.self_rules_append_rule_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.add_RuleSet.add.self_rules_append_rule_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 255, "end_line": 280, "span_ids": ["RuleSet.add"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet:\n\n def add(self, rule):\n \"\"\"Add a rule to the RuleSet.\n\n Parameters\n ----------\n rule : RewriteRule\n \"\"\"\n\n if not isinstance(rule, RewriteRule):\n raise TypeError(\"rule must be instance of RewriteRule\")\n vars = rule.vars\n curr_node = self._net\n ind = len(self.rules)\n # List of variables, in order they appear in the POT of the term\n for t in Traverser(rule.lhs):\n prev_node = curr_node\n if t in vars:\n t = VAR\n if t in curr_node.edges:\n curr_node = curr_node.edges[t]\n else:\n curr_node.edges[t] = Node()\n curr_node = curr_node.edges[t]\n # We've reached a leaf node. 
Add the term index to this leaf.\n prev_node.edges[t].patterns.append(ind)\n self.rules.append(rule)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.iter_matches_RuleSet._rewrite.return.term": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.iter_matches_RuleSet._rewrite.return.term", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 282, "end_line": 312, "span_ids": ["RuleSet._rewrite", "RuleSet.iter_matches"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet:\n\n def iter_matches(self, term):\n \"\"\"A generator that lazily finds matchings for term from the RuleSet.\n\n Parameters\n ----------\n term : task\n\n Yields\n ------\n Tuples of `(rule, subs)`, where `rule` is the rewrite rule being\n matched, and `subs` is a dictionary mapping the variables in the lhs\n of the rule to their matching values in the term.\"\"\"\n\n S = Traverser(term)\n for m, syms in _match(S, self._net):\n for i in m:\n rule = self.rules[i]\n subs = _process_match(rule, syms)\n if subs is not None:\n yield rule, subs\n\n def _rewrite(self, term):\n \"\"\"Apply the rewrite rules in RuleSet to top level of term\"\"\"\n\n for rule, sd in self.iter_matches(term):\n # We use for (...) because it's fast in all cases for getting the\n # first element from the match iterator. 
As we only want that\n # element, we break here\n term = rule.subs(sd)\n break\n return term", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.rewrite_RuleSet.rewrite.return.strategies_strategy_self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py_RuleSet.rewrite_RuleSet.rewrite.return.strategies_strategy_self", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 314, "end_line": 355, "span_ids": ["RuleSet.rewrite"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RuleSet:\n\n def rewrite(self, task, strategy=\"bottom_up\"):\n \"\"\"Apply the `RuleSet` to `task`.\n\n This applies the most specific matching rule in the RuleSet to the\n task, using the provided strategy.\n\n Parameters\n ----------\n term: a task\n The task to be rewritten\n strategy: str, optional\n The rewriting strategy to use. Options are \"bottom_up\" (default),\n or \"top_level\".\n\n Examples\n --------\n Suppose there was a function `add` that returned the sum of 2 numbers,\n and another function `double` that returned twice its input:\n\n >>> add = lambda x, y: x + y\n >>> double = lambda x: 2*x\n\n Now suppose `double` was *significantly* faster than `add`, so\n you'd like to replace all expressions `(add, x, x)` with `(double,\n x)`, where `x` is a variable. 
This can be expressed as a rewrite rule:\n\n >>> rule = RewriteRule((add, 'x', 'x'), (double, 'x'), ('x',))\n >>> rs = RuleSet(rule)\n\n This can then be applied to terms to perform the rewriting:\n\n >>> term = (add, (add, 2, 2), (add, 2, 2))\n >>> rs.rewrite(term) # doctest: +SKIP\n (double, (double, 2))\n\n If we only wanted to apply this to the top level of the term, the\n `strategy` kwarg can be set to \"top_level\".\n\n >>> rs.rewrite(term, strategy=\"top_level\") # doctest: +SKIP\n (double, (add, 2, 2))\n \"\"\"\n return strategies[strategy](self, task)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__top_level__match.while_True_.None_1.except_Exception_.return": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__top_level__match.while_True_.None_1.except_Exception_.return", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 356, "end_line": 406, "span_ids": ["_match", "_top_level", "_bottom_up", "impl:5"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _top_level(net, term):\n return net._rewrite(term)\n\n\ndef _bottom_up(net, term):\n if istask(term):\n term = (head(term),) + tuple(_bottom_up(net, t) for t in args(term))\n elif isinstance(term, list):\n term = [_bottom_up(net, t) for t in args(term)]\n return net._rewrite(term)\n\n\nstrategies = {\"top_level\": _top_level, \"bottom_up\": _bottom_up}\n\n\ndef _match(S, N):\n \"\"\"Structural matching of term S to discrimination net node N.\"\"\"\n\n stack = deque()\n restore_state_flag = False\n # matches are stored in a tuple, because all mutations result in a copy,\n # preventing operations from changing matches stored on the stack.\n matches = ()\n while True:\n if S.current is END:\n yield N.patterns, matches\n try:\n # This try-except block is to catch hashing errors from un-hashable\n # types. 
This allows for variables to be matched with un-hashable\n # objects.\n n = N.edges.get(S.current, None)\n if n and not restore_state_flag:\n stack.append((S.copy(), N, matches))\n N = n\n S.next()\n continue\n except TypeError:\n pass\n n = N.edges.get(VAR, None)\n if n:\n restore_state_flag = False\n matches = matches + (S.term,)\n S.skip()\n N = n\n continue\n try:\n # Backtrack here\n (S, N, matches) = stack.pop()\n restore_state_flag = True\n except Exception:\n return", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__process_match_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/rewrite.py__process_match_", "embedding": null, "metadata": {"file_path": "dask/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 409, "end_line": 435, "span_ids": ["_process_match"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _process_match(rule, syms):\n \"\"\"Process a match to determine if it is correct, and to find the correct\n substitution that will convert the term into the pattern.\n\n Parameters\n ----------\n rule : RewriteRule\n syms : iterable\n Iterable of subterms that match a corresponding variable.\n\n Returns\n -------\n A dictionary of {vars : subterms} describing the substitution to make the\n pattern equivalent with the term. 
Returns `None` if the match is\n invalid.\"\"\"\n\n subs = {}\n varlist = rule._varlist\n if not len(varlist) == len(syms):\n raise RuntimeError(\"length of varlist doesn't match length of syms.\")\n for v, s in zip(varlist, syms):\n if v in subs and subs[v] != s:\n return None\n else:\n subs[v] = s\n return subs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_pandas_register_pandas.sizeof_pandas_multiindex.return.int_p_1000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_pandas_register_pandas.sizeof_pandas_multiindex.return.int_p_1000", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 136, "end_line": 178, "span_ids": ["register_pandas"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@sizeof.register_lazy(\"pandas\")\ndef register_pandas():\n import numpy as np\n import pandas as pd\n\n def object_size(x):\n if not len(x):\n return 0\n sample = np.random.choice(x, size=20, replace=True)\n sample = list(map(sizeof, sample))\n return sum(sample) / 20 * len(x)\n\n @sizeof.register(pd.DataFrame)\n def sizeof_pandas_dataframe(df):\n p = sizeof(df.index)\n for name, col in df.items():\n p += col.memory_usage(index=False)\n if col.dtype == object:\n p += object_size(col._values)\n return int(p) + 1000\n\n @sizeof.register(pd.Series)\n def sizeof_pandas_series(s):\n p = int(s.memory_usage(index=True))\n if s.dtype == object:\n p += object_size(s._values)\n if s.index.dtype == object:\n p += object_size(s.index)\n return int(p) + 1000\n\n @sizeof.register(pd.Index)\n def sizeof_pandas_index(i):\n p = int(i.memory_usage())\n if i.dtype == object:\n p += object_size(i)\n return int(p) + 1000\n\n @sizeof.register(pd.MultiIndex)\n def sizeof_pandas_multiindex(i):\n p = int(sum(object_size(l) for l in i.levels))\n for c in i.codes if hasattr(i, \"codes\") else i.labels:\n p += c.nbytes\n return int(p) + 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_spmatrix_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_register_spmatrix_", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 181, "end_line": 218, "span_ids": ["register_spmatrix", "register_pyarrow"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@sizeof.register_lazy(\"scipy\")\ndef register_spmatrix():\n from scipy import sparse\n\n @sizeof.register(sparse.dok_matrix)\n def sizeof_spmatrix_dok(s):\n return s.__sizeof__()\n\n @sizeof.register(sparse.spmatrix)\n def sizeof_spmatrix(s):\n return sum(sizeof(v) for v in s.__dict__.values())\n\n\n@sizeof.register_lazy(\"pyarrow\")\ndef register_pyarrow():\n import pyarrow as pa\n\n def _get_col_size(data):\n p = 0\n if not isinstance(data, pa.ChunkedArray):\n data = data.data # pyarrow <0.15.0\n for chunk in data.iterchunks():\n for buffer in chunk.buffers():\n if buffer:\n p += buffer.size\n return p\n\n @sizeof.register(pa.Table)\n def sizeof_pyarrow_table(table):\n p = sizeof(table.schema.metadata)\n for col in table.itercolumns():\n p += _get_col_size(col)\n return int(p) + 1000\n\n @sizeof.register(pa.ChunkedArray)\n def sizeof_pyarrow_chunked_array(data):\n return int(_get_col_size(data)) + 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/system.py_math_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/system.py_math_", "embedding": null, "metadata": {"file_path": "dask/system.py", "file_name": "system.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 54, "span_ids": ["imports", "cpu_count", "impl:7"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import math\nimport os\nimport sys\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\n__all__ = (\"cpu_count\", \"CPU_COUNT\")\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n\n Takes the minimum value from the following locations:\n\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n if psutil is not None:\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n # The directory name isn't standardized across linux distros, check both\n for dirname in [\"cpuacct,cpu\", \"cpu,cpuacct\"]:\n try:\n with open(\"/sys/fs/cgroup/%s/cpu.cfs_quota_us\" % dirname) as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/%s/cpu.cfs_period_us\" % dirname) as f:\n period = int(f.read())\n # We round up on fractional CPUs\n cgroups_count = math.ceil(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n break\n except Exception:\n pass\n\n return count\n\n\nCPU_COUNT = cpu_count()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_test_normalize_function.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_function_test_normalize_function.None_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 59, "end_line": 77, "span_ids": ["test_normalize_function"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_normalize_function():\n assert normalize_function(f2)\n\n assert normalize_function(lambda a: a)\n\n assert normalize_function(partial(f2, b=2)) == normalize_function(partial(f2, b=2))\n\n assert normalize_function(partial(f2, b=2)) != normalize_function(partial(f2, b=3))\n\n assert normalize_function(partial(f1, b=2)) != normalize_function(partial(f2, b=2))\n\n assert normalize_function(compose(f2, f3)) == normalize_function(compose(f2, f3))\n\n assert normalize_function(compose(f2, f3)) != normalize_function(compose(f2, f1))\n\n assert normalize_function(curry(f2)) == normalize_function(curry(f2))\n assert normalize_function(curry(f2)) != normalize_function(curry(f1))\n assert normalize_function(curry(f2, b=1)) == normalize_function(curry(f2, b=1))\n assert normalize_function(curry(f2, b=1)) != normalize_function(curry(f2, b=2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_test_tokenize_numpy_datetime.tokenize_np_array_2000_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_test_tokenize_numpy_datetime.tokenize_np_array_2000_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 122, "span_ids": ["test_tokenize_numpy_datetime", "test_tokenize", "test_tokenize_numpy_array_consistent_on_values", "test_tokenize_numpy_array_supports_uneven_sizes", "test_tokenize_discontiguous_numpy_array"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize():\n a = (1, 2, 3)\n assert isinstance(tokenize(a), (str, bytes))\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_array_consistent_on_values():\n assert tokenize(np.random.RandomState(1234).random_sample(1000)) == tokenize(\n np.random.RandomState(1234).random_sample(1000)\n )\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_array_supports_uneven_sizes():\n 
tokenize(np.random.random(7).astype(dtype=\"i2\"))\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_discontiguous_numpy_array():\n tokenize(np.random.random(8)[::2])\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_datetime():\n tokenize(np.array([\"2000-01-01T12:00:00\"], dtype=\"M8[ns]\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_scalar_test_tokenize_numpy_scalar_string_rep.try_.finally_.np_set_string_function_No": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_scalar_test_tokenize_numpy_scalar_string_rep.try_.finally_.np_set_string_function_No", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 141, "span_ids": ["test_tokenize_numpy_scalar_string_rep", "test_tokenize_numpy_scalar"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_scalar():\n assert tokenize(np.array(1.0, dtype=\"f8\")) == tokenize(np.array(1.0, dtype=\"f8\"))\n assert tokenize(\n np.array([(1, 2)], dtype=[(\"a\", \"i4\"), (\"b\", \"i8\")])[0]\n ) == tokenize(np.array([(1, 2)], dtype=[(\"a\", \"i4\"), (\"b\", \"i8\")])[0])\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_scalar_string_rep():\n # Test tokenizing numpy scalars doesn't depend on their string representation\n try:\n np.set_string_function(lambda x: \"foo\")\n assert tokenize(np.array(1)) != tokenize(np.array(2))\n finally:\n # Reset back to default\n np.set_string_function(None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_offset_test_tokenize_numpy_memmap_offset.with_open_fn_rb_as_f_.assert_tokenize_sub1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_offset_test_tokenize_numpy_memmap_offset.with_open_fn_rb_as_f_.assert_tokenize_sub1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 173, "span_ids": ["test_tokenize_numpy_memmap_offset"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef 
test_tokenize_numpy_memmap_offset(tmpdir):\n # Test two different memmaps into the same numpy file\n fn = str(tmpdir.join(\"demo_data\"))\n\n with open(fn, \"wb\") as f:\n f.write(b\"ashekwicht\")\n\n with open(fn, \"rb\") as f:\n mmap1 = np.memmap(f, dtype=np.uint8, mode=\"r\", offset=0, shape=5)\n mmap2 = np.memmap(f, dtype=np.uint8, mode=\"r\", offset=5, shape=5)\n\n assert tokenize(mmap1) != tokenize(mmap2)\n # also make sure that they tokenize correctly when taking sub-arrays\n sub1 = mmap1[1:-1]\n sub2 = mmap2[1:-1]\n assert tokenize(sub1) != tokenize(sub2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_test_tokenize_numpy_memmap.None_2.assert_tokenize_mm_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_test_tokenize_numpy_memmap.None_2.assert_tokenize_mm_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 172, "end_line": 197, "span_ids": ["test_tokenize_numpy_memmap"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_memmap():\n with tmpfile(\".npy\") as fn:\n x = np.arange(5)\n np.save(fn, x)\n y = tokenize(np.load(fn, mmap_mode=\"r\"))\n\n with tmpfile(\".npy\") as fn:\n x = np.arange(5)\n np.save(fn, x)\n z = tokenize(np.load(fn, mmap_mode=\"r\"))\n\n assert y != z\n\n with tmpfile(\".npy\") as fn:\n x = np.random.normal(size=(10, 10))\n np.save(fn, x)\n mm = np.load(fn, mmap_mode=\"r\")\n mm2 = np.load(fn, mmap_mode=\"r\")\n a = tokenize(mm[0, :])\n b = tokenize(mm[1, :])\n c = tokenize(mm[0:3, :])\n d = tokenize(mm[:, 0])\n assert len({a, b, c, d}) == 4\n assert tokenize(mm) == tokenize(mm2)\n assert tokenize(mm[1, :]) == tokenize(mm2[1, :])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_no_filename_test_tokenize_numpy_ufunc_consistent.assert_tokenize_inc_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_memmap_no_filename_test_tokenize_numpy_ufunc_consistent.assert_tokenize_inc_t", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 204, "end_line": 225, "span_ids": ["test_tokenize_numpy_ufunc_consistent", "test_tokenize_numpy_memmap_no_filename"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_memmap_no_filename():\n # GH 1562:\n with tmpfile(\".npy\") as fn1, tmpfile(\".npy\") as fn2:\n x = np.arange(5)\n np.save(fn1, x)\n np.save(fn2, x)\n\n a = np.load(fn1, mmap_mode=\"r\")\n b = a + a\n assert tokenize(b) == tokenize(b)\n\n\n@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_ufunc_consistent():\n assert tokenize(np.sin) == \"02106e2c67daf452fb480d264e0dac21\"\n assert tokenize(np.cos) == \"c99e52e912e4379882a9a4b387957a0b\"\n\n # Make a ufunc that isn't in the numpy namespace. Similar to\n # any found in other packages.\n inc = np.frompyfunc(lambda x: x + 1, 1, 1)\n assert tokenize(inc) == tokenize(inc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_test_tokenize_pandas.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_test_tokenize_pandas.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 244, "end_line": 258, "span_ids": ["test_tokenize_pandas"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas():\n a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"4\", \"asd\", None]}, index=[1, 2, 3])\n b = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"4\", \"asd\", None]}, index=[1, 2, 3])\n\n assert tokenize(a) == tokenize(b)\n b.index.name = \"foo\"\n assert tokenize(a) != tokenize(b)\n\n a = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"a\", \"b\", \"a\"]})\n b = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [\"a\", \"b\", \"a\"]})\n a[\"z\"] = a.y.astype(\"category\")\n assert tokenize(a) != tokenize(b)\n b[\"z\"] = a.y.astype(\"category\")\n assert tokenize(a) == tokenize(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_invalid_unicode_test_tokenize_pandas_no_pickle.tokenize_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_invalid_unicode_test_tokenize_pandas_no_pickle.tokenize_df_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 274, "end_line": 299, "span_ids": ["test_tokenize_pandas_no_pickle", "test_tokenize_pandas_mixed_unicode_bytes", "test_tokenize_pandas_invalid_unicode"], "tokens": 219}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_invalid_unicode():\n # see https://github.com/dask/dask/issues/2713\n df = pd.DataFrame(\n {\"x\\ud83d\": [1, 2, 3], \"y\\ud83d\": [\"4\", \"asd\\ud83d\", None]}, index=[1, 2, 3]\n )\n tokenize(df)\n\n\n@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_mixed_unicode_bytes():\n df = pd.DataFrame(\n {\"\u00f6\".encode(): [1, 2, 3], \"\u00f6\": [\"\u00f6\", \"\u00f6\".encode(), None]},\n index=[1, 2, 3],\n )\n tokenize(df)\n\n\n@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_no_pickle():\n class NoPickle:\n # pickling not supported because it is a local class\n pass\n\n df = pd.DataFrame({\"x\": [\"foo\", None, NoPickle()]})\n tokenize(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_extension_array_test_tokenize_pandas_extension_array.for_arr_in_arrays_.assert_tokenize_arr_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_extension_array_test_tokenize_pandas_extension_array.for_arr_in_arrays_.assert_tokenize_arr_t", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 302, "end_line": 324, "span_ids": ["test_tokenize_pandas_extension_array"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_tokenize_pandas_extension_array():\n arrays = [\n pd.array([1, 0, None], dtype=\"Int64\"),\n pd.array([\"2000\"], dtype=\"Period[D]\"),\n pd.array([1, 0, 0], dtype=\"Sparse[int]\"),\n pd.array([pd.Timestamp(\"2000\")], dtype=\"datetime64[ns]\"),\n pd.array([pd.Timestamp(\"2000\", tz=\"CET\")], dtype=\"datetime64[ns, CET]\"),\n pd.array(\n [\"a\", \"b\"],\n dtype=pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=False),\n ),\n ]\n\n arrays.extend(\n [\n pd.array([\"a\", \"b\", None], dtype=\"string\"),\n pd.array([True, False, None], dtype=\"boolean\"),\n ]\n )\n\n for arr in arrays:\n assert tokenize(arr) == tokenize(arr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_range_test_tokenize_range._Different_step": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_range_test_tokenize_range._Different_step", 
"embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 397, "end_line": 401, "span_ids": ["test_tokenize_range"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize_range():\n assert tokenize(range(5, 10, 2)) == tokenize(range(5, 10, 2)) # Identical ranges\n assert tokenize(range(5, 10, 2)) != tokenize(range(1, 10, 2)) # Different start\n assert tokenize(range(5, 10, 2)) != tokenize(range(5, 15, 2)) # Different stop\n assert tokenize(range(5, 10, 2)) != tokenize(range(5, 10, 1)) # Different step", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_array_with_nans_test_tokenize_numpy_matrix.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_array_with_nans_test_tokenize_numpy_matrix.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 414, "end_line": 440, "span_ids": ["test_tokenize_numpy_matrix", "test_tokenize_base_types", "test_tokenize_object_array_with_nans", "test_tokenize_literal"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_object_array_with_nans():\n a = np.array([\"foo\", \"Jos\\xe9\", np.nan], dtype=\"O\")\n assert tokenize(a) == tokenize(a)\n\n\n@pytest.mark.parametrize(\n \"x\", [1, True, \"a\", b\"a\", 1.0, 1j, 1.0j, [], (), {}, None, str, int]\n)\ndef test_tokenize_base_types(x):\n assert tokenize(x) == tokenize(x), x\n\n\ndef test_tokenize_literal():\n assert tokenize(literal([\"x\", 1])) == tokenize(literal([\"x\", 1]))\n\n\n@pytest.mark.skipif(\"not np\")\n@pytest.mark.filterwarnings(\"ignore:the matrix:PendingDeprecationWarning\")\ndef test_tokenize_numpy_matrix():\n rng = np.random.RandomState(1234)\n a = np.asmatrix(rng.rand(100))\n b = a.copy()\n assert tokenize(a) == tokenize(b)\n\n b[:10] = 1\n assert tokenize(a) != tokenize(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections.args_test_unpack_collections._Smoketest_results_that_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_unpack_collections.args_test_unpack_collections._Smoketest_results_that_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 516, "end_line": 544, "span_ids": ["test_unpack_collections"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unpack_collections():\n # ... other code\n\n args = build(a, b, c, (i for i in [a, b, c]))\n\n collections, repack = unpack_collections(*args)\n assert len(collections) == 3\n\n # Replace collections with `'~a'` strings\n result = repack([\"~a\", \"~b\", \"~c\"])\n sol = build(\"~a\", \"~b\", \"~c\", [\"~a\", \"~b\", \"~c\"])\n assert result == sol\n\n # traverse=False\n collections, repack = unpack_collections(*args, traverse=False)\n assert len(collections) == 2 # just a and b\n assert repack(collections) == args\n\n # No collections\n collections, repack = unpack_collections(1, 2, {\"a\": 3})\n assert not collections\n assert repack(collections) == (1, 2, {\"a\": 3})\n\n # Result that looks like a task\n def fail(*args):\n raise ValueError(\"Shouldn't have been called\") # pragma: nocover\n\n collections, repack = unpack_collections(\n a, (fail, 1), [(fail, 2, 3)], traverse=False\n )\n repack(collections) # Smoketest task literals\n repack([(fail, 1)]) # Smoketest results that look like tasks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_no_opt_test_compute_no_opt._See_Renamed": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_no_opt_test_compute_no_opt._See_Renamed", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 727, "end_line": 751, "span_ids": ["test_compute_no_opt"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_no_opt():\n # Bag does `fuse` by default. Test that with `optimize_graph=False` that\n # doesn't get called. 
We check this by using a callback to track the keys\n # that are computed.\n from dask.callbacks import Callback\n\n b = db.from_sequence(range(100), npartitions=4)\n add1 = partial(add, 1)\n mul2 = partial(mul, 2)\n o = b.map(add1).map(mul2)\n # Check that with the kwarg, the optimization doesn't happen\n keys = []\n with Callback(pretask=lambda key, *args: keys.append(key)):\n o.compute(scheduler=\"single-threaded\", optimize_graph=False)\n assert len([k for k in keys if \"mul\" in k[0]]) == 4\n assert len([k for k in keys if \"add\" in k[0]]) == 4\n # Check that without the kwarg, the optimization does happen\n keys = []\n with Callback(pretask=lambda key, *args: keys.append(key)):\n o.compute(scheduler=\"single-threaded\")\n # Names of fused tasks have been merged, and the original key is an alias.\n # Otherwise, the lengths below would be 4 and 0.\n assert len([k for k in keys if \"mul\" in k[0]]) == 8\n assert len([k for k in keys if \"add\" in k[0]]) == 4\n assert len([k for k in keys if \"add-mul\" in k[0]]) == 4 # See? Renamed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_test_persist_array.assert_len_y_dask_y_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_test_persist_array.assert_len_y_dask_y_n", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 648, "end_line": 670, "span_ids": ["test_persist_array", "test_compute_array"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da\")\ndef test_compute_array():\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(5, 5))\n darr1 = darr + 1\n darr2 = darr + 2\n out1, out2 = compute(darr1, darr2)\n assert np.allclose(out1, arr + 1)\n assert np.allclose(out2, arr + 2)\n\n\n@pytest.mark.skipif(\"not da\")\ndef test_persist_array():\n from dask.array.utils import assert_eq\n\n arr = np.arange(100).reshape((10, 10))\n x = da.from_array(arr, chunks=(5, 5))\n x = (x + 1) - x.mean(axis=0)\n y = x.persist()\n\n assert_eq(x, y)\n assert set(y.dask).issubset(x.dask)\n assert len(y.dask) == y.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_valid_unicode_in_bytes_test_compute_with_literal.assert_compute_5_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_valid_unicode_in_bytes_test_compute_with_literal.assert_compute_5_5_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": 
"test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 929, "end_line": 963, "span_ids": ["test_compute_dataframe_valid_unicode_in_bytes", "test_compute_with_literal", "test_compute_array_bag", "test_compute_dataframe_invalid_unicode"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_compute_dataframe_valid_unicode_in_bytes():\n df = pd.DataFrame(data=np.random.random((3, 1)), columns=[\"\u00f6\".encode()])\n dd.from_pandas(df, npartitions=4)\n\n\n@pytest.mark.skipif(\"not dd\")\ndef test_compute_dataframe_invalid_unicode():\n # see https://github.com/dask/dask/issues/2713\n df = pd.DataFrame(data=np.random.random((3, 1)), columns=[\"\\ud83d\"])\n dd.from_pandas(df, npartitions=4)\n\n\n@pytest.mark.skipif(\"not da\")\ndef test_compute_array_bag():\n x = da.arange(5, chunks=2)\n b = db.from_sequence([1, 2, 3])\n\n pytest.raises(ValueError, lambda: compute(x, b))\n\n xx, bb = compute(x, b, scheduler=\"single-threaded\")\n assert np.allclose(xx, np.arange(5))\n assert bb == [1, 2, 3]\n\n\n@pytest.mark.skipif(\"not da\")\ndef test_compute_with_literal():\n x = da.arange(5, chunks=2)\n y = 10\n\n xx, yy = compute(x, y)\n assert (xx == x.compute()).all()\n assert yy == y\n\n assert compute(5) == (5,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_nested_test_compute_nested.assert_res_1_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_nested_test_compute_nested.assert_res_1_8", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 732, "end_line": 744, "span_ids": ["test_compute_nested"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_nested():\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n assert compute({\"a\": a, \"b\": [1, 2, b]}, (c, 2)) == (\n {\"a\": 6, \"b\": [1, 2, 7]},\n (8, 2),\n )\n\n res = compute([a, b], c, traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1] == 8", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_use_cloudpickle_to_tokenize_functions_in__main___test_optimizations_keyword.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_use_cloudpickle_to_tokenize_functions_in__main___test_optimizations_keyword.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 985, "end_line": 1018, "span_ids": ["test_use_cloudpickle_to_tokenize_functions_in__main__", "inc_to_dec", "test_optimizations_keyword"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_use_cloudpickle_to_tokenize_functions_in__main__():\n from textwrap import dedent\n\n defn = dedent(\n \"\"\"\n def inc():\n return x\n \"\"\"\n )\n\n __main__ = sys.modules[\"__main__\"]\n exec(compile(defn, \"\", \"exec\"), __main__.__dict__)\n f = __main__.inc\n\n t = normalize_token(f)\n assert b\"cloudpickle\" in t\n\n\ndef inc_to_dec(dsk, keys):\n dsk = dict(dsk)\n for key in dsk:\n if dsk[key][0] == inc:\n dsk[key] = (dec,) + dsk[key][1:]\n return dsk\n\n\ndef test_optimizations_keyword():\n x = dask.delayed(inc)(1)\n assert x.compute() == 2\n\n with dask.config.set(optimizations=[inc_to_dec]):\n assert x.compute() == 0\n\n assert x.compute() == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_test_optimize.for_a_b_in_zip_x3_y3_.assert_dict_a_dask_di": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_test_optimize.for_a_b_in_zip_x3_y3_.assert_dict_a_dask_di", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 837, "end_line": 863, "span_ids": ["test_optimize"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize():\n x = dask.delayed(inc)(1)\n y = dask.delayed(inc)(x)\n z = x + y\n\n x2, y2, z2, constant = optimize(x, y, z, 1)\n assert constant == 1\n\n # Same graphs for each\n dsk = dict(x2.dask)\n assert dict(y2.dask) == dsk\n assert dict(z2.dask) == dsk\n\n # Computationally equivalent\n assert dask.compute(x2, y2, z2) == dask.compute(x, y, z)\n\n # Applying optimizations before compute and during compute gives\n # same results. 
Shows optimizations are occurring.\n sols = dask.compute(x, y, z, optimizations=[inc_to_dec])\n x3, y3, z3 = optimize(x, y, z, optimizations=[inc_to_dec])\n assert dask.compute(x3, y3, z3) == sols\n\n # Optimize respects global optimizations as well\n with dask.config.set(optimizations=[inc_to_dec]):\n x4, y4, z4 = optimize(x, y, z)\n for a, b in zip([x3, y3, z3], [x4, y4, z4]):\n assert dict(a.dask) == dict(b.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_nested_test_optimize_nested.assert_res_1_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_nested_test_optimize_nested.assert_res_1_compute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 866, "end_line": 886, "span_ids": ["test_optimize_nested"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_nested():\n a = dask.delayed(inc)(1)\n b = dask.delayed(inc)(a)\n c = a + b\n\n result = optimize({\"a\": a, \"b\": [1, 2, b]}, (c, 2))\n\n a2 = result[0][\"a\"]\n b2 = result[0][\"b\"][2]\n c2 = result[1][0]\n\n assert isinstance(a2, Delayed)\n assert isinstance(b2, Delayed)\n assert isinstance(c2, Delayed)\n assert dict(a2.dask) == dict(b2.dask) == dict(c2.dask)\n assert compute(*result) == ({\"a\": 2, \"b\": [1, 2, 3]}, (5, 2))\n\n res = optimize([a, b], c, traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1].compute() == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_default_imports_test_persist_literals.assert_persist_1_2_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_default_imports_test_persist_literals.assert_persist_1_2_3_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 889, "end_line": 917, "span_ids": ["test_default_imports", "test_persist_literals"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_default_imports():\n \"\"\"\n Startup time: `import dask` should not import too many modules.\n \"\"\"\n code = \"\"\"if 1:\n import dask\n import sys\n\n print(sorted(sys.modules))\n \"\"\"\n\n out = 
subprocess.check_output([sys.executable, \"-c\", code])\n modules = set(eval(out.decode()))\n assert \"dask\" in modules\n blacklist = [\n \"dask.array\",\n \"dask.dataframe\",\n \"numpy\",\n \"pandas\",\n \"partd\",\n \"s3fs\",\n \"distributed\",\n ]\n for mod in blacklist:\n assert mod not in modules\n\n\ndef test_persist_literals():\n assert persist(1, 2, 3) == (1, 2, 3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_nested_test_persist_nested.assert_res_1_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_nested_test_persist_nested.assert_res_1_compute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 920, "end_line": 933, "span_ids": ["test_persist_nested"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_nested():\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n result = persist({\"a\": a, \"b\": [1, 2, b]}, (c, 2))\n assert isinstance(result[0][\"a\"], Delayed)\n assert isinstance(result[0][\"b\"][2], Delayed)\n assert isinstance(result[1][0], Delayed)\n assert compute(*result) == ({\"a\": 6, \"b\": [1, 2, 7]}, (8, 2))\n\n res = persist([a, b], c, traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1].compute() == 8", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_None_test_optimize_None.with_dask_config_set_arra.y_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_None_test_optimize_None.with_dask_config_set_arra.y_compute_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1002, "end_line": 1013, "span_ids": ["test_optimize_None"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_None():\n da = pytest.importorskip(\"dask.array\")\n\n x = da.ones(10, chunks=(5,))\n y = x[:9][1:8][::2] + 1 # normally these slices would be fused\n\n def my_get(dsk, keys):\n assert dsk == dict(y.dask) # but they aren't\n return dask.get(dsk, keys)\n\n with dask.config.set(array_optimize=None, scheduler=my_get):\n y.compute()", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_scheduler_keyword_test_scheduler_keyword.try_.finally_.del_named_schedulers_foo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_scheduler_keyword_test_scheduler_keyword.try_.finally_.del_named_schedulers_foo", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1016, "end_line": 1035, "span_ids": ["test_scheduler_keyword"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_scheduler_keyword():\n def schedule(dsk, keys, **kwargs):\n return [[123]]\n\n named_schedulers[\"foo\"] = schedule\n\n x = delayed(inc)(1)\n\n try:\n assert x.compute() == 2\n assert x.compute(scheduler=\"foo\") == 123\n\n with dask.config.set(scheduler=\"foo\"):\n assert x.compute() == 123\n assert x.compute() == 2\n\n with dask.config.set(scheduler=\"foo\"):\n assert x.compute(scheduler=\"threads\") == 2\n finally:\n del named_schedulers[\"foo\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_test_cache_with_number_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_test_cache_with_number_", "embedding": null, "metadata": {"file_path": "dask/tests/test_cache.py", "file_name": "test_cache.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 76, "span_ids": ["f", "test_prefer_cheap_dependent", "test_cache_correctness", "test_cache_with_number"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cache_with_number():\n c = Cache(10000, limit=1)\n assert isinstance(c.cache, cachey.Cache)\n assert c.cache.available_bytes == 10000\n assert c.cache.limit == 1\n\n\ndef test_cache_correctness():\n # https://github.com/dask/dask/issues/3631\n c = Cache(10000)\n da = pytest.importorskip(\"dask.array\")\n from numpy import ones, zeros\n\n z = da.from_array(zeros(1), chunks=10)\n o = da.from_array(ones(1), chunks=10)\n with c:\n assert (z.compute() == 0).all()\n assert (o.compute() == 1).all()\n\n\ndef f(duration, size, *args):\n sleep(duration)\n return [0] * size\n\n\ndef test_prefer_cheap_dependent():\n dsk = {\"x\": (f, 0.01, 10), \"y\": (f, 0.000001, 1, \"x\")}\n c = Cache(10000)\n with c:\n get_sync(dsk, \"y\")\n\n assert c.cache.scorer.cost[\"x\"] < c.cache.scorer.cost[\"y\"]", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_finish_always_called_test_finish_always_called.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_finish_always_called_test_finish_always_called.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_callbacks.py", "file_name": "test_callbacks.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 73, "span_ids": ["test_finish_always_called"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_finish_always_called():\n flag = [False]\n\n class MyCallback(Callback):\n def _finish(self, dsk, state, errored):\n flag[0] = True\n assert errored\n\n dsk = {\"x\": (lambda: 1 / 0,)}\n\n # `raise_on_exception=True`\n try:\n with MyCallback():\n get_sync(dsk, \"x\")\n except Exception as e:\n assert isinstance(e, ZeroDivisionError)\n assert flag[0]\n\n # `raise_on_exception=False`\n flag[0] = False\n try:\n with MyCallback():\n get_threaded(dsk, \"x\")\n except Exception as e:\n assert isinstance(e, ZeroDivisionError)\n assert flag[0]\n\n # KeyboardInterrupt\n def raise_keyboard():\n raise KeyboardInterrupt()\n\n dsk = {\"x\": (raise_keyboard,)}\n flag[0] = False\n try:\n with MyCallback():\n get_sync(dsk, \"x\")\n except BaseException as e:\n assert isinstance(e, KeyboardInterrupt)\n assert flag[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_nested_schedulers_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_test_nested_schedulers_", "embedding": null, "metadata": {"file_path": "dask/tests/test_callbacks.py", "file_name": "test_callbacks.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 111, "span_ids": ["test_add_remove_mutates_not_replaces", "test_nested_schedulers"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nested_schedulers():\n class MyCallback(Callback):\n def _start(self, dsk):\n self.dsk = dsk\n\n def _pretask(self, key, dsk, state):\n assert key in self.dsk\n\n inner_callback = MyCallback()\n inner_dsk = {\"x\": (add, 1, 2), \"y\": (add, \"x\", 3)}\n\n def nested_call(x):\n assert not Callback.active\n with inner_callback:\n return get_threaded(inner_dsk, \"y\") + x\n\n outer_callback = MyCallback()\n outer_dsk = {\"a\": (nested_call, 
1), \"b\": (add, \"a\", 2)}\n\n with outer_callback:\n get_threaded(outer_dsk, \"b\")\n\n assert not Callback.active\n assert outer_callback.dsk == outer_dsk\n assert inner_callback.dsk == inner_dsk\n assert not Callback.active\n\n\ndef test_add_remove_mutates_not_replaces():\n assert not Callback.active\n\n with Callback():\n assert Callback.active\n\n assert not Callback.active", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_update_test_update.assert_b_x_2_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_update_test_update.assert_b_x_2_y_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 51, "span_ids": ["test_update"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_update():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": OrderedDict({\"b\": 2})}\n update(b, a)\n assert b == {\"x\": 1, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"a\": 3, \"b\": 2}}\n update(b, a, priority=\"old\")\n assert b == {\"x\": 2, \"y\": {\"a\": 3, \"b\": 2}, \"z\": 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_merge_test_collect_yaml_paths.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_merge_test_collect_yaml_paths.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 54, "end_line": 78, "span_ids": ["test_collect_yaml_paths", "test_merge"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n\n expected = {\"x\": 2, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n c = merge(a, b)\n assert c == expected\n\n\ndef test_collect_yaml_paths():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n\n expected = {\"x\": 2, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n with tmpfile(extension=\"yaml\") as fn1:\n 
with tmpfile(extension=\"yaml\") as fn2:\n with open(fn1, \"w\") as f:\n yaml.dump(a, f)\n with open(fn2, \"w\") as f:\n yaml.dump(b, f)\n\n config = merge(*collect_yaml(paths=[fn1, fn2]))\n assert config == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_dir_no_read_permissions.try_.finally_.os_chmod_path_perm_orig_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_dir_no_read_permissions.try_.finally_.os_chmod_path_perm_orig_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 81, "end_line": 106, "span_ids": ["test_collect_yaml_dir", "no_read_permissions"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_yaml_dir():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n\n expected = {\"x\": 2, \"y\": {\"a\": 1, \"b\": 2}, \"z\": 3}\n\n with tmpfile() as dirname:\n os.mkdir(dirname)\n with open(os.path.join(dirname, \"a.yaml\"), mode=\"w\") as f:\n yaml.dump(a, f)\n with open(os.path.join(dirname, \"b.yaml\"), mode=\"w\") as f:\n yaml.dump(b, f)\n\n config = merge(*collect_yaml(paths=[dirname]))\n assert config == expected\n\n\n@contextmanager\ndef no_read_permissions(path):\n perm_orig = stat.S_IMODE(os.stat(path).st_mode)\n perm_new = perm_orig ^ stat.S_IREAD\n try:\n os.chmod(path, perm_new)\n yield\n finally:\n os.chmod(path, perm_orig)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_permission_errors_test_collect_yaml_permission_errors.with_no_read_permissions_.assert_config_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_yaml_permission_errors_test_collect_yaml_permission_errors.with_no_read_permissions_.assert_config_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 109, "end_line": 135, "span_ids": ["test_collect_yaml_permission_errors"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"Can't make writeonly file on windows\"\n)\n@pytest.mark.parametrize(\"kind\", [\"directory\", 
\"file\"])\ndef test_collect_yaml_permission_errors(tmpdir, kind):\n a = {\"x\": 1, \"y\": 2}\n b = {\"y\": 3, \"z\": 4}\n\n dir_path = str(tmpdir)\n a_path = os.path.join(dir_path, \"a.yaml\")\n b_path = os.path.join(dir_path, \"b.yaml\")\n\n with open(a_path, mode=\"w\") as f:\n yaml.dump(a, f)\n with open(b_path, mode=\"w\") as f:\n yaml.dump(b, f)\n\n if kind == \"directory\":\n cant_read = dir_path\n expected = {}\n else:\n cant_read = a_path\n expected = b\n\n with no_read_permissions(cant_read):\n config = merge(*collect_yaml(paths=[dir_path]))\n assert config == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_env_test_env.assert_res_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_env_test_env.assert_res_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 160, "span_ids": ["test_env"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_env():\n env = {\n \"DASK_A_B\": \"123\",\n \"DASK_C\": \"True\",\n \"DASK_D\": \"hello\",\n \"DASK_E__X\": \"123\",\n \"DASK_E__Y\": \"456\",\n \"DASK_F\": '[1, 2, \"3\"]',\n \"DASK_G\": \"/not/parsable/as/literal\",\n \"FOO\": \"not included\",\n }\n\n expected = {\n \"a_b\": 123,\n \"c\": True,\n \"d\": \"hello\",\n \"e\": {\"x\": 123, \"y\": 456},\n \"f\": [1, 2, \"3\"],\n \"g\": \"/not/parsable/as/literal\",\n }\n\n res = collect_env(env)\n assert res == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_test_collect.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_test_collect.with_tmpfile_extension_y.with_tmpfile_extension_y.assert_config_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 163, "end_line": 178, "span_ids": ["test_collect"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect():\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 2, \"z\": 3, \"y\": {\"b\": 2}}\n env = {\"DASK_W\": 4}\n\n expected = {\"w\": 4, \"x\": 2, \"y\": {\"a\": 1, 
\"b\": 2}, \"z\": 3}\n\n with tmpfile(extension=\"yaml\") as fn1:\n with tmpfile(extension=\"yaml\") as fn2:\n with open(fn1, \"w\") as f:\n yaml.dump(a, f)\n with open(fn2, \"w\") as f:\n yaml.dump(b, f)\n\n config = collect([fn1, fn2], env=env)\n assert config == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_env_none_test_get.with_pytest_raises_KeyErr.get_y_b_config_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_collect_env_none_test_get.with_pytest_raises_KeyErr.get_y_b_config_d_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 182, "end_line": 195, "span_ids": ["test_get", "test_collect_env_none"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_env_none(monkeypatch):\n monkeypatch.setenv(\"DASK_FOO\", \"bar\")\n config = collect([])\n assert config == {\"foo\": \"bar\"}\n\n\ndef test_get():\n d = {\"x\": 1, \"y\": {\"a\": 2}}\n\n assert get(\"x\", config=d) == 1\n assert get(\"y.a\", config=d) == 2\n assert get(\"y.b\", 123, config=d) == 123\n with pytest.raises(KeyError):\n get(\"y.b\", config=d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_test_ensure_file.assert_not_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_test_ensure_file.assert_not_result", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 200, "end_line": 238, "span_ids": ["test_ensure_file"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ensure_file(tmpdir):\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n b = {\"x\": 123}\n\n source = os.path.join(str(tmpdir), \"source.yaml\")\n dest = os.path.join(str(tmpdir), \"dest\")\n destination = os.path.join(dest, \"source.yaml\")\n\n with open(source, \"w\") as f:\n yaml.dump(a, f)\n\n ensure_file(source=source, destination=dest, comment=False)\n\n with open(destination) as f:\n result = yaml.safe_load(f)\n assert result == a\n\n # don't overwrite old config files\n with open(source, \"w\") as f:\n yaml.dump(b, f)\n\n ensure_file(source=source, destination=dest, 
comment=False)\n\n with open(destination) as f:\n result = yaml.safe_load(f)\n assert result == a\n\n os.remove(destination)\n\n # Write again, now with comments\n ensure_file(source=source, destination=dest, comment=True)\n\n with open(destination) as f:\n text = f.read()\n assert \"123\" in text\n\n with open(destination) as f:\n result = yaml.safe_load(f)\n assert not result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_test_set.assert_d_abc_x_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_test_set.assert_d_abc_x_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 239, "end_line": 258, "span_ids": ["test_set"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set():\n with dask.config.set(abc=123):\n assert config[\"abc\"] == 123\n with dask.config.set(abc=456):\n assert config[\"abc\"] == 456\n assert config[\"abc\"] == 123\n\n assert \"abc\" not in config\n\n with dask.config.set({\"abc\": 123}):\n assert config[\"abc\"] == 123\n assert \"abc\" not in config\n\n with dask.config.set({\"abc.x\": 1, \"abc.y\": 2, \"abc.z.a\": 3}):\n assert config[\"abc\"] == {\"x\": 1, \"y\": 2, \"z\": {\"a\": 3}}\n assert \"abc\" not in config\n\n d = {}\n dask.config.set({\"abc.x\": 123}, config=d)\n assert d[\"abc\"][\"x\"] == 123", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_kwargs_test_set_kwargs.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_kwargs_test_set_kwargs.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 261, "end_line": 274, "span_ids": ["test_set_kwargs"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_kwargs():\n with dask.config.set(foo__bar=1, foo__baz=2):\n assert config[\"foo\"] == {\"bar\": 1, \"baz\": 2}\n assert \"foo\" not in config\n\n # Mix kwargs and dict, kwargs override\n with dask.config.set({\"foo.bar\": 1, \"foo.baz\": 2}, foo__buzz=3, foo__bar=4):\n assert config[\"foo\"] == {\"bar\": 4, \"baz\": 2, \"buzz\": 3}\n assert \"foo\" not in config\n\n # Mix kwargs and nested dict, 
kwargs override\n with dask.config.set({\"foo\": {\"bar\": 1, \"baz\": 2}}, foo__buzz=3, foo__bar=4):\n assert config[\"foo\"] == {\"bar\": 4, \"baz\": 2, \"buzz\": 3}\n assert \"foo\" not in config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_directory_test_ensure_file_directory.assert_os_path_exists_os_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_directory_test_ensure_file_directory.assert_os_path_exists_os_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 296, "end_line": 312, "span_ids": ["test_ensure_file_directory"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mkdir\", [True, False])\ndef test_ensure_file_directory(mkdir, tmpdir):\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n\n source = os.path.join(str(tmpdir), \"source.yaml\")\n dest = os.path.join(str(tmpdir), \"dest\")\n\n with open(source, \"w\") as f:\n yaml.dump(a, f)\n\n if mkdir:\n os.mkdir(dest)\n\n ensure_file(source=source, destination=dest)\n\n assert os.path.isdir(dest)\n assert os.path.exists(os.path.join(dest, \"source.yaml\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_defaults_to_DASK_CONFIG_directory_test_ensure_file_defaults_to_DASK_CONFIG_directory.assert_os_path_split_fn_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_ensure_file_defaults_to_DASK_CONFIG_directory_test_ensure_file_defaults_to_DASK_CONFIG_directory.assert_os_path_split_fn_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 331, "span_ids": ["test_ensure_file_defaults_to_DASK_CONFIG_directory"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ensure_file_defaults_to_DASK_CONFIG_directory(tmpdir):\n a = {\"x\": 1, \"y\": {\"a\": 1}}\n source = os.path.join(str(tmpdir), \"source.yaml\")\n with open(source, \"w\") as f:\n yaml.dump(a, f)\n\n destination = os.path.join(str(tmpdir), \"dask\")\n PATH = dask.config.PATH\n try:\n dask.config.PATH = destination\n ensure_file(source=source)\n finally:\n dask.config.PATH = PATH\n\n 
assert os.path.isdir(destination)\n [fn] = os.listdir(destination)\n assert os.path.split(fn)[1] == os.path.split(source)[1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_rename_test_refresh.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_rename_test_refresh.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 352, "span_ids": ["test_rename", "test_refresh"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rename():\n aliases = {\"foo_bar\": \"foo.bar\"}\n config = {\"foo-bar\": 123}\n rename(aliases, config=config)\n assert config == {\"foo\": {\"bar\": 123}}\n\n\ndef test_refresh():\n defaults = []\n config = {}\n\n update_defaults({\"a\": 1}, config=config, defaults=defaults)\n assert config == {\"a\": 1}\n\n refresh(paths=[], env={\"DASK_B\": \"2\"}, config=config, defaults=defaults)\n assert config == {\"a\": 1, \"b\": 2}\n\n refresh(paths=[], env={\"DASK_C\": \"3\"}, config=config, defaults=defaults)\n assert config == {\"a\": 1, \"c\": 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_expand_environment_variables_test_env_var_canonical_name.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_expand_environment_variables_test_env_var_canonical_name.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 353, "end_line": 377, "span_ids": ["test_expand_environment_variables", "test_env_var_canonical_name"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"inp,out\",\n [\n (\"1\", \"1\"),\n (1, 1),\n (\"$FOO\", \"foo\"),\n ([1, \"$FOO\"], [1, \"foo\"]),\n ((1, \"$FOO\"), (1, \"foo\")),\n ({1, \"$FOO\"}, {1, \"foo\"}),\n ({\"a\": \"$FOO\"}, {\"a\": \"foo\"}),\n ({\"a\": \"A\", \"b\": [1, \"2\", \"$FOO\"]}, {\"a\": \"A\", \"b\": [1, \"2\", \"foo\"]}),\n ],\n)\ndef test_expand_environment_variables(monkeypatch, inp, out):\n monkeypatch.setenv(\"FOO\", \"foo\")\n assert expand_environment_variables(inp) == out\n\n\ndef test_env_var_canonical_name(monkeypatch):\n value = 3\n 
monkeypatch.setenv(\"DASK_A_B\", str(value))\n d = {}\n dask.config.refresh(config=d)\n assert get(\"a_b\", config=d) == value\n assert get(\"a-b\", config=d) == value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_canonical_name_test_get_set_canonical_name.None_2.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_canonical_name_test_get_set_canonical_name.None_2.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 385, "end_line": 399, "span_ids": ["test_get_set_canonical_name"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_set_canonical_name():\n c = {\"x-y\": {\"a_b\": 123}}\n\n keys = [\"x_y.a_b\", \"x-y.a-b\", \"x_y.a-b\"]\n for k in keys:\n assert dask.config.get(k, config=c) == 123\n\n with dask.config.set({\"x_y\": {\"a-b\": 456}}, config=c):\n for k in keys:\n assert dask.config.get(k, config=c) == 456\n\n # No change to new keys in sub dicts\n with dask.config.set({\"x_y\": {\"a-b\": {\"c_d\": 1}, \"e-f\": 2}}, config=c):\n assert dask.config.get(\"x_y.a-b\", config=c) == {\"c_d\": 1}\n assert dask.config.get(\"x_y.e_f\", config=c) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_roundtrip_test_schema.jsonschema_validate_confi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_get_set_roundtrip_test_schema.jsonschema_validate_confi", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 402, "end_line": 435, "span_ids": ["test_schema", "test_core_file", "test_get_set_roundtrip", "test_merge_None_to_dict"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"key\", [\"custom_key\", \"custom-key\"])\ndef test_get_set_roundtrip(key):\n value = 123\n with dask.config.set({key: value}):\n assert dask.config.get(\"custom_key\") == value\n assert dask.config.get(\"custom-key\") == value\n\n\ndef test_merge_None_to_dict():\n assert dask.config.merge({\"a\": None, \"c\": 0}, {\"a\": {\"b\": 1}}) == {\n \"a\": {\"b\": 1},\n \"c\": 0,\n }\n\n\ndef test_core_file():\n assert 
\"temporary-directory\" in dask.config.config\n assert \"dataframe\" in dask.config.config\n assert \"shuffle-compression\" in dask.config.get(\"dataframe\")\n\n\ndef test_schema():\n jsonschema = pytest.importorskip(\"jsonschema\")\n\n config_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask.yaml\")\n schema_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask-schema.yaml\")\n\n with open(config_fn) as f:\n config = yaml.safe_load(f)\n\n with open(schema_fn) as f:\n schema = yaml.safe_load(f)\n\n jsonschema.validate(config, schema)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_foo_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_foo_", "embedding": null, "metadata": {"file_path": "dask/tests/test_context.py", "file_name": "test_context.py", "file_type": "text/x-python", "category": "test", "start_line": 29, "end_line": 63, "span_ids": ["Foo", "Foo:2", "bar", "Foo.f", "test_globalmethod", "foo"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def foo():\n return \"foo\"\n\n\ndef bar():\n return \"bar\"\n\n\nclass Foo:\n @globalmethod(key=\"f\")\n def f(): # type: ignore\n return 1\n\n g = globalmethod(foo, key=\"g\", falsey=bar)\n\n\ndef test_globalmethod():\n x = Foo()\n\n assert x.f() == 1\n\n with dask.config.set(f=lambda: 2):\n assert x.f() == 2\n\n with dask.config.set(f=foo):\n assert x.f is foo\n assert x.f() == \"foo\"\n\n assert x.g is foo\n assert x.g() == \"foo\"\n\n with dask.config.set(g=False):\n assert x.g is bar\n assert x.g() == \"bar\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_has_tasks_test_has_tasks.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_has_tasks_test_has_tasks.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 54, "span_ids": ["test_has_tasks"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_has_tasks():\n dsk = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"a\")],\n \"e\": [\"a\", \"b\"],\n \"f\": [[\"a\", \"b\"], 2, 3],\n }\n assert not has_tasks(dsk, dsk[\"a\"])\n assert has_tasks(dsk, dsk[\"b\"])\n assert has_tasks(dsk, dsk[\"c\"])\n assert has_tasks(dsk, 
dsk[\"d\"])\n assert has_tasks(dsk, dsk[\"e\"])\n assert has_tasks(dsk, dsk[\"f\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_preorder_traversal_test_preorder_traversal.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_preorder_traversal_test_preorder_traversal.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 57, "end_line": 63, "span_ids": ["test_preorder_traversal"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_preorder_traversal():\n t = (add, 1, 2)\n assert list(preorder_traversal(t)) == [add, 1, 2]\n t = (add, (add, 1, 2), (add, 3, 4))\n assert list(preorder_traversal(t)) == [add, add, 1, 2, add, 3, 4]\n t = (add, (sum, [1, 2]), 3)\n assert list(preorder_traversal(t)) == [add, sum, list, 1, 2, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_TestGet_test_get_dependencies_nothing.with_pytest_raises_ValueE.get_dependencies_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_TestGet_test_get_dependencies_nothing.with_pytest_raises_ValueE.get_dependencies_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 111, "span_ids": ["TestGet", "test_GetFunctionTestMixin_class", "test_get_dependencies_empty", "test_get_dependencies_nothing", "test_get_dependencies_list", "test_get_dependencies_task", "test_get_dependencies_nested"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGet(GetFunctionTestMixin):\n get = staticmethod(core.get)\n\n\ndef test_GetFunctionTestMixin_class():\n class TestCustomGetFail(GetFunctionTestMixin):\n get = staticmethod(lambda x, y: 1)\n\n custom_testget = TestCustomGetFail()\n pytest.raises(AssertionError, custom_testget.test_get)\n\n class TestCustomGetPass(GetFunctionTestMixin):\n get = staticmethod(core.get)\n\n custom_testget = TestCustomGetPass()\n custom_testget.test_get()\n\n\ndef test_get_dependencies_nested():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (add, (inc, [[\"x\"]]), \"y\")}\n\n assert get_dependencies(dsk, \"z\") == {\"x\", \"y\"}\n assert sorted(get_dependencies(dsk, \"z\", as_list=True)) == 
[\"x\", \"y\"]\n\n\ndef test_get_dependencies_empty():\n dsk = {\"x\": (inc,)}\n assert get_dependencies(dsk, \"x\") == set()\n assert get_dependencies(dsk, \"x\", as_list=True) == []\n\n\ndef test_get_dependencies_list():\n dsk = {\"x\": 1, \"y\": 2, \"z\": [\"x\", [(inc, \"y\")]]}\n assert get_dependencies(dsk, \"z\") == {\"x\", \"y\"}\n assert sorted(get_dependencies(dsk, \"z\", as_list=True)) == [\"x\", \"y\"]\n\n\ndef test_get_dependencies_task():\n dsk = {\"x\": 1, \"y\": 2, \"z\": [\"x\", [(inc, \"y\")]]}\n assert get_dependencies(dsk, task=(inc, \"x\")) == {\"x\"}\n assert get_dependencies(dsk, task=(inc, \"x\"), as_list=True) == [\"x\"]\n\n\ndef test_get_dependencies_nothing():\n with pytest.raises(ValueError):\n get_dependencies({})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_dependencies_many_test_get_dependencies_task_none.assert_get_dependencies_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_dependencies_many_test_get_dependencies_task_none.assert_get_dependencies_d", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 139, "span_ids": ["test_get_dependencies_task_none", "test_get_dependencies_many"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dependencies_many():\n dsk = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"c\")],\n \"e\": [\"a\", \"b\", \"zzz\"],\n \"f\": [[\"a\", \"b\"], 2, 3],\n }\n\n tasks = [dsk[k] for k in (\"d\", \"f\")]\n s = get_dependencies(dsk, task=tasks)\n assert s == {\"a\", \"b\", \"c\"}\n s = get_dependencies(dsk, task=tasks, as_list=True)\n assert sorted(s) == [\"a\", \"b\", \"c\"]\n\n s = get_dependencies(dsk, task=[])\n assert s == set()\n s = get_dependencies(dsk, task=[], as_list=True)\n assert s == []\n\n\ndef test_get_dependencies_task_none():\n # Regression test for https://github.com/dask/distributed/issues/2756\n dsk = {\"foo\": None}\n assert get_dependencies(dsk, task=dsk[\"foo\"]) == set()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_deps_test_get_deps.assert_dependents_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_get_deps_test_get_deps.assert_dependents_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 167, "span_ids": ["test_get_deps"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_deps():\n dsk = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"c\")],\n \"e\": [\"b\", \"zzz\", \"b\"],\n \"f\": [[\"a\", \"b\"], 2, 3],\n }\n dependencies, dependents = get_deps(dsk)\n assert dependencies == {\n \"a\": set(),\n \"b\": {\"a\"},\n \"c\": set(),\n \"d\": {\"c\"},\n \"e\": {\"b\"},\n \"f\": {\"a\", \"b\"},\n }\n assert dependents == {\n \"a\": {\"b\", \"f\"},\n \"b\": {\"e\", \"f\"},\n \"c\": {\"d\"},\n \"d\": set(),\n \"e\": set(),\n \"f\": set(),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_flatten_MutateOnEq.__eq__.return.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_flatten_MutateOnEq.__eq__.return.False", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 178, "end_line": 193, "span_ids": ["MutateOnEq", "MutateOnEq.__eq__", "test_subs", "test_flatten"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_flatten():\n assert list(flatten(())) == []\n assert list(flatten(\"foo\")) == [\"foo\"]\n\n\ndef test_subs():\n assert subs((sum, [1, \"x\"]), \"x\", 2) == (sum, [1, 2])\n assert subs((sum, [1, [\"x\"]]), \"x\", 2) == (sum, [1, [2]])\n\n\nclass MutateOnEq:\n hit_eq = 0\n\n def __eq__(self, other):\n self.hit_eq += 1\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_no_key_data_eq_test_subs_no_key_data_eq.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_no_key_data_eq_test_subs_no_key_data_eq.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 205, "span_ids": ["test_subs_no_key_data_eq"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_subs_no_key_data_eq():\n # Numpy throws a deprecation warning on bool(array == scalar), which\n # pollutes the terminal. 
This test checks that `subs` never tries to\n # compare keys (scalars) with values (which could be arrays).\n a = MutateOnEq()\n subs(a, \"x\", 1)\n assert a.hit_eq == 0\n subs((add, a, \"x\"), \"x\", 1)\n assert a.hit_eq == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_with_unfriendly_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_test_subs_with_unfriendly_eq_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 208, "end_line": 262, "span_ids": ["test_subs_unexpected_hashable_key", "test_literal_serializable", "test_subs_with_unfriendly_eq", "test_quote", "test_subs_with_surprisingly_friendly_eq"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_subs_with_unfriendly_eq():\n try:\n import numpy as np\n except ImportError:\n return\n else:\n task = (np.sum, np.array([1, 2]))\n assert (subs(task, (4, 5), 1) == task) is True\n\n class MyException(Exception):\n pass\n\n class F:\n def __eq__(self, other):\n raise MyException()\n\n task = F()\n assert subs(task, 1, 2) is task\n\n\ndef test_subs_with_surprisingly_friendly_eq():\n try:\n import pandas as pd\n except ImportError:\n return\n else:\n df = pd.DataFrame()\n assert subs(df, \"x\", 1) is df\n\n\ndef test_subs_unexpected_hashable_key():\n class UnexpectedButHashable:\n def __init__(self):\n self.name = \"a\"\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n return isinstance(other, UnexpectedButHashable)\n\n assert subs((id, UnexpectedButHashable()), UnexpectedButHashable(), 1) == (id, 1)\n\n\ndef test_quote():\n literals = [[1, 2, 3], (add, 1, 2), [1, [2, 3]], (add, 1, (add, 2, 3)), {\"x\": \"x\"}]\n\n for l in literals:\n assert core.get({\"x\": quote(l)}, \"x\") == l\n\n\ndef test_literal_serializable():\n l = literal((add, 1, 2))\n assert pickle.loads(pickle.dumps(l)).data == (add, 1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_to_task_dask_test_to_task_dask.assert_dask_x__dask": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_to_task_dask_test_to_task_dask.assert_dask_x__dask", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 43, "end_line": 82, "span_ids": ["test_to_task_dask"], "tokens": 448}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size",
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:The dask.delayed:UserWarning\")\ndef test_to_task_dask():\n a = delayed(1, name=\"a\")\n b = delayed(2, name=\"b\")\n task, dask = to_task_dask([a, b, 3])\n assert task == [\"a\", \"b\", 3]\n\n task, dask = to_task_dask((a, b, 3))\n assert task == (tuple, [\"a\", \"b\", 3])\n assert dict(dask) == merge(a.dask, b.dask)\n\n task, dask = to_task_dask({a: 1, b: 2})\n assert task == (dict, [[\"b\", 2], [\"a\", 1]]) or task == (dict, [[\"a\", 1], [\"b\", 2]])\n assert dict(dask) == merge(a.dask, b.dask)\n\n f = namedtuple(\"f\", [\"x\", \"y\"])\n x = f(1, 2)\n task, dask = to_task_dask(x)\n assert task == x\n assert dict(dask) == {}\n\n task, dask = to_task_dask(slice(a, b, 3))\n assert task == (slice, \"a\", \"b\", 3)\n assert dict(dask) == merge(a.dask, b.dask)\n\n # Issue https://github.com/dask/dask/issues/2107\n class MyClass(dict):\n pass\n\n task, dask = to_task_dask(MyClass())\n assert type(task) is MyClass\n assert dict(dask) == {}\n\n # Custom dask objects\n x = Tuple({\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\")}, [\"a\", \"b\", \"c\"])\n task, dask = to_task_dask(x)\n assert task in dask\n f = dask.pop(task)\n assert f == (tuple, [\"a\", \"b\", \"c\"])\n assert dask == x._dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_test_delayed.assert_a_key_in_b_dask": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_test_delayed.assert_a_key_in_b_dask", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 85, "end_line": 95, "span_ids": ["test_delayed"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed():\n add2 = delayed(add)\n assert add2(1, 2).compute() == 3\n assert (add2(1, 2) + 3).compute() == 6\n assert add2(add2(1, 2), 3).compute() == 6\n\n a = delayed(1)\n assert a.compute() == 1\n assert 1 in a.dask.values()\n b = add2(add2(a, 2), 3)\n assert a.key in b.dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_with_dataclass_test_delayed_with_dataclass.assert_final_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_with_dataclass_test_delayed_with_dataclass.assert_final_compute_", "embedding": null, "metadata": {"file_path": 
"dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 99, "end_line": 115, "span_ids": ["test_delayed_with_dataclass"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_with_dataclass():\n dataclasses = pytest.importorskip(\"dataclasses\")\n\n # Avoid @dataclass decorator as Python < 3.7 fail to interpret the type hints\n ADataClass = dataclasses.make_dataclass(\n \"ADataClass\", [(\"a\", int), (\"b\", int, dataclasses.field(init=False))]\n )\n\n literal = dask.delayed(3)\n with_class = dask.delayed({\"a\": ADataClass(a=literal)})\n\n def return_nested(obj):\n return obj[\"a\"].a\n\n final = delayed(return_nested)(with_class)\n\n assert final.compute() == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_operators_test_operators.if_matmul_.assert_eval_c_d_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_operators_test_operators.if_matmul_.assert_eval_c_d_co", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 137, "span_ids": ["test_operators"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_operators():\n a = delayed([1, 2, 3])\n assert a[0].compute() == 1\n assert (a + a).compute() == [1, 2, 3, 1, 2, 3]\n b = delayed(2)\n assert a[:b].compute() == [1, 2]\n\n a = delayed(10)\n assert (a + 1).compute() == 11\n assert (1 + a).compute() == 11\n assert (a >> 1).compute() == 5\n assert (a > 2).compute()\n assert (a**2).compute() == 100\n\n if matmul:\n\n class dummy:\n def __matmul__(self, other):\n return 4\n\n c = delayed(dummy()) # noqa\n d = delayed(dummy()) # noqa\n\n assert (eval(\"c @ d\")).compute() == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_methods_test_np_dtype_of_delayed.assert_delayed_np_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_methods_test_np_dtype_of_delayed.assert_delayed_np_array_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 145, "end_line": 176, "span_ids": ["test_np_dtype_of_delayed", 
"test_attributes", "test_method_getattr_call_same_task", "test_methods"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_methods():\n a = delayed(\"a b c d e\")\n assert a.split(\" \").compute() == [\"a\", \"b\", \"c\", \"d\", \"e\"]\n assert a.upper().replace(\"B\", \"A\").split().count(\"A\").compute() == 2\n assert a.split(\" \", pure=True).key == a.split(\" \", pure=True).key\n o = a.split(\" \", dask_key_name=\"test\")\n assert o.key == \"test\"\n\n\ndef test_attributes():\n a = delayed(2 + 1j)\n assert a.real._key == a.real._key\n assert a.real.compute() == 2\n assert a.imag.compute() == 1\n assert (a.real + a.imag).compute() == 3\n\n\ndef test_method_getattr_call_same_task():\n a = delayed([1, 2, 3])\n o = a.index(1)\n # Don't getattr the method, then call in separate task\n assert getattr not in {v[0] for v in o.__dask_graph__().values()}\n\n\ndef test_np_dtype_of_delayed():\n # This used to result in a segfault due to recursion, see\n # https://github.com/dask/dask/pull/4374#issuecomment-454381465\n np = pytest.importorskip(\"numpy\")\n x = delayed(1)\n with pytest.raises(TypeError):\n np.dtype(x)\n assert delayed(np.array([1], dtype=\"f8\")).dtype.compute() == np.dtype(\"f8\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_visualise_warn_test_delayed_visualise_warn.None_1.z_visualise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_visualise_warn_test_delayed_visualise_warn.None_1.z_visualise_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 175, "end_line": 193, "span_ids": ["test_delayed_visualise_warn"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_visualise_warn():\n # Raise a warning when user calls visualise()\n # instead of visualize()\n def inc(x):\n return x + 1\n\n z = dask.delayed(inc)(1)\n z.compute()\n\n with pytest.warns(\n UserWarning, match=\"dask.delayed objects have no `visualise` method\"\n ):\n z.visualise(file_name=\"desk_graph.svg\")\n\n # with no args\n with pytest.warns(\n UserWarning, match=\"dask.delayed objects have no `visualise` method\"\n ):\n z.visualise()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_errors_test_delayed_errors.None_5": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_errors_test_delayed_errors.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 207, "span_ids": ["test_delayed_errors"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_errors():\n a = delayed([1, 2, 3])\n # Immutable\n pytest.raises(TypeError, lambda: setattr(a, \"foo\", 1))\n pytest.raises(TypeError, lambda: setitem(a, 1, 0))\n # Can't iterate, or check if contains\n pytest.raises(TypeError, lambda: 1 in a)\n pytest.raises(TypeError, lambda: list(a))\n # No dynamic generation of magic/hidden methods\n pytest.raises(AttributeError, lambda: a._hidden())\n # Truth of delayed forbidden\n pytest.raises(TypeError, lambda: bool(a))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_common_subexpressions_test_lists.assert_c_compute_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_common_subexpressions_test_lists.assert_c_compute_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 236, "span_ids": ["test_delayed_optimize", "test_lists", "test_common_subexpressions"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_common_subexpressions():\n a = delayed([1, 2, 3])\n res = a[0] + a[0]\n assert a[0].key in res.dask\n assert a.key in res.dask\n assert len(res.dask) == 3\n\n\ndef test_delayed_optimize():\n x = Delayed(\"b\", {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (inc, \"b\")})\n (x2,) = dask.optimize(x)\n # Delayed's __dask_optimize__ culls out 'c'\n assert sorted(x2.dask.keys()) == [\"a\", \"b\"]\n assert x2._layer != x2._key\n # Optimize generates its own layer name, which doesn't match the key.\n # `Delayed._rebuild` handles this.\n\n\ndef test_lists():\n a = delayed(1)\n b = delayed(2)\n c = delayed(sum)([a, b])\n assert c.compute() == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_test_literates.assert_delayed_lit_a_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_test_literates.assert_delayed_lit_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 239, "end_line": 253, "span_ids": ["test_literates"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_literates():\n a = delayed(1)\n b = a + 1\n lit = (a, b, 3)\n assert delayed(lit).compute() == (1, 2, 3)\n lit = [a, b, 3]\n assert delayed(lit).compute() == [1, 2, 3]\n lit = {a, b, 3}\n assert delayed(lit).compute() == {1, 2, 3}\n lit = {a: \"a\", b: \"b\", 3: \"c\"}\n assert delayed(lit).compute() == {1: \"a\", 2: \"b\", 3: \"c\"}\n assert delayed(lit)[a].compute() == \"a\"\n lit = {\"a\": a, \"b\": b, \"c\": 3}\n assert delayed(lit).compute() == {\"a\": 1, \"b\": 2, \"c\": 3}\n assert delayed(lit)[\"a\"].compute() == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_keys_test_iterators.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_literates_keys_test_iterators.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 276, "span_ids": ["test_lists_are_concrete", "test_iterators", "test_literates_keys"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_literates_keys():\n a = delayed(1)\n b = a + 1\n lit = (a, b, 3)\n assert delayed(lit).key != delayed(lit).key\n assert delayed(lit, pure=True).key == delayed(lit, pure=True).key\n\n\ndef test_lists_are_concrete():\n a = delayed(1)\n b = delayed(2)\n c = delayed(max)([[a, 10], [b, 20]], key=lambda x: x[0])[1]\n\n assert c.compute() == 20\n\n\ndef test_iterators():\n a = delayed(1)\n b = delayed(2)\n c = delayed(sum)(iter([a, b]))\n\n assert c.compute() == 3\n\n def f(seq):\n return sum(seq)\n\n c = delayed(f)(iter([a, b]))\n assert c.compute() == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_traverse_false_test_pure.assert_myrand_key_my": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_traverse_false_test_pure.assert_myrand_key_my", 
"embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 320, "span_ids": ["test_pure", "test_traverse_false"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_traverse_false():\n # Create a list with a dask value, and test that it's not computed\n def fail(*args):\n raise ValueError(\"shouldn't have computed\")\n\n a = delayed(fail)()\n\n # list\n x = [a, 1, 2, 3]\n res = delayed(x, traverse=False).compute()\n assert len(res) == 4\n assert res[0] is a\n assert res[1:] == x[1:]\n\n # tuple that looks like a task\n x = (fail, a, (fail, a))\n res = delayed(x, traverse=False).compute()\n assert isinstance(res, tuple)\n assert res[0] == fail\n assert res[1] is a\n\n # list containing task-like-things\n x = [1, (fail, a), a]\n res = delayed(x, traverse=False).compute()\n assert isinstance(res, list)\n assert res[0] == 1\n assert res[1][0] == fail and res[1][1] is a\n assert res[2] is a\n\n # traverse=False still hits top level\n b = delayed(1)\n x = delayed(b, traverse=False)\n assert x.compute() == 1\n\n\ndef test_pure():\n v1 = delayed(add, pure=True)(1, 2)\n v2 = delayed(add, pure=True)(1, 2)\n assert v1.key == v2.key\n\n myrand = delayed(random)\n assert myrand().key != myrand().key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_pure_global_setting_test_pure_global_setting.None_7.assert_element_element": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_pure_global_setting_test_pure_global_setting.None_7.assert_element_element", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 323, "end_line": 360, "span_ids": ["test_pure_global_setting"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pure_global_setting():\n # delayed functions\n func = delayed(add)\n\n with dask.config.set(delayed_pure=True):\n assert func(1, 2).key == func(1, 2).key\n\n with dask.config.set(delayed_pure=False):\n assert func(1, 2).key != func(1, 2).key\n\n func = delayed(add, pure=True)\n with dask.config.set(delayed_pure=False):\n assert func(1, 2).key == func(1, 2).key\n\n # delayed objects\n assert delayed(1).key != delayed(1).key\n with dask.config.set(delayed_pure=True):\n assert delayed(1).key == delayed(1).key\n\n with dask.config.set(delayed_pure=False):\n assert delayed(1, pure=True).key == delayed(1, pure=True).key\n\n # delayed methods\n data = delayed([1, 2, 3])\n assert data.index(1).key != 
data.index(1).key\n\n with dask.config.set(delayed_pure=True):\n assert data.index(1).key == data.index(1).key\n assert data.index(1, pure=False).key != data.index(1, pure=False).key\n\n with dask.config.set(delayed_pure=False):\n assert data.index(1, pure=True).key == data.index(1, pure=True).key\n\n # magic methods always pure\n with dask.config.set(delayed_pure=False):\n assert data.index.key == data.index.key\n element = data[0]\n assert (element + element).key == (element + element).key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_test_nout.assert_x_compute_tup": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_test_nout.assert_x_compute_tup", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 363, "end_line": 394, "span_ids": ["test_nout"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nout():\n func = delayed(lambda x: (x, -x), nout=2, pure=True)\n x = func(1)\n assert len(x) == 2\n a, b = x\n assert compute(a, b) == (1, -1)\n assert a._length is None\n assert b._length is None\n pytest.raises(TypeError, lambda: len(a))\n pytest.raises(TypeError, lambda: list(a))\n\n pytest.raises(ValueError, lambda: delayed(add, nout=-1))\n pytest.raises(ValueError, lambda: delayed(add, nout=True))\n\n func = delayed(add, nout=None)\n a = func(1)\n assert a._length is None\n pytest.raises(TypeError, lambda: list(a))\n pytest.raises(TypeError, lambda: len(a))\n\n func = delayed(lambda x: (x,), nout=1, pure=True)\n x = func(1)\n assert len(x) == 1\n (a,) = x\n assert a.compute() == 1\n assert a._length is None\n pytest.raises(TypeError, lambda: len(a))\n\n func = delayed(lambda x: tuple(), nout=0, pure=True)\n x = func(1)\n assert len(x) == 0\n assert x.compute() == tuple()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_custom_delayed_test_custom_delayed.assert_compute_n_x2_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_custom_delayed_test_custom_delayed.assert_compute_n_x2_x_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 414, "end_line": 420, "span_ids": ["test_custom_delayed"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_delayed():\n x = Tuple({\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\")}, [\"a\", \"b\", \"c\"])\n x2 = delayed(add, pure=True)(x, (4, 5, 6))\n n = delayed(len, pure=True)(x)\n assert delayed(len, pure=True)(x).key == n.key\n assert x2.compute() == (1, 2, 3, 4, 5, 6)\n assert compute(n, x2, x) == (3, (1, 2, 3, 4, 5, 6), (1, 2, 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_delayed_test_array_delayed.assert_delayed_arr_compu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_delayed_test_array_delayed.assert_delayed_arr_compu", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 422, "end_line": 441, "span_ids": ["test_array_delayed"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:The dask.delayed:UserWarning\")\ndef test_array_delayed():\n np = pytest.importorskip(\"numpy\")\n da = pytest.importorskip(\"dask.array\")\n\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(5, 5))\n val = delayed(sum)([arr, darr, 1])\n assert isinstance(val, Delayed)\n assert np.allclose(val.compute(), arr + arr + 1)\n assert val.sum().compute() == (arr + arr + 1).sum()\n assert val[0, 0].compute() == (arr + arr + 1)[0, 0]\n\n task, dsk = to_task_dask(darr)\n assert not darr.dask.keys() - dsk.keys()\n diff = dsk.keys() - darr.dask.keys()\n assert len(diff) == 1\n\n delayed_arr = delayed(darr)\n assert (delayed_arr.compute() == arr).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_bag_delayed_test_array_bag_delayed.assert_out_compute_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_array_bag_delayed_test_array_bag_delayed.assert_out_compute_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 457, "end_line": 468, "span_ids": ["test_array_bag_delayed"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_bag_delayed():\n da = pytest.importorskip(\"dask.array\")\n np = 
pytest.importorskip(\"numpy\")\n\n arr1 = np.arange(100).reshape((10, 10))\n arr2 = arr1.dot(arr1.T)\n darr1 = da.from_array(arr1, chunks=(5, 5))\n darr2 = da.from_array(arr2, chunks=(5, 5))\n b = db.from_sequence([1, 2, 3])\n seq = [arr1, arr2, darr1, darr2, b]\n out = delayed(sum)([i.sum() for i in seq])\n assert out.compute() == 2 * arr1.sum() + 2 * arr2.sum() + sum([1, 2, 3])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_picklable_test_delayed_picklable.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_picklable_test_delayed_picklable.None_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 462, "end_line": 482, "span_ids": ["test_delayed_picklable"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_picklable():\n # Delayed\n x = delayed(divmod, nout=2, pure=True)(1, 2)\n y = pickle.loads(pickle.dumps(x))\n assert x.dask == y.dask\n assert x._key == y._key\n assert x._length == y._length\n # DelayedLeaf\n x = delayed(1j + 2)\n y = pickle.loads(pickle.dumps(x))\n assert x.dask == y.dask\n assert x._key == y._key\n assert x._nout == y._nout\n assert x._pure == y._pure\n # DelayedAttr\n x = x.real\n y = pickle.loads(pickle.dumps(x))\n assert x._obj._key == y._obj._key\n assert x._obj.dask == y._obj.dask\n assert x._attr == y._attr\n assert x._key == y._key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_compute_forward_kwargs_identity.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_delayed_compute_forward_kwargs_identity.return.x", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 482, "end_line": 521, "span_ids": ["test_delayed_callable", "test_delayed_name_on_call", "test_delayed_method_descriptor", "test_callable_obj", "test_delayed_compute_forward_kwargs", "identity"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_compute_forward_kwargs():\n x = delayed(1) + 2\n x.compute(bogus_keyword=10)\n\n\ndef test_delayed_method_descriptor():\n delayed(bytes.decode)(b\"\") # does not 
err\n\n\ndef test_delayed_callable():\n f = delayed(add, pure=True)\n v = f(1, 2)\n assert v.dask == {v.key: (add, 1, 2)}\n\n assert f.dask == {f.key: add}\n assert f.compute() == add\n\n\ndef test_delayed_name_on_call():\n f = delayed(add, pure=True)\n assert f(1, 2, dask_key_name=\"foo\")._key == \"foo\"\n\n\ndef test_callable_obj():\n class Foo:\n def __init__(self, a):\n self.a = a\n\n def __call__(self):\n return 2\n\n foo = Foo(1)\n f = delayed(foo)\n assert f.compute() is foo\n assert f.a.compute() == 1\n assert f().compute() == 2\n\n\ndef identity(x):\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_name_consistent_across_instances_test_name_consistent_across_instances.assert_func_1__key_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_name_consistent_across_instances_test_name_consistent_across_instances.assert_func_1__key_i", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 537, "end_line": 545, "span_ids": ["test_name_consistent_across_instances"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_name_consistent_across_instances():\n func = delayed(identity, pure=True)\n\n data = {\"x\": 1, \"y\": 25, \"z\": [1, 2, 3]}\n assert func(data)._key == \"identity-4f318f3c27b869239e97c3ac07f7201a\"\n\n data = {\"x\": 1, 1: \"x\"}\n assert func(data)._key == func(data)._key\n assert func(1)._key == \"identity-7258833899272585e16d0ec36b21a3de\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_sensitive_to_partials_test_keys_from_array._check_dsk_xs_0_dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_sensitive_to_partials_test_keys_from_array._check_dsk_xs_0_dask_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 553, "end_line": 594, "span_ids": ["test_sensitive_to_partials", "test_delayed_name", "test_keys_from_array", "test_finalize_name"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sensitive_to_partials():\n assert (\n delayed(partial(add, 10), pure=True)(2)._key\n != delayed(partial(add, 20), 
pure=True)(2)._key\n )\n\n\ndef test_delayed_name():\n assert delayed(1)._key.startswith(\"int-\")\n assert delayed(1, pure=True)._key.startswith(\"int-\")\n assert delayed(1, name=\"X\")._key == \"X\"\n\n def myfunc(x):\n return x + 1\n\n assert delayed(myfunc)(1).key.startswith(\"myfunc\")\n\n\ndef test_finalize_name():\n da = pytest.importorskip(\"dask.array\")\n\n x = da.ones(10, chunks=5)\n v = delayed([x])\n assert set(x.dask).issubset(v.dask)\n\n def key(s):\n if isinstance(s, tuple):\n s = s[0]\n # Ignore _ in 'ones_like'\n return s.split(\"-\")[0].replace(\"_\", \"\")\n\n assert all(key(k).isalpha() for k in v.dask)\n\n\ndef test_keys_from_array():\n da = pytest.importorskip(\"dask.array\")\n from dask.array.utils import _check_dsk\n\n X = da.ones((10, 10), chunks=5).to_delayed().flatten()\n xs = [delayed(inc)(x) for x in X]\n\n _check_dsk(xs[0].dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py__Mostly_copied_from_http_test_delayed_decorator_on_method.assert_isinstance_A_addst": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py__Mostly_copied_from_http_test_delayed_decorator_on_method.assert_isinstance_A_addst", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 578, "end_line": 619, "span_ids": ["test_keys_from_array", "test_delayed_decorator_on_method"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Mostly copied from https://github.com/pytoolz/toolz/pull/220\ndef test_delayed_decorator_on_method():\n class A:\n BASE = 10\n\n def __init__(self, base):\n self.BASE = base\n\n @delayed\n def addmethod(self, x, y):\n return self.BASE + x + y\n\n @classmethod\n @delayed\n def addclass(cls, x, y):\n return cls.BASE + x + y\n\n @staticmethod\n @delayed\n def addstatic(x, y):\n return x + y\n\n a = A(100)\n assert a.addmethod(3, 4).compute() == 107\n assert A.addmethod(a, 3, 4).compute() == 107\n\n assert a.addclass(3, 4).compute() == 17\n assert A.addclass(3, 4).compute() == 17\n\n assert a.addstatic(3, 4).compute() == 7\n assert A.addstatic(3, 4).compute() == 7\n\n # We want the decorated methods to be actual methods for instance methods\n # and class methods since their first arguments are the object and the\n # class respectively. 
Or in other words, the first argument is generated by\n # the runtime based on the object/class before the dot.\n assert isinstance(a.addmethod, types.MethodType)\n assert isinstance(A.addclass, types.MethodType)\n\n # For static methods (and regular functions), the decorated methods should\n # be Delayed objects.\n assert isinstance(A.addstatic, Delayed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_persist_nested_test_persist_nested.assert_res_2_4_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_persist_nested_test_persist_nested.assert_res_2_4_5_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 67, "span_ids": ["test_persist_nested"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_nested(c):\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n result = persist({\"a\": a, \"b\": [1, 2, b]}, (c, 2), 4, [5])\n assert isinstance(result[0][\"a\"], Delayed)\n assert isinstance(result[0][\"b\"][2], Delayed)\n assert isinstance(result[1][0], Delayed)\n\n sol = ({\"a\": 6, \"b\": [1, 2, 7]}, (8, 2), 4, [5])\n assert compute(*result) == sol\n\n res = persist([a, b], c, 4, [5], traverse=False)\n assert res[0][0] is a\n assert res[0][1] is b\n assert res[1].compute() == 8\n assert res[2:] == (4, [5])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_dataframe_test_futures_to_delayed_dataframe.with_pytest_raises_TypeEr.ddf.dd_from_delayed_1_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_dataframe_test_futures_to_delayed_dataframe.with_pytest_raises_TypeEr.ddf.dd_from_delayed_1_2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 69, "end_line": 80, "span_ids": ["test_futures_to_delayed_dataframe"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_futures_to_delayed_dataframe(c):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n\n futures = c.scatter([df, df])\n ddf = 
dd.from_delayed(futures)\n dd.utils.assert_eq(ddf.compute(), pd.concat([df, df], axis=0))\n\n with pytest.raises(TypeError):\n ddf = dd.from_delayed([1, 2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_bag_test_futures_to_delayed_array.assert_eq_A_compute_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_to_delayed_bag_test_futures_to_delayed_array.assert_eq_A_compute_np", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 129, "span_ids": ["test_futures_to_delayed_array", "test_futures_to_delayed_bag"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_futures_to_delayed_bag(c):\n L = [1, 2, 3]\n\n futures = c.scatter([L, L])\n b = db.from_delayed(futures)\n assert list(b) == L + L\n\n\ndef test_futures_to_delayed_array(c):\n da = pytest.importorskip(\"dask.array\")\n from dask.array.utils import assert_eq\n\n np = pytest.importorskip(\"numpy\")\n x = np.arange(5)\n\n futures = c.scatter([x, x])\n A = da.concatenate(\n [da.from_delayed(f, shape=x.shape, dtype=x.dtype) for f in futures], axis=0\n )\n assert_eq(A.compute(), np.concatenate([x, x], axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_local_get_with_distributed_active_test_to_hdf_distributed.test_to_hdf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_local_get_with_distributed_active_test_to_hdf_distributed.test_to_hdf_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 133, "end_line": 156, "span_ids": ["test_to_hdf_distributed", "test_local_get_with_distributed_active"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\n \"ignore:Running on a single-machine scheduler when a distributed client \"\n \"is active might lead to unexpected results.\"\n)\n@gen_cluster(client=True)\nasync def test_local_get_with_distributed_active(c, s, a, b):\n\n with dask.config.set(scheduler=\"sync\"):\n x = delayed(inc)(1).persist()\n await asyncio.sleep(0.01)\n assert not s.tasks # scheduler 
hasn't done anything\n\n x = delayed(inc)(2).persist(scheduler=\"sync\") # noqa F841\n await asyncio.sleep(0.01)\n assert not s.tasks # scheduler hasn't done anything\n\n\ndef test_to_hdf_distributed(c):\n pytest.importorskip(\"numpy\")\n pytest.importorskip(\"pandas\")\n\n from ..dataframe.io.tests.test_hdf import test_to_hdf\n\n test_to_hdf()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_hdf_scheduler_distributed_test_to_hdf_scheduler_distributed.test_to_hdf_schedulers_No": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_hdf_scheduler_distributed_test_to_hdf_scheduler_distributed.test_to_hdf_schedulers_No", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 159, "end_line": 183, "span_ids": ["test_to_hdf_scheduler_distributed"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\n \"ignore:Running on a single-machine scheduler when a distributed client \"\n \"is active might lead to unexpected results.\"\n)\n@pytest.mark.parametrize(\n \"npartitions\",\n [\n 1,\n pytest.param(\n 4,\n marks=pytest.mark.xfail(reason=\"HDF not multi-process safe\", strict=False),\n ),\n pytest.param(\n 10,\n marks=pytest.mark.xfail(reason=\"HDF not multi-process safe\", strict=False),\n ),\n ],\n)\ndef test_to_hdf_scheduler_distributed(npartitions, c):\n pytest.importorskip(\"numpy\")\n pytest.importorskip(\"pandas\")\n\n from ..dataframe.io.tests.test_hdf import test_to_hdf_schedulers\n\n test_to_hdf_schedulers(None, npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_graph_test_futures_in_graph.assert_xxyy3_compute_sche": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_graph_test_futures_in_graph.assert_xxyy3_compute_sche", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 156, "end_line": 165, "span_ids": ["test_futures_in_graph"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_futures_in_graph(c):\n x, y = delayed(1), delayed(2)\n xx = delayed(add)(x, x)\n yy = 
delayed(add)(y, y)\n xxyy = delayed(add)(xx, yy)\n\n xxyy2 = c.persist(xxyy)\n xxyy3 = delayed(add)(xxyy2, 10)\n\n assert xxyy3.compute(scheduler=\"dask.distributed\") == ((1 + 1) + (2 + 2)) + 10", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_label_test_label.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_label_test_label.None_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 87, "span_ids": ["test_label"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_label():\n assert label(\"x\") == \"x\"\n assert label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\") == \"elemwise-#\"\n\n cache = {}\n result = label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\", cache=cache)\n assert result == \"elemwise-#0\"\n # cached\n result = label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\", cache=cache)\n assert result == \"elemwise-#0\"\n assert len(cache) == 1\n\n result = label(\"elemwise-e890b510984f344edea9a5e5fe05c0db\", cache=cache)\n assert result == \"elemwise-#1\"\n assert len(cache) == 2\n\n result = label(\"elemwise-ffcd9aa2231d466b5aa91e8bfa9e9487\", cache=cache)\n assert result == \"elemwise-#0\"\n assert len(cache) == 2\n\n assert label(\"x\", cache=cache) == \"x\"\n assert len(cache) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_with_unconnected_node_test_to_graphviz_with_unconnected_node.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_with_unconnected_node_test_to_graphviz_with_unconnected_node.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 162, "span_ids": ["test_to_graphviz_with_unconnected_node"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_graphviz_with_unconnected_node():\n dsk[\"g\"] = 3\n g = to_graphviz(dsk, verbose=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 11 # 11 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"}\n\n g = to_graphviz(dsk, verbose=True, collapse_outputs=True)\n labels = 
list(filter(None, map(get_label, g.body)))\n assert len(labels) == 6 # 6 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_test_dot_graph.try_.finally_.ensure_not_exists_target_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_test_dot_graph.try_.finally_.ensure_not_exists_target_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 165, "end_line": 200, "span_ids": ["test_dot_graph"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"format,typ\",\n [\n pytest.param(\"png\", Image, marks=ipython_not_installed_mark),\n pytest.param(\n \"jpeg\",\n Image,\n marks=pytest.mark.xfail(\n reason=\"jpeg not always supported in dot\", strict=False\n ),\n ),\n (\"dot\", type(None)),\n (\"pdf\", type(None)),\n pytest.param(\"svg\", SVG, marks=ipython_not_installed_mark),\n ],\n)\n@pytest.mark.xfail(\n sys.platform == \"win32\",\n reason=\"graphviz/pango on conda-forge currently broken for windows\",\n strict=False,\n)\ndef test_dot_graph(tmpdir, format, typ):\n # Use a name that the shell would interpret specially to ensure that we're\n # not vulnerable to shell injection when interacting with `dot`.\n filename = str(tmpdir.join(\"$(touch should_not_get_created.txt)\"))\n\n target = \".\".join([filename, format])\n ensure_not_exists(target)\n try:\n result = dot_graph(dsk, filename=filename, format=format)\n\n assert not os.path.exists(\"should_not_get_created.txt\")\n assert os.path.isfile(target)\n assert isinstance(result, typ)\n finally:\n ensure_not_exists(target)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_filenames_and_formats_test_filenames_and_formats.assert_isinstance_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_filenames_and_formats_test_filenames_and_formats.assert_isinstance_result_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 254, "end_line": 284, "span_ids": ["test_filenames_and_formats"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\n \"filename,format,target,expected_result_type\",\n [\n pytest.param(\n \"mydaskpdf\", \"svg\", \"mydaskpdf.svg\", SVG, marks=ipython_not_installed_mark\n ),\n (\"mydask.pdf\", None, \"mydask.pdf\", type(None)),\n pytest.param(\n \"mydask.pdf\", \"svg\", \"mydask.pdf.svg\", SVG, marks=ipython_not_installed_mark\n ),\n pytest.param(\n \"mydaskpdf\", None, \"mydaskpdf.png\", Image, marks=ipython_not_installed_mark\n ),\n pytest.param(\n \"mydask.pdf.svg\",\n None,\n \"mydask.pdf.svg\",\n SVG,\n marks=ipython_not_installed_mark,\n ),\n ],\n)\n@pytest.mark.xfail(\n sys.platform == \"win32\",\n reason=\"graphviz/pango on conda-forge currently broken for windows\",\n strict=False,\n)\ndef test_filenames_and_formats(tmpdir, filename, format, target, expected_result_type):\n result = dot_graph(dsk, filename=str(tmpdir.join(filename)), format=format)\n assert tmpdir.join(target).exists()\n assert isinstance(result, expected_result_type)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_delayed_kwargs_apply_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_delayed_kwargs_apply_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 295, "span_ids": ["test_delayed_kwargs_apply", "test_immutable_attributes"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delayed_kwargs_apply():\n def f(x, y=True):\n return x + y\n\n x = delayed(f)(1, y=2)\n label = task_label(x.dask[x.key])\n assert \"f\" in label\n assert \"apply\" not in label\n\n\ndef test_immutable_attributes():\n def inc(x):\n return x + 1\n\n dsk = {\"a\": (inc, 1), \"b\": (inc, 2), \"c\": (add, \"a\", \"b\")}\n attrs_func = {\"a\": {}}\n attrs_data = {\"b\": {}}\n attrs_func_test = copy.deepcopy(attrs_func)\n attrs_data_test = copy.deepcopy(attrs_data)\n\n to_graphviz(\n dsk,\n function_attributes=attrs_func,\n data_attributes=attrs_data,\n )\n\n assert attrs_func_test == attrs_func\n assert attrs_data_test == attrs_data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_hashing.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_hashing.py_pytest_", "embedding": null, "metadata": {"file_path": "dask/tests/test_hashing.py", "file_name": "test_hashing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 43, "span_ids": ["test_hash_buffer", "imports", "test_hash_buffer_hex", "test_hashers"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nfrom dask.hashing import hash_buffer, hash_buffer_hex, hashers\n\nnp = pytest.importorskip(\"numpy\")\n\nbuffers = [\n b\"abc\",\n bytearray(b\"123\"),\n memoryview(b\"456\"),\n np.array(42),\n np.ones((100, 100)),\n np.zeros((100, 100), dtype=[(\"a\", \"i4\"), (\"b\", \"i2\")]),\n np.ones(10000, dtype=np.int8)[1:], # unaligned\n]\n\n\n@pytest.mark.parametrize(\"x\", buffers)\ndef test_hash_buffer(x):\n for hasher in [None] + hashers:\n h = hash_buffer(x, hasher=hasher)\n assert isinstance(h, bytes)\n assert 8 <= len(h) < 32\n assert h == hash_buffer(x, hasher=hasher)\n\n\n@pytest.mark.parametrize(\"x\", buffers)\ndef test_hash_buffer_hex(x):\n for hasher in [None] + hashers:\n h = hash_buffer_hex(x, hasher=hasher)\n assert isinstance(h, str)\n assert 16 <= len(h) < 64\n assert h == hash_buffer_hex(x, hasher=hasher)\n\n\n@pytest.mark.parametrize(\"hasher\", hashers)\ndef test_hashers(hasher):\n # Sanity check\n x = b\"x\"\n h = hasher(x)\n assert isinstance(h, bytes)\n assert 8 <= len(h) < 32", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_looks_at_cache_test_start_state_with_independent_but_runnable_tasks.assert_start_state_from_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_looks_at_cache_test_start_state_with_independent_but_runnable_tasks.assert_start_state_from_d", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 34, "end_line": 49, "span_ids": ["test_start_state_looks_at_cache", "test_start_state_with_independent_but_runnable_tasks", "test_start_state_with_redirects"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_start_state_looks_at_cache():\n dsk = {\"b\": (inc, \"a\")}\n cache = {\"a\": 1}\n result = start_state_from_dask(dsk, cache)\n assert result[\"dependencies\"][\"b\"] == {\"a\"}\n assert result[\"ready\"] == [\"b\"]\n\n\ndef test_start_state_with_redirects():\n dsk = {\"x\": 1, \"y\": \"x\", \"z\": (inc, \"y\")}\n result = start_state_from_dask(dsk)\n assert result[\"cache\"] == {\"x\": 1}\n\n\ndef test_start_state_with_independent_but_runnable_tasks():\n assert start_state_from_dask({\"x\": (inc, 1)})[\"ready\"] == [\"x\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_with_tasks_no_deps_test_start_state_with_tasks_no_deps.assert_state_dependents_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_start_state_with_tasks_no_deps_test_start_state_with_tasks_no_deps.assert_state_dependents_", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 59, "span_ids": ["test_start_state_with_tasks_no_deps"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_start_state_with_tasks_no_deps():\n dsk = {\"a\": [1, (inc, 2)], \"b\": [1, 2, 3, 4], \"c\": (inc, 3)}\n state = start_state_from_dask(dsk)\n assert list(state[\"cache\"].keys()) == [\"b\"]\n assert \"a\" in state[\"ready\"] and \"c\" in state[\"ready\"]\n deps = {k: set() for k in \"abc\"}\n assert state[\"dependencies\"] == deps\n assert state[\"dependents\"] == deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_finish_task_test_finish_task.assert_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_finish_task_test_finish_task.assert_state_", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 89, "span_ids": ["test_finish_task"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_finish_task():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (inc, \"x\"), \"w\": (add, \"z\", \"y\")}\n sortkey = order(dsk).get\n state = start_state_from_dask(dsk)\n state[\"ready\"].remove(\"z\")\n state[\"running\"] = {\"z\", \"other-task\"}\n task = \"z\"\n result = 2\n\n state[\"cache\"][\"z\"] = result\n finish_task(dsk, task, state, set(), sortkey)\n\n assert state == {\n \"cache\": {\"y\": 2, \"z\": 2},\n \"dependencies\": {\n \"w\": {\"y\", \"z\"},\n \"x\": set(),\n \"y\": set(),\n \"z\": {\"x\"},\n },\n \"finished\": {\"z\"},\n \"released\": {\"x\"},\n \"running\": {\"other-task\"},\n \"dependents\": {\"w\": set(), \"x\": {\"z\"}, \"y\": {\"w\"}, \"z\": {\"w\"}},\n \"ready\": [\"w\"],\n \"waiting\": {},\n \"waiting_data\": {\"y\": {\"w\"}, \"z\": {\"w\"}},\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_TestGetAsync_test_sort_key.assert_sorted_L_key_sort": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_TestGetAsync_test_sort_key.assert_sorted_L_key_sort", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 112, "span_ids": ["TestGetAsync.test_get_sync_num_workers", "test_sort_key", "test_cache_options", "TestGetAsync"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGetAsync(GetFunctionTestMixin):\n get = staticmethod(get_sync)\n\n def test_get_sync_num_workers(self):\n self.get({\"x\": (inc, \"y\"), \"y\": 1}, \"x\", num_workers=2)\n\n\ndef test_cache_options():\n cache = {}\n\n def inc2(x):\n assert \"y\" in cache\n return x + 1\n\n with dask.config.set(cache=cache):\n get_sync({\"x\": (inc2, \"y\"), \"y\": 1}, \"x\")\n\n\ndef test_sort_key():\n L = [\"x\", (\"x\", 1), (\"z\", 0), (\"x\", 0)]\n assert sorted(L, key=sortkey) == [\"x\", (\"x\", 0), (\"x\", 1), (\"z\", 0)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_callback_test_callback.get_dsk_a_start_callb": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_callback_test_callback.get_dsk_a_start_callb", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 119, "end_line": 135, "span_ids": ["test_callback"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_callback():\n f = lambda x: x + 1\n dsk = {\"a\": (f, 1)}\n from dask.threaded import get\n\n def start_callback(key, d, state):\n assert key == \"a\" or key is None\n assert d == dsk\n assert isinstance(state, dict)\n\n def end_callback(key, value, d, state, worker_id):\n assert key == \"a\" or key is None\n assert value == 2 or value is None\n assert d == dsk\n assert isinstance(state, dict)\n\n get(dsk, \"a\", start_callback=start_callback, end_callback=end_callback)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_out_of_band_pickling_test_out_of_band_pickling.assert_np_all_a_a2_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_out_of_band_pickling_test_out_of_band_pickling.assert_np_all_a_a2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 64, "span_ids": ["test_out_of_band_pickling"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason=\"requires pickle protocol 5\")\ndef test_out_of_band_pickling():\n \"\"\"Test that out-of-band pickling works\"\"\"\n np = pytest.importorskip(\"numpy\")\n pytest.importorskip(\"cloudpickle\", minversion=\"1.3.0\")\n\n a = np.arange(5)\n\n l = []\n b = _dumps(a, buffer_callback=l.append)\n assert len(l) == 1\n assert isinstance(l[0], pickle.PickleBuffer)\n assert memoryview(l[0]) == memoryview(a)\n\n a2 = _loads(b, buffers=l)\n assert np.all(a == a2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_bad_test_optimize_graph_false.assert_len_keys_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_bad_test_optimize_graph_false.assert_len_keys_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 72, "end_line": 158, "span_ids": ["test_lambda_with_cloudpickle", "lambda_result", "test_dumps_loads", "test_remote_exception", "test_reuse_pool", "NotUnpickleable.__getstate__", "test_fuse_doesnt_clobber_intermediates", "NotUnpickleable", "test_unpicklable_args_generate_errors", "bad", "test_errors_propagate", "test_lambda_results_with_cloudpickle", "NotUnpickleable.__setstate__", "test_optimize_graph_false"], "tokens": 631}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bad():\n raise ValueError(\"12345\")\n\n\ndef test_errors_propagate():\n dsk = {\"x\": (bad,)}\n\n with pytest.raises(ValueError) as e:\n get(dsk, \"x\")\n assert \"12345\" in str(e.value)\n\n\ndef test_remote_exception():\n e = TypeError(\"hello\")\n a = remote_exception(e, \"traceback-body\")\n b = remote_exception(e, \"traceback-body\")\n\n assert type(a) == type(b)\n assert isinstance(a, TypeError)\n assert \"hello\" in str(a)\n assert \"Traceback\" in str(a)\n assert \"traceback-body\" in str(a)\n\n\ndef test_lambda_with_cloudpickle():\n dsk = {\"x\": 2, \"y\": (lambda x: x + 1, \"x\")}\n assert get(dsk, \"y\") == 3\n\n\ndef lambda_result():\n return lambda x: x + 1\n\n\ndef test_lambda_results_with_cloudpickle():\n dsk = 
{\"x\": (lambda_result,)}\n f = get(dsk, \"x\")\n assert f(2) == 3\n\n\nclass NotUnpickleable:\n def __getstate__(self):\n return ()\n\n def __setstate__(self, state):\n raise ValueError(\"Can't unpickle me\")\n\n\ndef test_unpicklable_args_generate_errors():\n a = NotUnpickleable()\n\n dsk = {\"x\": (bool, a)}\n\n with pytest.raises(ValueError):\n get(dsk, \"x\")\n\n dsk = {\"x\": (bool, \"a\"), \"a\": a}\n\n with pytest.raises(ValueError):\n get(dsk, \"x\")\n\n\n@pytest.mark.parametrize(\"pool_typ\", [multiprocessing.Pool, ProcessPoolExecutor])\ndef test_reuse_pool(pool_typ):\n with pool_typ(CPU_COUNT) as pool:\n with dask.config.set(pool=pool):\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n\n\ndef test_dumps_loads():\n with dask.config.set(func_dumps=pickle.dumps, func_loads=pickle.loads):\n assert get({\"x\": 1, \"y\": (add, \"x\", 2)}, \"y\") == 3\n\n\ndef test_fuse_doesnt_clobber_intermediates():\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, 10, \"y\")}\n assert get(d, [\"y\", \"z\"]) == (2, 12)\n\n\ndef test_optimize_graph_false():\n from dask.callbacks import Callback\n\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, 10, \"y\")}\n keys = []\n with Callback(pretask=lambda key, *args: keys.append(key)):\n get(d, \"z\", optimize_graph=False)\n assert len(keys) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_random_seeds_check_for_pytest.return._FAKE_MODULE_FOR_TEST_in": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_random_seeds_check_for_pytest.return._FAKE_MODULE_FOR_TEST_in", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 208, "span_ids": ["check_for_pytest", "test_random_seeds"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"random\", [\"numpy\", \"random\"])\ndef test_random_seeds(random):\n if random == \"numpy\":\n np = pytest.importorskip(\"numpy\")\n random = np.random\n else:\n import random\n\n @delayed(pure=False)\n def f():\n return tuple(random.randint(0, 10000) for i in range(5))\n\n N = 10\n with dask.config.set(scheduler=\"processes\"):\n (results,) = compute([f() for _ in range(N)])\n\n assert len(set(results)) == N\n\n\ndef check_for_pytest():\n \"\"\"We check for spawn by ensuring subprocess doesn't have modules only\n parent process should have:\n \"\"\"\n import sys\n\n return \"FAKE_MODULE_FOR_TEST\" in sys.modules", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_used_python3_posix_test_custom_context_used_python3_posix.try_.finally_.del_sys_modules_FAKE_MOD": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_used_python3_posix_test_custom_context_used_python3_posix.try_.finally_.del_sys_modules_FAKE_MOD", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 235, "span_ids": ["test_custom_context_used_python3_posix"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"Windows doesn't support different contexts\"\n)\ndef test_custom_context_used_python3_posix():\n \"\"\"The 'multiprocessing.context' config is used to create the pool.\n\n We assume default is 'spawn', and therefore test for 'fork'.\n \"\"\"\n # We check for 'fork' by ensuring subprocess doesn't have modules only\n # parent process should have:\n\n def check_for_pytest():\n import sys\n\n return \"FAKE_MODULE_FOR_TEST\" in sys.modules\n\n import sys\n\n sys.modules[\"FAKE_MODULE_FOR_TEST\"] = 1\n try:\n with dask.config.set({\"multiprocessing.context\": \"fork\"}):\n result = get({\"x\": (check_for_pytest,)}, \"x\")\n assert result\n finally:\n del sys.modules[\"FAKE_MODULE_FOR_TEST\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_get_context_using_python3_posix_test_get_context_using_python3_posix.None_1.assert_get_context_is_m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_get_context_using_python3_posix_test_get_context_using_python3_posix.None_1.assert_get_context_is_m", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 254, "end_line": 266, "span_ids": ["test_get_context_using_python3_posix"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"Windows doesn't support different contexts\"\n)\ndef test_get_context_using_python3_posix():\n \"\"\"get_context() respects configuration.\n\n If default context is changed this test will need to change too.\n \"\"\"\n assert get_context() is multiprocessing.get_context(\"spawn\")\n with dask.config.set({\"multiprocessing.context\": 
\"forkserver\"}):\n assert get_context() is multiprocessing.get_context(\"forkserver\")\n with dask.config.set({\"multiprocessing.context\": \"fork\"}):\n assert get_context() is multiprocessing.get_context(\"fork\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_ignored_elsewhere_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_custom_context_ignored_elsewhere_", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 248, "end_line": 268, "span_ids": ["test_custom_context_ignored_elsewhere", "test_get_context_always_default"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(sys.platform != \"win32\", reason=\"POSIX supports different contexts\")\ndef test_custom_context_ignored_elsewhere():\n \"\"\"On Windows, setting 'multiprocessing.context' doesn't explode.\n\n Presumption is it's not used since it's unsupported, but mostly we care about\n not breaking anything.\n \"\"\"\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n with pytest.warns(UserWarning):\n with dask.config.set({\"multiprocessing.context\": \"forkserver\"}):\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n\n\n@pytest.mark.skipif(sys.platform != \"win32\", reason=\"POSIX supports different contexts\")\ndef test_get_context_always_default():\n \"\"\"On Python 2/Windows, get_context() always returns same context.\"\"\"\n assert get_context() is multiprocessing\n with pytest.warns(UserWarning):\n with dask.config.set({\"multiprocessing.context\": \"forkserver\"}):\n assert get_context() is multiprocessing", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_test_fuse.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_test_fuse.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 54, "end_line": 139, "span_ids": ["test_fuse"], "tokens": 755}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse():\n fuse = fuse2 # tests both `fuse` and `fuse_linear`\n d = {\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": 
(inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\"w\": (inc, (inc, (inc, (add, \"a\", \"b\")))), \"a\": 1, \"b\": 2}\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"z-y-x-w\": (inc, (inc, (inc, (add, \"a\", \"b\")))),\n \"a\": 1,\n \"b\": 2,\n \"w\": \"z-y-x-w\",\n }\n )\n\n d = {\n \"NEW\": (inc, \"y\"),\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": (inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\n \"NEW\": (inc, \"y\"),\n \"w\": (inc, (inc, \"y\")),\n \"y\": (inc, (add, \"a\", \"b\")),\n \"a\": 1,\n \"b\": 2,\n }\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"NEW\": (inc, \"z-y\"),\n \"x-w\": (inc, (inc, \"z-y\")),\n \"z-y\": (inc, (add, \"a\", \"b\")),\n \"a\": 1,\n \"b\": 2,\n \"w\": \"x-w\",\n \"y\": \"z-y\",\n }\n )\n\n d = {\n \"v\": (inc, \"y\"),\n \"u\": (inc, \"w\"),\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": (inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": (inc, \"c\"),\n \"b\": (inc, \"d\"),\n \"c\": 1,\n \"d\": 2,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\n \"u\": (inc, (inc, (inc, \"y\"))),\n \"v\": (inc, \"y\"),\n \"y\": (inc, (add, \"a\", \"b\")),\n \"a\": (inc, 1),\n \"b\": (inc, 2),\n }\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"x-w-u\": (inc, (inc, (inc, \"z-y\"))),\n \"v\": (inc, \"z-y\"),\n \"z-y\": (inc, (add, \"c-a\", \"d-b\")),\n \"c-a\": (inc, 1),\n \"d-b\": (inc, 2),\n \"a\": \"c-a\",\n \"b\": \"d-b\",\n \"u\": \"x-w-u\",\n \"y\": \"z-y\",\n }\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse.d_4_test_fuse.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse.d_4_test_fuse.None_9", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 169, "span_ids": ["test_fuse"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse():\n # ... 
other code\n\n d = {\n \"a\": (inc, \"x\"),\n \"b\": (inc, \"x\"),\n \"c\": (inc, \"x\"),\n \"d\": (inc, \"c\"),\n \"x\": (inc, \"y\"),\n \"y\": 0,\n }\n assert fuse(d, rename_keys=False) == with_deps(\n {\"a\": (inc, \"x\"), \"b\": (inc, \"x\"), \"d\": (inc, (inc, \"x\")), \"x\": (inc, 0)}\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\n \"a\": (inc, \"y-x\"),\n \"b\": (inc, \"y-x\"),\n \"c-d\": (inc, (inc, \"y-x\")),\n \"y-x\": (inc, 0),\n \"d\": \"c-d\",\n \"x\": \"y-x\",\n }\n )\n\n d = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (add, \"b\", \"b\")}\n assert fuse(d, rename_keys=False) == with_deps(\n {\"b\": (inc, 1), \"c\": (add, \"b\", \"b\")}\n )\n assert fuse(d, rename_keys=True) == with_deps(\n {\"a-b\": (inc, 1), \"c\": (add, \"a-b\", \"a-b\"), \"b\": \"a-b\"}\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_keys_test_fuse_keys.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_keys_test_fuse_keys.None_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 172, "end_line": 204, "span_ids": ["test_fuse_keys"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_keys():\n fuse = fuse2 # tests both `fuse` and `fuse_linear`\n d = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (inc, \"b\")}\n keys = [\"b\"]\n assert fuse(d, keys, rename_keys=False) == with_deps(\n {\"b\": (inc, 1), \"c\": (inc, \"b\")}\n )\n assert fuse(d, keys, rename_keys=True) == with_deps(\n {\"a-b\": (inc, 1), \"c\": (inc, \"a-b\"), \"b\": \"a-b\"}\n )\n\n d = {\n \"w\": (inc, \"x\"),\n \"x\": (inc, \"y\"),\n \"y\": (inc, \"z\"),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n }\n keys = [\"x\", \"z\"]\n assert fuse(d, keys, rename_keys=False) == with_deps(\n {\"w\": (inc, \"x\"), \"x\": (inc, (inc, \"z\")), \"z\": (add, \"a\", \"b\"), \"a\": 1, \"b\": 2}\n )\n assert fuse(d, keys, rename_keys=True) == with_deps(\n {\n \"w\": (inc, \"y-x\"),\n \"y-x\": (inc, (inc, \"z\")),\n \"z\": (add, \"a\", \"b\"),\n \"a\": 1,\n \"b\": 2,\n \"x\": \"y-x\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_test_inline.assert_inline_d_a_inl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_test_inline.assert_inline_d_a_inl", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 207, "end_line": 239, 
"span_ids": ["test_inline"], "tokens": 487}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline():\n d = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (inc, \"b\"), \"d\": (add, \"a\", \"c\")}\n assert inline(d) == {\"a\": 1, \"b\": (inc, 1), \"c\": (inc, \"b\"), \"d\": (add, 1, \"c\")}\n assert inline(d, [\"a\", \"b\", \"c\"]) == {\n \"a\": 1,\n \"b\": (inc, 1),\n \"c\": (inc, (inc, 1)),\n \"d\": (add, 1, (inc, (inc, 1))),\n }\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, \"x\", \"y\")}\n assert inline(d) == {\"x\": 1, \"y\": (inc, 1), \"z\": (add, 1, \"y\")}\n assert inline(d, keys=\"y\") == {\"x\": 1, \"y\": (inc, 1), \"z\": (add, 1, (inc, 1))}\n assert inline(d, keys=\"y\", inline_constants=False) == {\n \"x\": 1,\n \"y\": (inc, \"x\"),\n \"z\": (add, \"x\", (inc, \"x\")),\n }\n\n d = {\"a\": 1, \"b\": \"a\", \"c\": \"b\", \"d\": [\"a\", \"b\", \"c\"], \"e\": (add, (len, \"d\"), \"a\")}\n assert inline(d, \"d\") == {\n \"a\": 1,\n \"b\": 1,\n \"c\": 1,\n \"d\": [1, 1, 1],\n \"e\": (add, (len, [1, 1, 1]), 1),\n }\n assert inline(d, \"a\", inline_constants=False) == {\n \"a\": 1,\n \"b\": 1,\n \"c\": \"b\",\n \"d\": [1, \"b\", \"c\"],\n \"e\": (add, (len, \"d\"), 1),\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_test_inline_ignores_curries_and_partials.assert_a_not_in_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_test_inline_ignores_curries_and_partials.assert_a_not_in_result", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 263, "end_line": 277, "span_ids": ["test_inline_functions", "test_inline_ignores_curries_and_partials"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_functions():\n x, y, i, d = \"xyid\"\n dsk = {\"out\": (add, i, d), i: (inc, x), d: (double, y), x: 1, y: 1}\n\n result = inline_functions(dsk, [], fast_functions={inc})\n expected = {\"out\": (add, (inc, x), d), d: (double, y), x: 1, y: 1}\n assert result == expected\n\n\ndef test_inline_ignores_curries_and_partials():\n dsk = {\"x\": 1, \"y\": 2, \"a\": (partial(add, 1), \"x\"), \"b\": (inc, \"a\")}\n\n result = inline_functions(dsk, [], fast_functions={add})\n assert result[\"b\"] == (inc, dsk[\"a\"])\n assert \"a\" not in result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_non_hashable_test_inline_functions_non_hashable.assert_b_not_in_result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_non_hashable_test_inline_functions_non_hashable.assert_b_not_in_result", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 259, "end_line": 273, "span_ids": ["test_inline_functions_non_hashable"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_functions_non_hashable():\n class NonHashableCallable:\n def __call__(self, a):\n return a + 1\n\n def __hash__(self):\n raise TypeError(\"Not hashable\")\n\n nohash = NonHashableCallable()\n\n dsk = {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (nohash, \"b\"), \"d\": (inc, \"c\")}\n\n result = inline_functions(dsk, [], fast_functions={inc})\n assert result[\"c\"] == (nohash, dsk[\"b\"])\n assert \"b\" not in result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_doesnt_shrink_fast_functions_at_top_test_inline_traverses_lists.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_doesnt_shrink_fast_functions_at_top_test_inline_traverses_lists.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 297, "end_line": 308, "span_ids": ["test_inline_traverses_lists", "test_inline_doesnt_shrink_fast_functions_at_top"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_doesnt_shrink_fast_functions_at_top():\n dsk = {\"x\": (inc, \"y\"), \"y\": 1}\n result = inline_functions(dsk, [], fast_functions={inc})\n assert result == dsk\n\n\ndef test_inline_traverses_lists():\n x, y, i, d = \"xyid\"\n dsk = {\"out\": (sum, [i, d]), i: (inc, x), d: (double, y), x: 1, y: 1}\n expected = {\"out\": (sum, [(inc, x), d]), d: (double, y), x: 1, y: 1}\n result = inline_functions(dsk, [], fast_functions={inc})\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_protects_output_keys_test_inline_cull_dependencies.inline_d2_b_depende": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_inline_functions_protects_output_keys_test_inline_cull_dependencies.inline_d2_b_depende", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 311, "end_line": 333, "span_ids": ["test_functions_of", "test_inline_functions_protects_output_keys", "test_inline_cull_dependencies"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_inline_functions_protects_output_keys():\n dsk = {\"x\": (inc, 1), \"y\": (double, \"x\")}\n assert inline_functions(dsk, [], [inc]) == {\"y\": (double, (inc, 1))}\n assert inline_functions(dsk, [\"x\"], [inc]) == {\"y\": (double, \"x\"), \"x\": (inc, 1)}\n\n\ndef test_functions_of():\n a = lambda x: x\n b = lambda x: x\n assert functions_of((a, 1)) == {a}\n assert functions_of((a, (b, 1))) == {a, b}\n assert functions_of((a, [(b, 1)])) == {a, b}\n assert functions_of((a, [[[(b, 1)]]])) == {a, b}\n assert functions_of(1) == set()\n assert functions_of(a) == set()\n assert functions_of((a,)) == {a}\n\n\ndef test_inline_cull_dependencies():\n d = {\"a\": 1, \"b\": \"a\", \"c\": \"b\", \"d\": [\"a\", \"b\", \"c\"], \"e\": (add, (len, \"d\"), \"a\")}\n\n d2, dependencies = cull(d, [\"d\", \"e\"])\n inline(d2, {\"b\"}, dependencies=dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input_test_fuse_reductions_single_input.d_3._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input_test_fuse_reductions_single_input.d_3._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 367, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 771}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n def f(*args):\n return args\n\n d = {\"a\": 1, \"b1\": (f, \"a\"), \"b2\": (f, \"a\", \"a\"), \"c\": (f, \"b1\", \"b2\")}\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": 
(f, (f, \"a\"), (f, \"a\", \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\"a\": 1, \"b1-b2-c\": (f, (f, \"a\"), (f, \"a\", \"a\")), \"c\": \"b1-b2-c\"}\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\", \"a\"),\n \"b3\": (f, \"a\", \"a\", \"a\"),\n \"c\": (f, \"b1\", \"b2\", \"b3\"),\n }\n assert fuse(d, ave_width=2.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=2.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": (f, (f, \"a\"), (f, \"a\", \"a\"), (f, \"a\", \"a\", \"a\"))}\n )\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-c\": (f, (f, \"a\"), (f, \"a\", \"a\"), (f, \"a\", \"a\", \"a\")),\n \"c\": \"b1-b2-b3-c\",\n }\n )\n\n d = {\"a\": 1, \"b1\": (f, \"a\"), \"b2\": (f, \"a\"), \"c\": (f, \"a\", \"b1\", \"b2\")}\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": (f, \"a\", (f, \"a\"), (f, \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\"a\": 1, \"b1-b2-c\": (f, \"a\", (f, \"a\"), (f, \"a\")), \"c\": \"b1-b2-c\"}\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"c\": (f, \"b1\", \"b2\"),\n \"d1\": (f, \"c\"),\n \"d2\": (f, \"c\"),\n \"e\": (f, \"d1\", \"d2\"),\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_12_test_fuse_reductions_single_input.None_22": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_12_test_fuse_reductions_single_input.None_22", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 419, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 690}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"c\": (f, (f, \"a\"), (f, \"a\")), \"e\": (f, (f, \"c\"), (f, \"c\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-c\": (f, (f, \"a\"), (f, \"a\")),\n \"d1-d2-e\": (f, (f, \"c\"), (f, \"c\")),\n \"c\": \"b1-b2-c\",\n \"e\": \"d1-d2-e\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"b4\": (f, \"a\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b3\", \"b4\"),\n \"d\": (f, \"c1\", \"c2\"),\n }\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n expected = with_deps(\n {\n \"a\": 1,\n \"c1\": (f, (f, \"a\"), (f, \"a\")),\n \"c2\": (f, (f, \"a\"), (f, \"a\")),\n \"d\": (f, \"c1\", \"c2\"),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1\": (f, (f, \"a\"), (f, \"a\")),\n \"b3-b4-c2\": (f, (f, \"a\"), (f, \"a\")),\n \"d\": (f, \"c1\", \"c2\"),\n \"c1\": \"b1-b2-c1\",\n \"c2\": \"b3-b4-c2\",\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\"a\": 1, \"d\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\")))}\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_23_test_fuse_reductions_single_input.None_27": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_23_test_fuse_reductions_single_input.None_27", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 420, "end_line": 465, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-c1-c2-d\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"d\": \"b1-b2-b3-b4-c1-c2-d\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"b4\": (f, \"a\"),\n \"b5\": (f, \"a\"),\n \"b6\": (f, \"a\"),\n \"b7\": (f, \"a\"),\n \"b8\": (f, \"a\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b3\", \"b4\"),\n \"c3\": (f, \"b5\", \"b6\"),\n \"c4\": (f, \"b7\", \"b8\"),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"e\": (f, \"d1\", \"d2\"),\n }\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n expected = with_deps(\n {\n \"a\": 1,\n \"c1\": (f, (f, \"a\"), (f, \"a\")),\n \"c2\": (f, (f, \"a\"), (f, \"a\")),\n \"c3\": (f, (f, \"a\"), (f, \"a\")),\n \"c4\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"e\": (f, \"d1\", \"d2\"),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_9_test_fuse_reductions_single_input.None_34": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_9_test_fuse_reductions_single_input.None_34", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 466, "end_line": 523, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 754}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1\": (f, (f, \"a\"), (f, \"a\")),\n \"b3-b4-c2\": (f, (f, \"a\"), (f, \"a\")),\n \"b5-b6-c3\": (f, (f, \"a\"), (f, \"a\")),\n \"b7-b8-c4\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"e\": (f, \"d1\", \"d2\"),\n \"c1\": \"b1-b2-c1\",\n \"c2\": \"b3-b4-c2\",\n \"c3\": \"b5-b6-c3\",\n \"c4\": \"b7-b8-c4\",\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"d1\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d2\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"e\": (f, \"d1\", \"d2\"),\n }\n )\n assert fuse(d, ave_width=3, rename_keys=False) == expected\n assert fuse(d, ave_width=4.6, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-c1-c2-d1\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b5-b6-b7-b8-c3-c4-d2\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"e\": (f, \"d1\", \"d2\"),\n \"d1\": \"b1-b2-b3-b4-c1-c2-d1\",\n \"d2\": \"b5-b6-b7-b8-c3-c4-d2\",\n }\n )\n assert fuse(d, ave_width=3, rename_keys=True) == expected\n assert fuse(d, ave_width=4.6, rename_keys=True) == expected\n assert fuse(d, ave_width=4.7, rename_keys=False) == with_deps(\n {\n \"a\": 1,\n \"e\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n }\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_35_test_fuse_reductions_single_input.None_37": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_35_test_fuse_reductions_single_input.None_37", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 524, "end_line": 571, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 667}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... 
other code\n assert fuse(d, ave_width=4.7, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"e\": \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"b4\": (f, \"a\"),\n \"b5\": (f, \"a\"),\n \"b6\": (f, \"a\"),\n \"b7\": (f, \"a\"),\n \"b8\": (f, \"a\"),\n \"b9\": (f, \"a\"),\n \"b10\": (f, \"a\"),\n \"b11\": (f, \"a\"),\n \"b12\": (f, \"a\"),\n \"b13\": (f, \"a\"),\n \"b14\": (f, \"a\"),\n \"b15\": (f, \"a\"),\n \"b16\": (f, \"a\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b3\", \"b4\"),\n \"c3\": (f, \"b5\", \"b6\"),\n \"c4\": (f, \"b7\", \"b8\"),\n \"c5\": (f, \"b9\", \"b10\"),\n \"c6\": (f, \"b11\", \"b12\"),\n \"c7\": (f, \"b13\", \"b14\"),\n \"c8\": (f, \"b15\", \"b16\"),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"d3\": (f, \"c5\", \"c6\"),\n \"d4\": (f, \"c7\", \"c8\"),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n }\n assert fuse(d, ave_width=1.9, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1.9, rename_keys=True) == with_deps(d)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_13_test_fuse_reductions_single_input.None_39": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_13_test_fuse_reductions_single_input.None_39", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 572, "end_line": 593, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n expected = with_deps(\n {\n \"a\": 1,\n \"c1\": (f, (f, \"a\"), (f, \"a\")),\n \"c2\": (f, (f, \"a\"), (f, \"a\")),\n \"c3\": (f, (f, \"a\"), (f, \"a\")),\n \"c4\": (f, (f, \"a\"), (f, \"a\")),\n \"c5\": (f, (f, \"a\"), (f, \"a\")),\n \"c6\": (f, (f, \"a\"), (f, \"a\")),\n \"c7\": (f, (f, \"a\"), (f, \"a\")),\n \"c8\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"d3\": (f, \"c5\", \"c6\"),\n \"d4\": (f, \"c7\", \"c8\"),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_14_test_fuse_reductions_single_input.None_43": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_14_test_fuse_reductions_single_input.None_43", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 594, "end_line": 637, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 741}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1\": (f, (f, \"a\"), (f, \"a\")),\n \"b3-b4-c2\": (f, (f, \"a\"), (f, \"a\")),\n \"b5-b6-c3\": (f, (f, \"a\"), (f, \"a\")),\n \"b7-b8-c4\": (f, (f, \"a\"), (f, \"a\")),\n \"b10-b9-c5\": (f, (f, \"a\"), (f, \"a\")),\n \"b11-b12-c6\": (f, (f, \"a\"), (f, \"a\")),\n \"b13-b14-c7\": (f, (f, \"a\"), (f, \"a\")),\n \"b15-b16-c8\": (f, (f, \"a\"), (f, \"a\")),\n \"d1\": (f, \"c1\", \"c2\"),\n \"d2\": (f, \"c3\", \"c4\"),\n \"d3\": (f, \"c5\", \"c6\"),\n \"d4\": (f, \"c7\", \"c8\"),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n \"c1\": \"b1-b2-c1\",\n \"c2\": \"b3-b4-c2\",\n \"c3\": \"b5-b6-c3\",\n \"c4\": \"b7-b8-c4\",\n \"c5\": \"b10-b9-c5\",\n \"c6\": \"b11-b12-c6\",\n \"c7\": \"b13-b14-c7\",\n \"c8\": \"b15-b16-c8\",\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"d1\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d2\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d3\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"d4\": (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n }\n )\n assert fuse(d, ave_width=3, rename_keys=False) == expected\n assert fuse(d, ave_width=4.6, rename_keys=False) == expected\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_16_test_fuse_reductions_single_input.None_47": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_16_test_fuse_reductions_single_input.None_47", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 638, "end_line": 689, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 661}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-c1-c2-d1\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b5-b6-b7-b8-c3-c4-d2\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b10-b11-b12-b9-c5-c6-d3\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"b13-b14-b15-b16-c7-c8-d4\": (\n f,\n (f, (f, \"a\"), (f, \"a\")),\n (f, (f, \"a\"), (f, \"a\")),\n ),\n \"e1\": (f, \"d1\", \"d2\"),\n \"e2\": (f, \"d3\", \"d4\"),\n \"f\": (f, \"e1\", \"e2\"),\n \"d1\": \"b1-b2-b3-b4-c1-c2-d1\",\n \"d2\": \"b5-b6-b7-b8-c3-c4-d2\",\n \"d3\": \"b10-b11-b12-b9-c5-c6-d3\",\n \"d4\": \"b13-b14-b15-b16-c7-c8-d4\",\n }\n )\n assert fuse(d, ave_width=3, rename_keys=True) == expected\n assert fuse(d, ave_width=4.6, rename_keys=True) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"e1\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"e2\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"f\": (f, \"e1\", \"e2\"),\n }\n )\n assert fuse(d, ave_width=4.7, rename_keys=False) == expected\n assert fuse(d, ave_width=7.4, rename_keys=False) == expected\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_18_test_fuse_reductions_single_input.None_50": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.expected_18_test_fuse_reductions_single_input.None_50", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 690, "end_line": 727, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 582}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n expected = with_deps(\n {\n \"a\": 1,\n \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e1\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"b10-b11-b12-b13-b14-b15-b16-b9-c5-c6-c7-c8-d3-d4-e2\": (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n \"f\": (f, \"e1\", \"e2\"),\n \"e1\": \"b1-b2-b3-b4-b5-b6-b7-b8-c1-c2-c3-c4-d1-d2-e1\",\n \"e2\": \"b10-b11-b12-b13-b14-b15-b16-b9-c5-c6-c7-c8-d3-d4-e2\",\n }\n )\n assert fuse(d, ave_width=4.7, rename_keys=True) == expected\n assert fuse(d, ave_width=7.4, rename_keys=True) == expected\n assert fuse(d, ave_width=7.5, rename_keys=False) == with_deps(\n {\n \"a\": 1,\n \"f\": (\n f,\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n ),\n }\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_51_test_fuse_reductions_single_input.None_57": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_51_test_fuse_reductions_single_input.None_57", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 728, "end_line": 766, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 694}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n assert fuse(d, ave_width=7.5, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b10-b11-b12-b13-b14-b15-b16-b2-b3-b4-b5-b6-b7-b8-b9-c1-c2-c3-c4-c5-c6-c7-c8-d1-d2-d3-d4-e1-e2-f\": (\n f,\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n (\n f,\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n (f, (f, (f, \"a\"), (f, \"a\")), (f, (f, \"a\"), (f, \"a\"))),\n ),\n ),\n \"f\": \"b1-b10-b11-b12-b13-b14-b15-b16-b2-b3-b4-b5-b6-b7-b8-b9-c1-c2-c3-c4-c5-c6-c7-c8-d1-d2-d3-d4-e1-e2-f\",\n }\n )\n\n d = {\"a\": 1, \"b\": (f, \"a\")}\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps({\"b\": (f, 1)})\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a-b\": (f, 1), \"b\": \"a-b\"}\n )\n\n d = {\"a\": 1, \"b\": (f, \"a\"), \"c\": (f, \"b\"), \"d\": (f, \"c\")}\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps({\"d\": (f, (f, (f, 1)))})\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a-b-c-d\": (f, (f, (f, 1))), \"d\": \"a-b-c-d\"}\n )\n\n d = {\"a\": 1, \"b\": (f, \"a\"), \"c\": (f, \"a\", \"b\"), \"d\": (f, \"a\", \"c\")}\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\"a\": 1, \"d\": (f, \"a\", (f, \"a\", (f, \"a\")))}\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a\": 1, \"b-c-d\": (f, \"a\", (f, \"a\", (f, \"a\"))), \"d\": \"b-c-d\"}\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.d_22_test_fuse_reductions_single_input.expected_27.with_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.d_22_test_fuse_reductions_single_input.expected_27.with_deps_", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 768, "end_line": 831, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 779}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"c1\": (f, \"b1\"),\n \"d1\": (f, \"c1\"),\n \"e1\": (f, \"d1\"),\n \"f\": (f, \"e1\", \"b2\"),\n }\n expected = with_deps(\n {\"a\": 1, \"b2\": (f, \"a\"), \"e1\": (f, (f, (f, (f, \"a\")))), \"f\": (f, \"e1\", \"b2\")}\n )\n assert fuse(d, ave_width=1, rename_keys=False) == expected\n assert fuse(d, ave_width=1.9, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b2\": (f, \"a\"),\n \"b1-c1-d1-e1\": (f, (f, (f, (f, \"a\")))),\n \"f\": (f, \"e1\", \"b2\"),\n \"e1\": \"b1-c1-d1-e1\",\n }\n )\n assert fuse(d, ave_width=1, rename_keys=True) == expected\n assert fuse(d, ave_width=1.9, rename_keys=True) == expected\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"f\": (f, (f, (f, (f, (f, \"a\")))), (f, \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1-d1-e1-f\": (f, (f, (f, (f, (f, \"a\")))), (f, \"a\")),\n \"f\": \"b1-b2-c1-d1-e1-f\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"c1\": (f, \"a\", \"b1\"),\n \"d1\": (f, \"a\", \"c1\"),\n \"e1\": (f, \"a\", \"d1\"),\n \"f\": (f, \"a\", \"e1\", \"b2\"),\n }\n expected = with_deps(\n {\n \"a\": 1,\n \"b2\": (f, \"a\"),\n \"e1\": (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))),\n \"f\": (f, \"a\", \"e1\", \"b2\"),\n }\n )\n assert fuse(d, ave_width=1, rename_keys=False) == expected\n assert fuse(d, ave_width=1.9, rename_keys=False) == expected\n expected = with_deps(\n {\n \"a\": 1,\n \"b2\": (f, \"a\"),\n \"b1-c1-d1-e1\": (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))),\n \"f\": (f, \"a\", \"e1\", \"b2\"),\n \"e1\": \"b1-c1-d1-e1\",\n }\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_66_test_fuse_reductions_single_input.d_29._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_66_test_fuse_reductions_single_input.d_29._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 832, "end_line": 896, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 750}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n assert fuse(d, ave_width=1, rename_keys=True) == expected\n assert fuse(d, ave_width=1.9, rename_keys=True) == expected\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\"a\": 1, \"f\": (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))), (f, \"a\"))}\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-b2-c1-d1-e1-f\": (\n f,\n \"a\",\n (f, \"a\", (f, \"a\", (f, \"a\", (f, \"a\")))),\n (f, \"a\"),\n ),\n \"f\": \"b1-b2-c1-d1-e1-f\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b1\": (f, \"a\"),\n \"b2\": (f, \"a\"),\n \"b3\": (f, \"a\"),\n \"c1\": (f, \"b1\"),\n \"c2\": (f, \"b2\"),\n \"c3\": (f, \"b3\"),\n \"d1\": (f, \"c1\"),\n \"d2\": (f, \"c2\"),\n \"d3\": (f, \"c3\"),\n \"e\": (f, \"d1\", \"d2\", \"d3\"),\n \"f\": (f, \"e\"),\n \"g\": (f, \"f\"),\n }\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\n \"a\": 1,\n \"d1\": (f, (f, (f, \"a\"))),\n \"d2\": (f, (f, (f, \"a\"))),\n \"d3\": (f, (f, (f, \"a\"))),\n \"g\": (f, (f, (f, \"d1\", \"d2\", \"d3\"))),\n }\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\n \"a\": 1,\n \"b1-c1-d1\": (f, (f, (f, \"a\"))),\n \"b2-c2-d2\": (f, (f, (f, \"a\"))),\n \"b3-c3-d3\": (f, (f, (f, \"a\"))),\n \"e-f-g\": (f, (f, (f, \"d1\", \"d2\", \"d3\"))),\n \"d1\": \"b1-c1-d1\",\n \"d2\": \"b2-c2-d2\",\n \"d3\": \"b3-c3-d3\",\n \"g\": \"e-f-g\",\n }\n )\n\n d = {\n \"a\": 1,\n \"b\": (f, \"a\"),\n \"c\": (f, \"b\"),\n \"d\": (f, \"b\", \"c\"),\n \"e\": (f, \"d\"),\n \"f\": (f, \"e\"),\n \"g\": (f, \"d\", \"f\"),\n }\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_72_test_fuse_reductions_single_input.None_73": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_single_input.None_72_test_fuse_reductions_single_input.None_73", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 897, "end_line": 909, "span_ids": ["test_fuse_reductions_single_input"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_single_input():\n # ... other code\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\"b\": (f, 1), \"d\": (f, \"b\", (f, \"b\")), \"g\": (f, \"d\", (f, (f, \"d\")))}\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\n \"a-b\": (f, 1),\n \"c-d\": (f, \"b\", (f, \"b\")),\n \"e-f-g\": (f, \"d\", (f, (f, \"d\"))),\n \"b\": \"a-b\",\n \"d\": \"c-d\",\n \"g\": \"e-f-g\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed_test_fuse_stressed.d._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed_test_fuse_stressed.d._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 912, "end_line": 973, "span_ids": ["test_fuse_stressed"], "tokens": 1058}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_stressed():\n def f(*args):\n return args\n\n d = {\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\": 1,\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0),\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0): (\n f,\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1),\n ),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 0): (\n f,\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\",\n (slice(0, 10, None), slice(0, 10, None)),\n ),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0): (\n \"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\",\n 0,\n 1,\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1): (\n 
f,\n (\n f,\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 1, 1),\n (f, [(\"cholesky-lt-dot-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0, 1, 0)]),\n ),\n ),\n (\"cholesky-lt-dot-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0, 1, 0): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1),\n ),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 1): (\n f,\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\",\n (slice(0, 10, None), slice(10, 20, None)),\n ),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1),\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1): (f, (10, 10)),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 1, 1): (\n f,\n \"array-original-27b9f9d257a80fa6adae06a98faf71eb\",\n (slice(10, 20, None), slice(10, 20, None)),\n ),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1): (\n f,\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0),\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 1),\n ),\n (\"cholesky-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0): (\n f,\n (\"array-27b9f9d257a80fa6adae06a98faf71eb\", 0, 0),\n ),\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed.keys_test_fuse_stressed.assert_rv_with_deps_rv": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_stressed.keys_test_fuse_stressed.assert_rv_with_deps_rv", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 974, "end_line": 981, "span_ids": ["test_fuse_stressed"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_stressed():\n # ... 
other code\n keys = {\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 0),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 0, 1),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 0),\n (\"cholesky-upper-26a6b670a8aabb7e2f8936db7ccb6a88\", 1, 1),\n }\n rv = fuse(d, keys=keys, ave_width=2, rename_keys=True)\n assert rv == with_deps(rv[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input_test_fuse_reductions_multiple_input.None_11": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input_test_fuse_reductions_multiple_input.None_11", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 984, "end_line": 1035, "span_ids": ["test_fuse_reductions_multiple_input"], "tokens": 713}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_multiple_input():\n def f(*args):\n return args\n\n d = {\"a1\": 1, \"a2\": 2, \"b\": (f, \"a1\", \"a2\"), \"c\": (f, \"b\")}\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps({\"c\": (f, (f, 1, 2))})\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\"a1-a2-b-c\": (f, (f, 1, 2)), \"c\": \"a1-a2-b-c\"}\n )\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(\n {\"a1\": 1, \"a2\": 2, \"c\": (f, (f, \"a1\", \"a2\"))}\n )\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(\n {\"a1\": 1, \"a2\": 2, \"b-c\": (f, (f, \"a1\", \"a2\")), \"c\": \"b-c\"}\n )\n\n d = {\n \"a1\": 1,\n \"a2\": 2,\n \"b1\": (f, \"a1\"),\n \"b2\": (f, \"a1\", \"a2\"),\n \"b3\": (f, \"a2\"),\n \"c\": (f, \"b1\", \"b2\", \"b3\"),\n }\n expected = with_deps(d)\n assert fuse(d, ave_width=1, rename_keys=False) == expected\n assert fuse(d, ave_width=2.9, rename_keys=False) == expected\n assert fuse(d, ave_width=1, rename_keys=True) == expected\n assert fuse(d, ave_width=2.9, rename_keys=True) == expected\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\"a1\": 1, \"a2\": 2, \"c\": (f, (f, \"a1\"), (f, \"a1\", \"a2\"), (f, \"a2\"))}\n )\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b1-b2-b3-c\": (f, (f, \"a1\"), (f, \"a1\", \"a2\"), (f, \"a2\")),\n \"c\": \"b1-b2-b3-c\",\n }\n )\n\n d = {\n \"a1\": 1,\n \"a2\": 2,\n \"b1\": (f, \"a1\"),\n \"b2\": (f, \"a1\", \"a2\"),\n \"b3\": (f, \"a2\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b2\", \"b3\"),\n }\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(d)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input.None_12_test_fuse_reductions_multiple_input.None_17": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_reductions_multiple_input.None_12_test_fuse_reductions_multiple_input.None_17", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1036, "end_line": 1088, "span_ids": ["test_fuse_reductions_multiple_input"], "tokens": 619}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_reductions_multiple_input():\n # ... other code\n assert fuse(d, ave_width=2, rename_keys=False) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"c1\": (f, (f, \"a1\"), \"b2\"),\n \"c2\": (f, \"b2\", (f, \"a2\")),\n }\n )\n assert fuse(d, ave_width=2, rename_keys=True) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"b1-c1\": (f, (f, \"a1\"), \"b2\"),\n \"b3-c2\": (f, \"b2\", (f, \"a2\")),\n \"c1\": \"b1-c1\",\n \"c2\": \"b3-c2\",\n }\n )\n\n d = {\n \"a1\": 1,\n \"a2\": 2,\n \"b1\": (f, \"a1\"),\n \"b2\": (f, \"a1\", \"a2\"),\n \"b3\": (f, \"a2\"),\n \"c1\": (f, \"b1\", \"b2\"),\n \"c2\": (f, \"b2\", \"b3\"),\n \"d\": (f, \"c1\", \"c2\"),\n }\n assert fuse(d, ave_width=1, rename_keys=False) == with_deps(d)\n assert fuse(d, ave_width=1, rename_keys=True) == with_deps(d)\n\n # A more aggressive heuristic could do this at `ave_width=2`. Perhaps\n # we can improve this. 
Nevertheless, this is behaving as intended.\n assert fuse(d, ave_width=3, rename_keys=False) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"d\": (f, (f, (f, \"a1\"), \"b2\"), (f, \"b2\", (f, \"a2\"))),\n }\n )\n assert fuse(d, ave_width=3, rename_keys=True) == with_deps(\n {\n \"a1\": 1,\n \"a2\": 2,\n \"b2\": (f, \"a1\", \"a2\"),\n \"b1-b3-c1-c2-d\": (f, (f, (f, \"a1\"), \"b2\"), (f, \"b2\", (f, \"a2\"))),\n \"d\": \"b1-b3-c1-c2-d\",\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_func_with_kwargs_test_SubgraphCallable.assert_f2_1_2_f_1_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_func_with_kwargs_test_SubgraphCallable.assert_f2_1_2_f_1_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1112, "end_line": 1160, "span_ids": ["test_SubgraphCallable", "func_with_kwargs"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def func_with_kwargs(a, b, c=2):\n return a + b + c\n\n\ndef test_SubgraphCallable():\n non_hashable = [1, 2, 3]\n\n dsk = {\n \"a\": (apply, add, [\"in1\", 2]),\n \"b\": (\n apply,\n partial_by_order,\n [\"in2\"],\n {\"function\": func_with_kwargs, \"other\": [(1, 20)], \"c\": 4},\n ),\n \"c\": (\n apply,\n partial_by_order,\n [\"in2\", \"in1\"],\n {\"function\": func_with_kwargs, \"other\": [(1, 20)]},\n ),\n \"d\": (inc, \"a\"),\n \"e\": (add, \"c\", \"d\"),\n \"f\": [\"a\", 2, \"b\", (add, \"b\", (sum, non_hashable))],\n \"h\": (add, (sum, \"f\"), (sum, [\"a\", \"b\"])),\n }\n\n f = SubgraphCallable(dsk, \"h\", [\"in1\", \"in2\"], name=\"test\")\n\n assert f.name == \"test\"\n assert repr(f) == \"test\"\n\n f2 = SubgraphCallable(dsk, \"h\", [\"in1\", \"in2\"], name=\"test\")\n assert f == f2\n\n f3 = SubgraphCallable(dsk, \"g\", [\"in1\", \"in2\"], name=\"test\")\n assert f != f3\n\n assert hash(SubgraphCallable(None, None, [None]))\n assert hash(f3) != hash(f2)\n dsk2 = dsk.copy()\n dsk2.update({\"in1\": 1, \"in2\": 2})\n assert f(1, 2) == get_sync(cull(dsk2, [\"h\"])[0], [\"h\"])[0]\n assert f(1, 2) == f(1, 2)\n\n f2 = pickle.loads(pickle.dumps(f))\n assert f2 == f\n assert hash(f2) == hash(f)\n assert f2(1, 2) == f(1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_test_fuse_subgraphs.sols._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_test_fuse_subgraphs.sols._", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", 
"file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1209, "end_line": 1281, "span_ids": ["test_fuse_subgraphs"], "tokens": 667}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_subgraphs(compare_subgraph_callables):\n dsk = {\n \"x-1\": 1,\n \"inc-1\": (inc, \"x-1\"),\n \"inc-2\": (inc, \"inc-1\"),\n \"add-1\": (add, \"x-1\", \"inc-2\"),\n \"inc-3\": (inc, \"add-1\"),\n \"inc-4\": (inc, \"inc-3\"),\n \"add-2\": (add, \"add-1\", \"inc-4\"),\n \"inc-5\": (inc, \"add-2\"),\n \"inc-6\": (inc, \"inc-5\"),\n }\n\n res = fuse(dsk, \"inc-6\", fuse_subgraphs=True)\n sol = with_deps(\n {\n \"inc-6\": \"add-inc-x-1\",\n \"add-inc-x-1\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", (inc, (inc, \"x-1\"))),\n \"inc-6\": (inc, (inc, (add, \"add-1\", (inc, (inc, \"add-1\"))))),\n },\n \"inc-6\",\n (),\n ),\n ),\n }\n )\n assert res == sol\n\n res = fuse(dsk, \"inc-6\", fuse_subgraphs=True, rename_keys=False)\n sol = with_deps(\n {\n \"inc-6\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", (inc, (inc, \"x-1\"))),\n \"inc-6\": (inc, (inc, (add, \"add-1\", (inc, (inc, \"add-1\"))))),\n },\n \"inc-6\",\n (),\n ),\n )\n }\n )\n assert res == sol\n\n res = fuse(dsk, \"add-2\", fuse_subgraphs=True)\n sol = with_deps(\n {\n \"add-inc-x-1\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", (inc, (inc, \"x-1\"))),\n \"add-2\": (add, \"add-1\", (inc, (inc, \"add-1\"))),\n },\n \"add-2\",\n (),\n ),\n ),\n \"add-2\": \"add-inc-x-1\",\n \"inc-6\": (inc, (inc, \"add-2\")),\n }\n )\n assert res == sol\n\n res = fuse(dsk, \"inc-2\", fuse_subgraphs=True)\n # ordering of arguments is unstable, check all permutations\n sols = []\n # ordering of arguments is unstable, check all permutations\n # ... other code\n for inkeys in itertools.permutations((\"x-1\", \"inc-2\")):\n # ... other code\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs.for_inkeys_in_itertools_p_test_fuse_subgraphs.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs.for_inkeys_in_itertools_p_test_fuse_subgraphs.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1282, "end_line": 1333, "span_ids": ["test_fuse_subgraphs"], "tokens": 419}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_subgraphs(compare_subgraph_callables):\n # ... other code\n assert res == sol\n # ... 
other code\n for inkeys in itertools.permutations((\"x-1\", \"inc-2\")):\n sols.append(\n with_deps(\n {\n \"x-1\": 1,\n \"inc-2\": (inc, (inc, \"x-1\")),\n \"inc-6\": \"inc-add-1\",\n \"inc-add-1\": (\n SubgraphCallable(\n {\n \"add-1\": (add, \"x-1\", \"inc-2\"),\n \"inc-6\": (\n inc,\n (inc, (add, \"add-1\", (inc, (inc, \"add-1\")))),\n ),\n },\n \"inc-6\",\n inkeys,\n ),\n )\n + inkeys,\n }\n )\n )\n assert res in sols\n\n res = fuse(dsk, [\"inc-2\", \"add-2\"], fuse_subgraphs=True)\n # ordering of arguments is unstable, check all permutations\n sols = []\n for inkeys in itertools.permutations((\"x-1\", \"inc-2\")):\n sols.append(\n with_deps(\n {\n \"x-1\": 1,\n \"inc-2\": (inc, (inc, \"x-1\")),\n \"inc-add-1\": (\n SubgraphCallable(\n {\n \"add-1\": (add, \"x-1\", \"inc-2\"),\n \"add-2\": (add, \"add-1\", (inc, (inc, \"add-1\"))),\n },\n \"add-2\",\n inkeys,\n ),\n )\n + inkeys,\n \"add-2\": \"inc-add-1\",\n \"inc-6\": (inc, (inc, \"add-2\")),\n }\n )\n )\n assert res in sols", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_linear_chains_of_duplicate_deps_test_fuse_subgraphs_linear_chains_of_duplicate_deps.assert_res_sol": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fuse_subgraphs_linear_chains_of_duplicate_deps_test_fuse_subgraphs_linear_chains_of_duplicate_deps.assert_res_sol", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1336, "end_line": 1366, "span_ids": ["test_fuse_subgraphs_linear_chains_of_duplicate_deps"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_subgraphs_linear_chains_of_duplicate_deps(compare_subgraph_callables):\n dsk = {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", \"x-1\"),\n \"add-2\": (add, \"add-1\", \"add-1\"),\n \"add-3\": (add, \"add-2\", \"add-2\"),\n \"add-4\": (add, \"add-3\", \"add-3\"),\n \"add-5\": (add, \"add-4\", \"add-4\"),\n }\n\n res = fuse(dsk, \"add-5\", fuse_subgraphs=True)\n sol = with_deps(\n {\n \"add-x-1\": (\n SubgraphCallable(\n {\n \"x-1\": 1,\n \"add-1\": (add, \"x-1\", \"x-1\"),\n \"add-2\": (add, \"add-1\", \"add-1\"),\n \"add-3\": (add, \"add-2\", \"add-2\"),\n \"add-4\": (add, \"add-3\", \"add-3\"),\n \"add-5\": (add, \"add-4\", \"add-4\"),\n },\n \"add-5\",\n (),\n ),\n ),\n \"add-5\": \"add-x-1\",\n }\n )\n assert res == sol", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_dont_fuse_numpy_arrays_test_fuse_config.with_dask_config_set_op.assert_fuse_d_b_depen": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_dont_fuse_numpy_arrays_test_fuse_config.with_dask_config_set_op.assert_fuse_d_b_depen", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1291, "end_line": 1310, "span_ids": ["test_dont_fuse_numpy_arrays", "test_fuse_config"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_fuse_numpy_arrays():\n \"\"\"\n Some types should stay in the graph bare\n\n This helps with things like serialization\n \"\"\"\n np = pytest.importorskip(\"numpy\")\n dsk = {\"x\": np.arange(5), \"y\": (inc, \"x\")}\n\n assert fuse(dsk, \"y\")[0] == dsk\n\n\ndef test_fuse_config():\n with dask.config.set({\"optimization.fuse.active\": False}):\n d = {\n \"a\": 1,\n \"b\": (inc, \"a\"),\n }\n dependencies = {\"b\": (\"a\",)}\n assert fuse(d, \"b\", dependencies=dependencies) == (d, dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fused_keys_max_length_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_fused_keys_max_length_", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1313, "end_line": 1347, "span_ids": ["test_fused_keys_max_length"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fused_keys_max_length(): # generic fix for gh-5999\n d = {\n \"u-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"v-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"v-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"w-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"w-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"x-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"x-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"y-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n \"y-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n inc,\n \"z-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\",\n ),\n 
\"z-looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong\": (\n add,\n \"a\",\n \"b\",\n ),\n \"a\": 1,\n \"b\": 2,\n }\n\n fused, deps = fuse(d, rename_keys=True)\n for key in fused:\n assert len(key) < 150", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_pytest_test_ordering_keeps_groups_together.assert_abs_o_a_1_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_pytest_test_ordering_keeps_groups_together.assert_abs_o_a_1_o_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 36, "span_ids": ["f", "imports", "abcde", "issorted", "test_ordering_keeps_groups_together"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask\nfrom dask.core import get_deps\nfrom dask.order import diagnostics, ndependencies, order\nfrom dask.utils_test import add, inc\n\n\n@pytest.fixture(params=[\"abcde\", \"edcba\"])\ndef abcde(request):\n return request.param\n\n\ndef issorted(L, reverse=False):\n return sorted(L, reverse=reverse) == L\n\n\ndef f(*args):\n pass\n\n\ndef test_ordering_keeps_groups_together(abcde):\n a, b, c, d, e = abcde\n d = {(a, i): (f,) for i in range(4)}\n d.update({(b, 0): (f, (a, 0), (a, 1)), (b, 1): (f, (a, 2), (a, 3))})\n o = order(d)\n\n assert abs(o[(a, 0)] - o[(a, 1)]) == 1\n assert abs(o[(a, 2)] - o[(a, 3)]) == 1\n\n d = {(a, i): (f,) for i in range(4)}\n d.update({(b, 0): (f, (a, 0), (a, 2)), (b, 1): (f, (a, 1), (a, 3))})\n o = order(d)\n\n assert abs(o[(a, 0)] - o[(a, 2)]) == 1\n assert abs(o[(a, 1)] - o[(a, 3)]) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_broker_nodes_test_avoid_broker_nodes.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_broker_nodes_test_avoid_broker_nodes.None_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 39, "end_line": 79, "span_ids": ["test_avoid_broker_nodes"], "tokens": 397}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_avoid_broker_nodes(abcde):\n r\"\"\"\n\n b0 b1 b2\n | \\ /\n a0 a1\n\n a0 should be run before a1\n \"\"\"\n a, b, c, d, e 
= abcde\n dsk = {\n (a, 0): (f,),\n (a, 1): (f,),\n (b, 0): (f, (a, 0)),\n (b, 1): (f, (a, 1)),\n (b, 2): (f, (a, 1)),\n }\n o = order(dsk)\n assert o[(a, 0)] < o[(a, 1)]\n\n # Switch name of 0, 1 to ensure that this isn't due to string comparison\n dsk = {\n (a, 1): (f,),\n (a, 0): (f,),\n (b, 0): (f, (a, 1)),\n (b, 1): (f, (a, 0)),\n (b, 2): (f, (a, 0)),\n }\n o = order(dsk)\n assert o[(a, 0)] > o[(a, 1)]\n\n # Switch name of 0, 1 for \"b\"s too\n dsk = {\n (a, 0): (f,),\n (a, 1): (f,),\n (b, 1): (f, (a, 0)),\n (b, 0): (f, (a, 1)),\n (b, 2): (f, (a, 1)),\n }\n o = order(dsk)\n assert o[(a, 0)] < o[(a, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_test_avoid_upwards_branching.assert_o_b_1_o_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_test_avoid_upwards_branching.assert_o_b_1_o_c_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 140, "span_ids": ["test_avoid_upwards_branching"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"Can't please 'em all\")\ndef test_avoid_upwards_branching(abcde):\n r\"\"\"\n a1\n |\n a2\n |\n a3 d1\n / \\ /\n b1 c1\n | |\n b2 c2\n |\n c3\n\n Prefer b1 over c1 because it won't stick around waiting for d1 to complete\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (a, 1): (f, (a, 2)),\n (a, 2): (f, (a, 3)),\n (a, 3): (f, (b, 1), (c, 1)),\n (b, 1): (f, (b, 2)),\n (c, 1): (f, (c, 2)),\n (c, 2): (f, (c, 3)),\n (d, 1): (f, (c, 1)),\n }\n\n o = order(dsk)\n\n assert o[(b, 1)] < o[(c, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_complex_test_avoid_upwards_branching_complex.assert_abs_o_d_2_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_avoid_upwards_branching_complex_test_avoid_upwards_branching_complex.assert_abs_o_d_2_o_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 143, "end_line": 179, "span_ids": ["test_avoid_upwards_branching_complex"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_avoid_upwards_branching_complex(abcde):\n r\"\"\"\n a1\n |\n e2 a2 d2 d3\n | | \\ /\n e1 a3 d1\n \\ / \\ /\n b1 c1\n | |\n b2 c2\n |\n c3\n\n Prefer c1 over b1 because c1 will stay in memory less long while b1\n computes\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (a, 1): (f, (a, 2)),\n (a, 2): (f, (a, 3)),\n (a, 3): (f, (b, 1), (c, 1)),\n (b, 1): (f, (b, 2)),\n (b, 2): (f,),\n (c, 1): (f, (c, 2)),\n (c, 2): (f, (c, 3)),\n (c, 3): (f,),\n (d, 1): (f, (c, 1)),\n (d, 2): (f, (d, 1)),\n (d, 3): (f, (d, 1)),\n (e, 1): (f, (b, 1)),\n (e, 2): (f, (e, 1)),\n }\n\n o = order(dsk)\n assert o[(c, 1)] < o[(b, 1)]\n assert abs(o[(d, 2)] - o[(d, 3)]) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_deep_bases_win_over_dependents_test_deep_bases_win_over_dependents.assert_o_b_o_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_deep_bases_win_over_dependents_test_deep_bases_win_over_dependents.assert_o_b_o_c_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 182, "end_line": 203, "span_ids": ["test_deep_bases_win_over_dependents"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deep_bases_win_over_dependents(abcde):\n r\"\"\"\n It's not clear who should run first, e or d\n\n 1. d is nicer because it exposes parallelism\n 2. 
e is nicer (hypothetically) because it will be sooner released\n (though in this case we need d to run first regardless)\n\n Regardless of e or d first, we should run b before c.\n\n a\n / | \\ .\n b c |\n / \\ | /\n e d\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {a: (f, b, c, d), b: (f, d, e), c: (f, d), d: 1, e: 2}\n\n o = order(dsk)\n assert o[e] < o[d] # ambiguous, but this is what we currently expect\n assert o[b] < o[c]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_deep_test_prefer_deep.assert_o_b_o_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_deep_test_prefer_deep.assert_o_b_o_d_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 206, "end_line": 221, "span_ids": ["test_prefer_deep"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_prefer_deep(abcde):\n \"\"\"\n c\n |\n e b\n | |\n d a\n\n Prefer longer chains first so we should start with c\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {a: 1, b: (f, a), c: (f, b), d: 1, e: (f, d)}\n\n o = order(dsk)\n assert o[a] < o[d]\n assert o[b] < o[d]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_stacklimit_test_order_doesnt_fail_on_mixed_type_keys.order_x_inc_1_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_stacklimit_test_order_doesnt_fail_on_mixed_type_keys.order_x_inc_1_y", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 244, "span_ids": ["test_stacklimit", "test_order_doesnt_fail_on_mixed_type_keys", "test_break_ties_by_str"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stacklimit(abcde):\n dsk = {\"x%s\" % (i + 1): (inc, \"x%s\" % i) for i in range(10000)}\n dependencies, dependents = get_deps(dsk)\n ndependencies(dependencies, dependents)\n\n\ndef test_break_ties_by_str(abcde):\n a, b, c, d, e = abcde\n dsk = {(\"x\", i): (inc, i) for i in range(10)}\n x_keys = sorted(dsk)\n dsk[\"y\"] = list(x_keys)\n\n o = order(dsk)\n expected = {\"y\": 10}\n expected.update({k: i for i, k in enumerate(x_keys)})\n\n assert o == expected\n\n\ndef 
test_order_doesnt_fail_on_mixed_type_keys(abcde):\n order({\"x\": (inc, 1), (\"y\", 0): (inc, 2), \"z\": (add, \"x\", (\"y\", 0))})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_gh_3055_test_gh_3055._operate_in_order": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_gh_3055_test_gh_3055._operate_in_order", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 264, "span_ids": ["test_gh_3055"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gh_3055():\n da = pytest.importorskip(\"dask.array\")\n A, B = 20, 99\n orig = x = da.random.normal(size=(A, B), chunks=(1, None))\n for _ in range(2):\n y = (x[:, None, :] * x[:, :, None]).cumsum(axis=0)\n x = x.cumsum(axis=0)\n w = (y * x[:, None]).sum(axis=(1, 2))\n\n dsk = dict(w.__dask_graph__())\n o = order(dsk)\n L = [o[k] for k in w.__dask_keys__()]\n assert sum(x < len(o) / 2 for x in L) > len(L) / 3 # some complete quickly\n\n L = [o[k] for kk in orig.__dask_keys__() for k in kk]\n assert sum(x > len(o) / 2 for x in L) > len(L) / 3 # some start later\n\n assert sorted(L) == L # operate in order", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_type_comparisions_ok_test_prefer_short_dependents.assert_o_e_o_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_type_comparisions_ok_test_prefer_short_dependents.assert_o_e_o_b_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 290, "span_ids": ["test_type_comparisions_ok", "test_prefer_short_dependents"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_type_comparisions_ok(abcde):\n a, b, c, d, e = abcde\n dsk = {a: 1, (a, 1): 2, (a, b, 1): 3}\n order(dsk) # this doesn't err\n\n\ndef test_prefer_short_dependents(abcde):\n r\"\"\"\n\n a\n |\n d b e\n \\ | /\n c\n\n Prefer to finish d and e before starting b. 
That way c can be released\n during the long computations.\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {c: (f,), d: (f, c), e: (f, c), b: (f, c), a: (f, b)}\n\n o = order(dsk)\n assert o[d] < o[b]\n assert o[e] < o[b]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_run_smaller_sections_test_run_smaller_sections.assert_log_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_run_smaller_sections_test_run_smaller_sections.assert_log_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 293, "end_line": 331, "span_ids": ["test_run_smaller_sections"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"This is challenging to do precisely\")\ndef test_run_smaller_sections(abcde):\n r\"\"\"\n aa\n / |\n b d bb dd\n / \\ /| | /\n a c e cc\n\n Prefer to run acb first because then we can get that out of the way\n \"\"\"\n a, b, c, d, e = abcde\n aa, bb, cc, dd = (x * 2 for x in [a, b, c, d])\n\n expected = [a, c, b, e, d, cc, bb, aa, dd]\n\n log = []\n\n def f(x):\n def _(*args):\n log.append(x)\n\n return _\n\n dsk = {\n a: (f(a),),\n c: (f(c),),\n e: (f(e),),\n cc: (f(cc),),\n b: (f(b), a, c),\n d: (f(d), c, e),\n bb: (f(bb), cc),\n aa: (f(aa), d, bb),\n dd: (f(dd), cc),\n }\n\n dask.get(dsk, [aa, b, dd]) # trigger computation\n\n assert log == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_local_parents_of_reduction_test_local_parents_of_reduction.assert_log_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_local_parents_of_reduction_test_local_parents_of_reduction.assert_log_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 379, "span_ids": ["test_local_parents_of_reduction"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_local_parents_of_reduction(abcde):\n \"\"\"\n\n c1\n |\n b1 c2\n | /|\n a1 b2 c3\n | /|\n a2 b3\n |\n a3\n\n Prefer to finish a1 stack before proceeding to b2\n \"\"\"\n a, b, c, d, e = abcde\n a1, a2, a3 = (a + i for i in \"123\")\n b1, b2, b3 = (b + i for i in \"123\")\n 
c1, c2, c3 = (c + i for i in \"123\")\n\n expected = [a3, a2, a1, b3, b2, b1, c3, c2, c1]\n\n log = []\n\n def f(x):\n def _(*args):\n log.append(x)\n\n return _\n\n dsk = {\n a3: (f(a3),),\n a2: (f(a2), a3),\n a1: (f(a1), a2),\n b3: (f(b3),),\n b2: (f(b2), b3, a2),\n b1: (f(b1), b2),\n c3: (f(c3),),\n c2: (f(c2), c3, b2),\n c1: (f(c1), c2),\n }\n\n order(dsk)\n dask.get(dsk, [a1, b1, c1]) # trigger computation\n\n assert log == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_nearest_neighbor_test_nearest_neighbor.assert_o_min_b1_b2_b3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_nearest_neighbor_test_nearest_neighbor.assert_o_min_b1_b2_b3_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 382, "end_line": 416, "span_ids": ["test_nearest_neighbor"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nearest_neighbor(abcde):\n r\"\"\"\n\n a1 a2 a3 a4 a5 a6 a7 a8 a9\n \\ | / \\ | / \\ | / \\ | /\n b1 b2 b3 b4\n\n Want to finish off a local group before moving on.\n This is difficult because all groups are connected.\n \"\"\"\n a, b, c, _, _ = abcde\n a1, a2, a3, a4, a5, a6, a7, a8, a9 = (a + i for i in \"123456789\")\n b1, b2, b3, b4 = (b + i for i in \"1234\")\n\n dsk = {\n b1: (f,),\n b2: (f,),\n b3: (f,),\n b4: (f,),\n a1: (f, b1),\n a2: (f, b1),\n a3: (f, b1, b2),\n a4: (f, b2),\n a5: (f, b2, b3),\n a6: (f, b3),\n a7: (f, b3, b4),\n a8: (f, b4),\n a9: (f, b4),\n }\n\n o = order(dsk)\n\n assert 3 < sum(o[a + i] < len(o) / 2 for i in \"123456789\") < 7\n assert 1 < sum(o[b + i] < len(o) / 2 for i in \"1234\") < 4\n assert o[min([b1, b2, b3, b4])] == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_string_ordering_test_string_ordering_dependents.assert_o_b_0_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_string_ordering_test_string_ordering_dependents.assert_o_b_0_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 419, "end_line": 430, "span_ids": ["test_string_ordering", "test_string_ordering_dependents"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def test_string_ordering():\n \"\"\"Prefer ordering tasks by name first\"\"\"\n dsk = {(\"a\", 1): (f,), (\"a\", 2): (f,), (\"a\", 3): (f,)}\n o = order(dsk)\n assert o == {(\"a\", 1): 0, (\"a\", 2): 1, (\"a\", 3): 2}\n\n\ndef test_string_ordering_dependents():\n \"\"\"Prefer ordering tasks by name first even when in dependencies\"\"\"\n dsk = {(\"a\", 1): (f, \"b\"), (\"a\", 2): (f, \"b\"), (\"a\", 3): (f, \"b\"), \"b\": (f,)}\n o = order(dsk)\n assert o == {\"b\": 0, (\"a\", 1): 1, (\"a\", 2): 2, (\"a\", 3): 3}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_narrow_test_prefer_short_narrow.assert_o_c_1_o_c_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_narrow_test_prefer_short_narrow.assert_o_c_1_o_c_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 448, "span_ids": ["test_prefer_short_narrow"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_prefer_short_narrow(abcde):\n # See test_prefer_short_ancestor for a fail case.\n a, b, c, _, _ = abcde\n dsk = {\n (a, 0): 0,\n (b, 0): 0,\n (c, 0): 0,\n (c, 1): (f, (c, 0), (a, 0), (b, 0)),\n (a, 1): 1,\n (b, 1): 1,\n (c, 2): (f, (c, 1), (a, 1), (b, 1)),\n }\n o = order(dsk)\n assert o[(b, 0)] < o[(b, 1)]\n assert o[(b, 0)] < o[(c, 2)]\n assert o[(c, 1)] < o[(c, 2)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_ancestor_test_prefer_short_ancestor.assert_o_c_1_o_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_prefer_short_ancestor_test_prefer_short_ancestor.assert_o_c_1_o_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 451, "end_line": 508, "span_ids": ["test_prefer_short_ancestor"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_prefer_short_ancestor(abcde):\n r\"\"\"\n From https://github.com/dask/dask-ml/issues/206#issuecomment-395869929\n\n Two cases, one where chunks of an array are independent, and one where the\n chunks of an array have a shared source. 
We handled the independent one\n \"well\" earlier.\n\n Good:\n\n c2\n / \\ \\\n / \\ \\\n c1 \\ \\\n / | \\ \\ \\\n c0 a0 b0 a1 b1\n\n Bad:\n\n c2\n / \\ \\\n / \\ \\\n c1 \\ \\\n / | \\ \\ \\\n c0 a0 b0 a1 b1\n \\ \\ / /\n \\ \\ / /\n a-b\n\n\n The difference is that all the `a` and `b` tasks now have a common\n ancestor.\n\n We would like to choose c1 *before* a1, and b1 because\n\n * we can release a0 and b0 once c1 is done\n * we don't need a1 and b1 to compute c1.\n \"\"\"\n a, b, c, _, _ = abcde\n ab = a + b\n\n dsk = {\n ab: 0,\n (a, 0): (f, ab, 0, 0),\n (b, 0): (f, ab, 0, 1),\n (c, 0): 0,\n (c, 1): (f, (c, 0), (a, 0), (b, 0)),\n (a, 1): (f, ab, 1, 0),\n (b, 1): (f, ab, 1, 1),\n (c, 2): (f, (c, 1), (a, 1), (b, 1)),\n }\n o = order(dsk)\n\n assert o[(a, 0)] < o[(a, 1)]\n assert o[(b, 0)] < o[(b, 1)]\n assert o[(b, 0)] < o[(c, 2)]\n assert o[(c, 1)] < o[(c, 2)]\n assert o[(c, 1)] < o[(a, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_map_overlap_test_map_overlap.assert_o_b_1_o_e_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_map_overlap_test_map_overlap.assert_o_b_1_o_e_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 511, "end_line": 545, "span_ids": ["test_map_overlap"], "tokens": 438}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap(abcde):\n r\"\"\"\n b1 b3 b5\n |\\ / | \\ / |\n c1 c2 c3 c4 c5\n |/ | \\ | / | \\|\n d1 d2 d3 d4 d5\n | | |\n e1 e2 e5\n\n Want to finish b1 before we start on e5\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (e, 1): (f,),\n (d, 1): (f, (e, 1)),\n (c, 1): (f, (d, 1)),\n (b, 1): (f, (c, 1), (c, 2)),\n (d, 2): (f,),\n (c, 2): (f, (d, 1), (d, 2), (d, 3)),\n (e, 3): (f,),\n (d, 3): (f, (e, 3)),\n (c, 3): (f, (d, 3)),\n (b, 3): (f, (c, 2), (c, 3), (c, 4)),\n (d, 4): (f,),\n (c, 4): (f, (d, 3), (d, 4), (d, 5)),\n (e, 5): (f,),\n (d, 5): (f, (e, 5)),\n (c, 5): (f, (d, 5)),\n (b, 5): (f, (c, 4), (c, 5)),\n }\n\n o = order(dsk)\n\n assert o[(b, 1)] < o[(e, 5)] or o[(b, 5)] < o[(e, 1)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_use_structure_not_keys_test_use_structure_not_keys.if_Bs_0_3_.else_.assert_Bs_1_3_5_7_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_use_structure_not_keys_test_use_structure_not_keys.if_Bs_0_3_.else_.assert_Bs_1_3_5_7_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 548, "end_line": 585, 
"span_ids": ["test_use_structure_not_keys"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_use_structure_not_keys(abcde):\n \"\"\"See https://github.com/dask/dask/issues/5584#issuecomment-554963958\n\n We were using key names to infer structure, which could result in funny behavior.\n \"\"\"\n a, b, _, _, _ = abcde\n dsk = {\n (a, 0): (f,),\n (a, 1): (f,),\n (a, 2): (f,),\n (a, 3): (f,),\n (a, 4): (f,),\n (a, 5): (f,),\n (a, 6): (f,),\n (a, 7): (f,),\n (a, 8): (f,),\n (a, 9): (f,),\n (b, 5): (f, (a, 2)),\n (b, 7): (f, (a, 0), (a, 2)),\n (b, 9): (f, (a, 7), (a, 0), (a, 2)),\n (b, 1): (f, (a, 4), (a, 7), (a, 0)),\n (b, 2): (f, (a, 9), (a, 4), (a, 7)),\n (b, 4): (f, (a, 6), (a, 9), (a, 4)),\n (b, 3): (f, (a, 5), (a, 6), (a, 9)),\n (b, 8): (f, (a, 1), (a, 5), (a, 6)),\n (b, 6): (f, (a, 8), (a, 1), (a, 5)),\n (b, 0): (f, (a, 3), (a, 8), (a, 1)),\n }\n o = order(dsk)\n As = sorted(val for (letter, _), val in o.items() if letter == a)\n Bs = sorted(val for (letter, _), val in o.items() if letter == b)\n assert Bs[0] in {1, 3}\n if Bs[0] == 3:\n assert As == [0, 1, 2, 4, 6, 8, 10, 12, 14, 16]\n assert Bs == [3, 5, 7, 9, 11, 13, 15, 17, 18, 19]\n else:\n assert As == [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]\n assert Bs == [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_dont_run_all_dependents_too_early_test_dont_run_all_dependents_too_early.assert_expected_actual": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_dont_run_all_dependents_too_early_test_dont_run_all_dependents_too_early.assert_expected_actual", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 588, "end_line": 600, "span_ids": ["test_dont_run_all_dependents_too_early"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_run_all_dependents_too_early(abcde):\n \"\"\"From https://github.com/dask/dask-ml/issues/206#issuecomment-395873372\"\"\"\n a, b, c, d, e = abcde\n depth = 10\n dsk = {(a, 0): 0, (b, 0): 1, (c, 0): 2, (d, 0): (f, (a, 0), (b, 0), (c, 0))}\n for i in range(1, depth):\n dsk[(b, i)] = (f, (b, 0))\n dsk[(c, i)] = (f, (c, 0))\n dsk[(d, i)] = (f, (d, i - 1), (b, i), (c, i))\n o = order(dsk)\n expected = [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]\n actual = sorted(v for (letter, num), v in o.items() if letter == d)\n assert expected == actual", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_many_branches_use_ndependencies_test_many_branches_use_ndependencies.assert_o_c_1_o_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_many_branches_use_ndependencies_test_many_branches_use_ndependencies.assert_o_c_1_o_a_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 603, "end_line": 639, "span_ids": ["test_many_branches_use_ndependencies"], "tokens": 545}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_many_branches_use_ndependencies(abcde):\n \"\"\"From https://github.com/dask/dask/pull/5646#issuecomment-562700533\n\n Sometimes we need larger or wider DAGs to test behavior. This test\n ensures we choose the branch with more work twice in successtion.\n This is important, because ``order`` may search along dependencies\n and then along dependents.\n\n \"\"\"\n a, b, c, d, e = abcde\n dd = d + d\n ee = e + e\n dsk = {\n (a, 0): 0,\n (a, 1): (f, (a, 0)),\n (a, 2): (f, (a, 1)),\n (b, 1): (f, (a, 0)),\n (b, 2): (f, (b, 1)),\n (c, 1): (f, (a, 0)), # most short and thin; should go last\n (d, 1): (f, (a, 0)),\n (d, 2): (f, (d, 1)),\n (dd, 1): (f, (a, 0)),\n (dd, 2): (f, (dd, 1)),\n (dd, 3): (f, (d, 2), (dd, 2)),\n (e, 1): (f, (a, 0)),\n (e, 2): (f, (e, 1)),\n (ee, 1): (f, (a, 0)),\n (ee, 2): (f, (ee, 1)),\n (ee, 3): (f, (e, 2), (ee, 2)),\n (a, 3): (f, (a, 2), (b, 2), (c, 1), (dd, 3), (ee, 3)),\n }\n o = order(dsk)\n # run all d's and e's first\n expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n actual = sorted(v for (letter, _), v in o.items() if letter in {d, dd, e, ee})\n assert actual == expected\n assert o[(c, 1)] == o[(a, 3)] - 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_cycle_test_order_empty.assert_order_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_cycle_test_order_empty.assert_order_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 642, "end_line": 658, "span_ids": ["test_order_cycle", "test_order_empty"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_order_cycle():\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n dask.get({\"a\": (f, \"a\")}, \"a\") # we encounter this in `get`\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({\"a\": (f, 
\"a\")}) # trivial self-loop\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({(\"a\", 0): (f, (\"a\", 0))}) # non-string\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({\"a\": (f, \"b\"), \"b\": (f, \"c\"), \"c\": (f, \"a\")}) # non-trivial loop\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({\"a\": (f, \"b\"), \"b\": (f, \"c\"), \"c\": (f, \"a\", \"d\"), \"d\": 1})\n with pytest.raises(RuntimeError, match=\"Cycle detected\"):\n order({\"a\": (f, \"b\"), \"b\": (f, \"c\"), \"c\": (f, \"a\", \"d\"), \"d\": (f, \"b\")})\n\n\ndef test_order_empty():\n assert order({}) == {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_switching_dependents_test_switching_dependents.assert_o_a_5_o_e_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_switching_dependents_test_switching_dependents.assert_o_a_5_o_e_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 661, "end_line": 713, "span_ids": ["test_switching_dependents"], "tokens": 564}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_switching_dependents(abcde):\n r\"\"\"\n\n a7 a8 <-- do these last\n | /\n a6 e6\n | /\n a5 c5 d5 e5\n | | / /\n a4 c4 d4 e4\n | \\ | / /\n a3 b3---/\n |\n a2\n |\n a1\n |\n a0 <-- start here\n\n Test that we are able to switch to better dependents.\n In this graph, we expect to start at a0. 
To compute a4, we need to compute b3.\n After computing b3, three \"better\" paths become available.\n Confirm that we take the better paths before continuing down `a` path.\n\n This test is pretty specific to how `order` is implemented\n and is intended to increase code coverage.\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (a, 0): 0,\n (a, 1): (f, (a, 0)),\n (a, 2): (f, (a, 1)),\n (a, 3): (f, (a, 2)),\n (a, 4): (f, (a, 3), (b, 3)),\n (a, 5): (f, (a, 4)),\n (a, 6): (f, (a, 5)),\n (a, 7): (f, (a, 6)),\n (a, 8): (f, (a, 6)),\n (b, 3): 1,\n (c, 4): (f, (b, 3)),\n (c, 5): (f, (c, 4)),\n (d, 4): (f, (b, 3)),\n (d, 5): (f, (d, 4)),\n (e, 4): (f, (b, 3)),\n (e, 5): (f, (e, 4)),\n (e, 6): (f, (e, 5)),\n }\n o = order(dsk)\n\n assert o[(a, 0)] == 0 # probably\n assert o[(a, 5)] > o[(c, 5)]\n assert o[(a, 5)] > o[(d, 5)]\n assert o[(a, 5)] > o[(e, 6)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_from_dask_rewrite_import__test_args.assert_args_1_2_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_from_dask_rewrite_import__test_args.assert_args_1_2_3_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports", "test_head", "test_args", "double"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dask.rewrite import VAR, RewriteRule, RuleSet, Traverser, args, head\nfrom dask.utils_test import add, inc\n\n\ndef double(x):\n return x * 2\n\n\ndef test_head():\n assert head((inc, 1)) == inc\n assert head((add, 1, 2)) == add\n assert head((add, (inc, 1), (inc, 1))) == add\n assert head([1, 2, 3]) == list\n\n\ndef test_args():\n assert args((inc, 1)) == (1,)\n assert args((add, 1, 2)) == (1, 2)\n assert args(1) == ()\n assert args([1, 2, 3]) == [1, 2, 3]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_traverser_test_traverser.assert_list_t2_add_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_traverser_test_traverser.assert_list_t2_add_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 23, "end_line": 36, "span_ids": ["test_traverser"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_traverser():\n term = (add, (inc, 1), (double, (inc, 1), 2))\n t = Traverser(term)\n t2 = t.copy()\n assert t.current == add\n t.next()\n assert t.current == inc\n # Ensure copies aren't advanced when the original advances\n assert t2.current == add\n t.skip()\n assert t.current == double\n t.next()\n assert t.current == inc\n assert list(t2) == [add, inc, 1, double, inc, 1, 2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_vars_rule6.RewriteRule_list_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_vars_rule6.RewriteRule_list_x_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 39, "end_line": 61, "span_ids": ["repl_list", "impl:13", "impl"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "vars = (\"a\", \"b\", \"c\")\n# add(a, 1) -> inc(a)\nrule1 = RewriteRule((add, \"a\", 1), (inc, \"a\"), vars)\n# add(a, a) -> double(a)\nrule2 = RewriteRule((add, \"a\", \"a\"), (double, \"a\"), vars)\n# add(inc(a), inc(a)) -> add(double(a), 2)\nrule3 = RewriteRule((add, (inc, \"a\"), (inc, \"a\")), (add, (double, \"a\"), 2), vars)\n# add(inc(b), inc(a)) -> add(add(a, b), 2)\nrule4 = RewriteRule((add, (inc, \"b\"), (inc, \"a\")), (add, (add, \"a\", \"b\"), 2), vars)\n# sum([c, b, a]) -> add(add(a, b), c)\nrule5 = RewriteRule((sum, [\"c\", \"b\", \"a\"]), (add, (add, \"a\", \"b\"), \"c\"), vars)\n# list(x) -> x if x is a list\n\n\ndef repl_list(sd):\n x = sd[\"x\"]\n if isinstance(x, list):\n return x\n else:\n return (list, x)\n\n\nrule6 = RewriteRule((list, \"x\"), repl_list, (\"x\",))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRule_test_RewriteRule.assert_rule5__varlist_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRule_test_RewriteRule.assert_rule5__varlist_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 64, "end_line": 75, "span_ids": ["test_RewriteRule"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_RewriteRule():\n # Test extraneous vars are removed, varlist is 
correct\n assert rule1.vars == (\"a\",)\n assert rule1._varlist == [\"a\"]\n assert rule2.vars == (\"a\",)\n assert rule2._varlist == [\"a\", \"a\"]\n assert rule3.vars == (\"a\",)\n assert rule3._varlist == [\"a\", \"a\"]\n assert rule4.vars == (\"a\", \"b\")\n assert rule4._varlist == [\"b\", \"a\"]\n assert rule5.vars == (\"a\", \"b\", \"c\")\n assert rule5._varlist == [\"c\", \"b\", \"a\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRuleSubs_test_RuleSet.assert_rs_rules_rules": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_RewriteRuleSubs_test_RuleSet.assert_rs_rules_rules", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 78, "end_line": 104, "span_ids": ["test_RuleSet", "impl:15", "test_RewriteRuleSubs"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_RewriteRuleSubs():\n # Test both rhs substitution and callable rhs\n assert rule1.subs({\"a\": 1}) == (inc, 1)\n assert rule6.subs({\"x\": [1, 2, 3]}) == [1, 2, 3]\n\n\nrules = [rule1, rule2, rule3, rule4, rule5, rule6]\nrs = RuleSet(*rules)\n\n\ndef test_RuleSet():\n net = (\n {\n add: (\n {\n VAR: ({VAR: ({}, [1]), 1: ({}, [0])}, []),\n inc: ({VAR: ({inc: ({VAR: ({}, [2, 3])}, [])}, [])}, []),\n },\n [],\n ),\n list: ({VAR: ({}, [5])}, []),\n sum: ({list: ({VAR: ({VAR: ({VAR: ({}, [4])}, [])}, [])}, [])}, []),\n },\n [],\n )\n assert rs._net == net\n assert rs.rules == rules", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_matches_test_matches.assert_len_matches_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_matches_test_matches.assert_len_matches_0", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 107, "end_line": 134, "span_ids": ["test_matches"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_matches():\n term = (add, 2, 1)\n matches = list(rs.iter_matches(term))\n assert len(matches) == 1\n assert matches[0] == (rule1, {\"a\": 2})\n # Test matches specific before general\n term = (add, 1, 1)\n matches = list(rs.iter_matches(term))\n assert len(matches) == 2\n 
assert matches[0] == (rule1, {\"a\": 1})\n assert matches[1] == (rule2, {\"a\": 1})\n # Test matches unhashable. What it's getting rewritten to doesn't make\n # sense, this is just to test that it works. :)\n term = (add, [1], [1])\n matches = list(rs.iter_matches(term))\n assert len(matches) == 1\n assert matches[0] == (rule2, {\"a\": [1]})\n # Test match at depth\n term = (add, (inc, 1), (inc, 1))\n matches = list(rs.iter_matches(term))\n assert len(matches) == 3\n assert matches[0] == (rule3, {\"a\": 1})\n assert matches[1] == (rule4, {\"a\": 1, \"b\": 1})\n assert matches[2] == (rule2, {\"a\": (inc, 1)})\n # Test non-linear pattern checking\n term = (add, 2, 3)\n matches = list(rs.iter_matches(term))\n assert len(matches) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_rewrite_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_rewrite.py_test_rewrite_", "embedding": null, "metadata": {"file_path": "dask/tests/test_rewrite.py", "file_name": "test_rewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 156, "span_ids": ["test_rewrite"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rewrite():\n # Rewrite inside list\n term = (sum, [(add, 1, 1), (add, 1, 1), (add, 1, 1)])\n new_term = rs.rewrite(term)\n assert new_term == (add, (add, (inc, 1), (inc, 1)), (inc, 1))\n # Rules aren't applied to exhaustion, this can be further simplified\n new_term = rs.rewrite(new_term)\n assert new_term == (add, (add, (double, 1), 2), (inc, 1))\n term = (\n add,\n (add, (add, (add, 1, 2), (add, 1, 2)), (add, (add, 1, 2), (add, 1, 2))),\n 1,\n )\n assert rs.rewrite(term) == (inc, (double, (double, (add, 1, 2))))\n # Callable RewriteRule rhs\n term = (list, [1, 2, 3])\n assert rs.rewrite(term) == [1, 2, 3]\n term = (list, (map, inc, [1, 2, 3]))\n assert rs.rewrite(term) == term", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_sys_test_numpy_0_strided.assert_sizeof_x_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_sys_test_numpy_0_strided.assert_sizeof_x_8", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 39, "span_ids": ["test_bytes_like", "imports", "test_containers", "test_numpy_0_strided", "test_base", "test_name", "test_numpy"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\nfrom array import array\n\nimport pytest\n\nfrom dask.sizeof import getsizeof, sizeof\nfrom dask.utils import funcname\n\n\ndef test_base():\n assert sizeof(1) == getsizeof(1)\n\n\ndef test_name():\n assert funcname(sizeof) == \"sizeof\"\n\n\ndef test_containers():\n assert sizeof([1, 2, [3]]) > (getsizeof(3) * 3 + getsizeof([]))\n\n\ndef test_bytes_like():\n assert 1000 <= sizeof(bytes(1000)) <= 2000\n assert 1000 <= sizeof(bytearray(1000)) <= 2000\n assert 1000 <= sizeof(memoryview(bytes(1000))) <= 2000\n assert 8000 <= sizeof(array(\"d\", range(1000))) <= 9000\n\n\ndef test_numpy():\n np = pytest.importorskip(\"numpy\")\n assert 8000 <= sizeof(np.empty(1000, dtype=\"f8\")) <= 9000\n dt = np.dtype(\"f8\")\n assert sizeof(dt) == sys.getsizeof(dt)\n\n\ndef test_numpy_0_strided():\n np = pytest.importorskip(\"numpy\")\n x = np.broadcast_to(1, (100, 100, 100))\n assert sizeof(x) <= 8", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_test_pandas.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_test_pandas.None_6", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 55, "span_ids": ["test_pandas"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pandas():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(\n {\"x\": [1, 2, 3], \"y\": [\"a\" * 100, \"b\" * 100, \"c\" * 100]}, index=[10, 20, 30]\n )\n\n assert sizeof(df) >= sizeof(df.x) + sizeof(df.y) - sizeof(df.index)\n assert sizeof(df.x) >= sizeof(df.index)\n assert sizeof(df.y) >= 100 * 3\n assert sizeof(df.index) >= 20\n\n assert isinstance(sizeof(df), int)\n assert isinstance(sizeof(df.x), int)\n assert isinstance(sizeof(df.index), int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_multiindex_test_pandas_repeated_column.assert_sizeof_df_x_x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pandas_multiindex_test_pandas_repeated_column.assert_sizeof_df_x_x", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 58, "end_line": 71, "span_ids": ["test_pandas_repeated_column", "test_pandas_multiindex"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pandas_multiindex():\n pd = pytest.importorskip(\"pandas\")\n index = pd.MultiIndex.from_product([range(5), [\"a\", \"b\", \"c\", \"d\", \"e\"]])\n actual_size = sys.getsizeof(index) + 1000 # adjust for serialization overhead\n\n assert 0.5 * actual_size < sizeof(index) < 2 * actual_size\n assert isinstance(sizeof(index), int)\n\n\ndef test_pandas_repeated_column():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n\n assert sizeof(df[[\"x\", \"x\", \"x\"]]) > sizeof(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_sparse_matrix_test_sparse_matrix.assert_sizeof_sp_tolil_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_sparse_matrix_test_sparse_matrix.assert_sizeof_sp_tolil_", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 74, "end_line": 84, "span_ids": ["test_sparse_matrix"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sparse_matrix():\n sparse = pytest.importorskip(\"scipy.sparse\")\n sp = sparse.eye(10)\n # These are the 32-bit Python 2.7 values.\n assert sizeof(sp.todia()) >= 152\n assert sizeof(sp.tobsr()) >= 232\n assert sizeof(sp.tocoo()) >= 240\n assert sizeof(sp.tocsc()) >= 232\n assert sizeof(sp.tocsr()) >= 232\n assert sizeof(sp.todok()) >= 192\n assert sizeof(sp.tolil()) >= 204", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_serires_object_dtype_test_dataframe_object_dtype.assert_sizeof_s_100000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_serires_object_dtype_test_dataframe_object_dtype.assert_sizeof_s_100000", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 102, "span_ids": ["test_dataframe_object_dtype", "test_serires_object_dtype"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_serires_object_dtype():\n pd = 
pytest.importorskip(\"pandas\")\n s = pd.Series([\"a\"] * 1000)\n assert sizeof(\"a\") * 1000 < sizeof(s) < 2 * sizeof(\"a\") * 1000\n\n s = pd.Series([\"a\" * 1000] * 1000)\n assert sizeof(s) > 1000000\n\n\ndef test_dataframe_object_dtype():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame({\"x\": [\"a\"] * 1000})\n assert sizeof(\"a\") * 1000 < sizeof(df) < 2 * sizeof(\"a\") * 1000\n\n s = pd.Series([\"a\" * 1000] * 1000)\n assert sizeof(s) > 1000000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_empty_test_empty.assert_sizeof_empty_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_empty_test_empty.assert_sizeof_empty_index", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 105, "end_line": 115, "span_ids": ["test_empty"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty():\n pd = pytest.importorskip(\"pandas\")\n df = pd.DataFrame(\n {\"x\": [1, 2, 3], \"y\": [\"a\" * 100, \"b\" * 100, \"c\" * 100]}, index=[10, 20, 30]\n )\n empty = df.head(0)\n\n assert sizeof(empty) > 0\n assert sizeof(empty.x) > 0\n assert sizeof(empty.y) > 0\n assert sizeof(empty.index) > 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pyarrow_table_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_sizeof.py_test_pyarrow_table_", "embedding": null, "metadata": {"file_path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 150, "span_ids": ["test_dict", "test_pyarrow_table"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pyarrow_table():\n pd = pytest.importorskip(\"pandas\")\n pa = pytest.importorskip(\"pyarrow\")\n df = pd.DataFrame(\n {\"x\": [1, 2, 3], \"y\": [\"a\" * 100, \"b\" * 100, \"c\" * 100]}, index=[10, 20, 30]\n )\n table = pa.Table.from_pandas(df)\n\n assert sizeof(table) > sizeof(table.schema.metadata)\n assert isinstance(sizeof(table), int)\n assert isinstance(sizeof(table.columns[0]), int)\n assert isinstance(sizeof(table.columns[1]), int)\n assert isinstance(sizeof(table.columns[2]), int)\n\n empty = pa.Table.from_pandas(df.head(0))\n\n assert sizeof(empty) > 
sizeof(empty.schema.metadata)\n assert sizeof(empty.columns[0]) > 0\n assert sizeof(empty.columns[1]) > 0\n assert sizeof(empty.columns[2]) > 0\n\n\ndef test_dict():\n np = pytest.importorskip(\"numpy\")\n x = np.ones(10000)\n assert sizeof({\"x\": x}) > x.nbytes\n assert sizeof({\"x\": [x]}) > x.nbytes\n assert sizeof({\"x\": [{\"y\": x}]}) > x.nbytes\n\n d = {i: x for i in range(100)}\n assert sizeof(d) > x.nbytes * 100\n assert isinstance(sizeof(d), int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_system.py_builtins_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_system.py_builtins_", "embedding": null, "metadata": {"file_path": "dask/tests/test_system.py", "file_name": "test_system.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 56, "span_ids": ["test_cpu_count", "imports", "test_cpu_count_cgroups"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import builtins\nimport io\nimport os\nimport sys\n\nimport pytest\n\nfrom dask.system import cpu_count\n\npsutil = pytest.importorskip(\"psutil\")\n\n\ndef test_cpu_count():\n count = cpu_count()\n assert isinstance(count, int)\n assert count <= os.cpu_count()\n assert count >= 1\n\n\n@pytest.mark.parametrize(\"dirname\", [\"cpuacct,cpu\", \"cpu,cpuacct\", None])\ndef test_cpu_count_cgroups(dirname, monkeypatch):\n def mycpu_count():\n # Absurdly high, unlikely to match real value\n return 250\n\n monkeypatch.setattr(os, \"cpu_count\", mycpu_count)\n\n class MyProcess:\n def cpu_affinity(self):\n # No affinity set\n return []\n\n monkeypatch.setattr(psutil, \"Process\", MyProcess)\n\n if dirname:\n paths = {\n \"/sys/fs/cgroup/%s/cpu.cfs_quota_us\" % dirname: io.StringIO(\"2005\"),\n \"/sys/fs/cgroup/%s/cpu.cfs_period_us\" % dirname: io.StringIO(\"10\"),\n }\n builtin_open = builtins.open\n\n def myopen(path, *args, **kwargs):\n if path in paths:\n return paths.get(path)\n return builtin_open(path, *args, **kwargs)\n\n monkeypatch.setattr(builtins, \"open\", myopen)\n monkeypatch.setattr(sys, \"platform\", \"linux\")\n\n count = cpu_count()\n if dirname:\n # Rounds up\n assert count == 201\n else:\n assert count == 250", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_threaded_within_thread_test_threaded_within_thread.while_threading_active_co.assert_time_start_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_threaded_within_thread_test_threaded_within_thread.while_threading_active_co.assert_time_start_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": 
"text/x-python", "category": "test", "start_line": 77, "end_line": 97, "span_ids": ["test_threaded_within_thread"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_threaded_within_thread():\n L = []\n\n def f(i):\n result = get({\"x\": (lambda: i,)}, \"x\", num_workers=2)\n L.append(result)\n\n before = threading.active_count()\n\n for i in range(20):\n t = threading.Thread(target=f, args=(1,))\n t.daemon = True\n t.start()\n t.join()\n assert L == [1]\n del L[:]\n\n start = time() # wait for most threads to join\n while threading.active_count() > before + 10:\n sleep(0.01)\n assert time() < start + 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_dont_spawn_too_many_threads_test_thread_safety.assert_L_1_20": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_dont_spawn_too_many_threads_test_thread_safety.assert_L_1_20", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 100, "end_line": 147, "span_ids": ["test_dont_spawn_too_many_threads_CPU_COUNT", "test_thread_safety", "test_dont_spawn_too_many_threads"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dont_spawn_too_many_threads():\n before = threading.active_count()\n\n dsk = {(\"x\", i): (lambda: i,) for i in range(10)}\n dsk[\"x\"] = (sum, list(dsk))\n for i in range(20):\n get(dsk, \"x\", num_workers=4)\n\n after = threading.active_count()\n\n assert after <= before + 8\n\n\ndef test_dont_spawn_too_many_threads_CPU_COUNT():\n before = threading.active_count()\n\n dsk = {(\"x\", i): (lambda: i,) for i in range(10)}\n dsk[\"x\"] = (sum, list(dsk))\n for i in range(20):\n get(dsk, \"x\")\n\n after = threading.active_count()\n\n assert after <= before + CPU_COUNT * 2\n\n\ndef test_thread_safety():\n def f(x):\n return 1\n\n dsk = {\"x\": (sleep, 0.05), \"y\": (f, \"x\")}\n\n L = []\n\n def test_f():\n L.append(get(dsk, \"y\"))\n\n threads = []\n for i in range(20):\n t = threading.Thread(target=test_f)\n t.daemon = True\n t.start()\n threads.append(t)\n\n for thread in threads:\n thread.join()\n\n assert L == [1] * 20", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_interrupt_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_interrupt_", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 152, "end_line": 179, "span_ids": ["test_interrupt"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_interrupt():\n # Windows implements `queue.get` using polling,\n # which means we can set an exception to interrupt the call to `get`.\n # Python 3 on other platforms requires sending SIGINT to the main thread.\n if os.name == \"nt\":\n from _thread import interrupt_main\n else:\n main_thread = threading.get_ident()\n\n def interrupt_main() -> None:\n signal.pthread_kill(main_thread, signal.SIGINT)\n\n # 7 seconds is is how long the test will take when you factor in teardown.\n # Don't set it too short or the test will become flaky on non-performing CI\n dsk = {(\"x\", i): (sleep, 7) for i in range(20)}\n dsk[\"x\"] = (len, list(dsk.keys()))\n\n # 3 seconds is how long the test will take without teardown\n interrupter = threading.Timer(3, interrupt_main)\n interrupter.start()\n\n start = time()\n with pytest.raises(KeyboardInterrupt):\n get(dsk, \"x\")\n stop = time()\n assert stop < start + 6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_getargspec_test_getargspec.assert_getargspec_MyType_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_getargspec_test_getargspec.assert_getargspec_MyType_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 60, "span_ids": ["test_getargspec"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getargspec():\n def func(x, y):\n pass\n\n assert getargspec(func).args == [\"x\", \"y\"]\n\n func2 = functools.partial(func, 2)\n # this is a bit of a lie, but maybe close enough\n assert getargspec(func2).args == [\"x\", \"y\"]\n\n def wrapper(*args, **kwargs):\n pass\n\n wrapper.__wrapped__ = func\n assert getargspec(wrapper).args == [\"x\", \"y\"]\n\n class MyType:\n def __init__(self, x, y):\n pass\n\n assert getargspec(MyType).args == [\"self\", \"x\", \"y\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_takes_multiple_arguments_test_takes_multiple_arguments.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_takes_multiple_arguments_test_takes_multiple_arguments.None_7", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 63, "end_line": 91, "span_ids": ["test_takes_multiple_arguments"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_takes_multiple_arguments():\n assert takes_multiple_arguments(map)\n assert not takes_multiple_arguments(sum)\n\n def multi(a, b, c):\n return a, b, c\n\n class Singular:\n def __init__(self, a):\n pass\n\n class Multi:\n def __init__(self, a, b):\n pass\n\n assert takes_multiple_arguments(multi)\n assert not takes_multiple_arguments(Singular)\n assert takes_multiple_arguments(Multi)\n\n def f():\n pass\n\n assert not takes_multiple_arguments(f)\n\n def vararg(*args):\n pass\n\n assert takes_multiple_arguments(vararg)\n assert not takes_multiple_arguments(vararg, varargs=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_test_dispatch.assert_foo___doc___f__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_test_dispatch.assert_foo___doc___f__", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 99, "end_line": 121, "span_ids": ["test_dispatch"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dispatch():\n foo = Dispatch()\n foo.register(int, lambda a: a + 1)\n foo.register(float, lambda a: a - 1)\n foo.register(tuple, lambda a: tuple(foo(i) for i in a))\n\n def f(a):\n \"\"\"My Docstring\"\"\"\n return a\n\n foo.register(object, f)\n\n class Bar:\n pass\n\n b = Bar()\n assert foo(1) == 2\n assert foo.dispatch(int)(1) == 2\n assert foo(1.0) == 0.0\n assert foo(b) == b\n assert foo((1, 2.0, b)) == (2, 1.0, b)\n\n assert foo.__doc__ == f.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_kwargs_test_dispatch_variadic_on_first_argument.assert_foo_1_0_2_0_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_kwargs_test_dispatch_variadic_on_first_argument.assert_foo_1_0_2_0_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 129, "span_ids": ["test_dispatch_kwargs", "test_dispatch_variadic_on_first_argument"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dispatch_kwargs():\n foo = Dispatch()\n foo.register(int, lambda a, b=10: a + b)\n\n assert foo(1, b=20) == 21\n\n\ndef test_dispatch_variadic_on_first_argument():\n foo = Dispatch()\n foo.register(int, lambda a, b: a + b)\n foo.register(float, lambda a, b: a - b)\n\n assert foo(1, 2) == 3\n assert foo(1.0, 2.0) == -1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_test_dispatch_lazy.assert_foo_1_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_test_dispatch_lazy.assert_foo_1_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 152, "span_ids": ["test_dispatch_lazy"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dispatch_lazy():\n # this tests the recursive component of dispatch\n foo = Dispatch()\n foo.register(int, lambda a: a)\n\n import decimal\n\n # keep it outside lazy dec for test\n def foo_dec(a):\n return a + 1\n\n @foo.register_lazy(\"decimal\")\n def register_decimal():\n import decimal\n\n foo.register(decimal.Decimal, foo_dec)\n\n # This test needs to be *before* any other calls\n assert foo.dispatch(decimal.Decimal) == foo_dec\n assert foo(decimal.Decimal(1)) == decimal.Decimal(2)\n assert foo(1) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_random_state_data_test_random_state_data.None_1.assert_s1_s2_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_random_state_data_test_random_state_data.None_1.assert_s1_s2_all_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 158, "end_line": 
179, "span_ids": ["test_random_state_data"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_state_data():\n np = pytest.importorskip(\"numpy\")\n seed = 37\n state = np.random.RandomState(seed)\n n = 10000\n\n # Use an integer\n states = random_state_data(n, seed)\n assert len(states) == n\n\n # Use RandomState object\n states2 = random_state_data(n, state)\n for s1, s2 in zip(states, states2):\n assert s1.shape == (624,)\n assert (s1 == s2).all()\n\n # Consistent ordering\n states = random_state_data(10, 1234)\n states2 = random_state_data(20, 1234)[:10]\n\n for s1, s2 in zip(states, states2):\n assert (s1 == s2).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_memory_repr_test_method_caller.assert_count_in_repr_me": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_memory_repr_test_method_caller.assert_count_in_repr_me", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 219, "end_line": 234, "span_ids": ["test_memory_repr", "test_method_caller"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_memory_repr():\n for power, mem_repr in enumerate([\"1.0 bytes\", \"1.0 KB\", \"1.0 MB\", \"1.0 GB\"]):\n assert memory_repr(1024**power) == mem_repr\n\n\ndef test_method_caller():\n a = [1, 2, 3, 3, 3]\n f = methodcaller(\"count\")\n assert f(a, 3) == a.count(3)\n assert methodcaller(\"count\") is f\n assert M.count is f\n assert pickle.loads(pickle.dumps(f)) is f\n assert \"count\" in dir(M)\n\n assert \"count\" in str(methodcaller(\"count\"))\n assert \"count\" in repr(methodcaller(\"count\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_skip_doctest_test_skip_doctest.assert_res_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_skip_doctest_test_skip_doctest.assert_res_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 221, "span_ids": ["test_skip_doctest"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_skip_doctest():\n example = \"\"\">>> xxx\n>>>\n>>> # comment\n>>> xxx\"\"\"\n\n res = skip_doctest(example)\n assert (\n res\n == \"\"\">>> xxx # doctest: +SKIP\n>>>\n>>> # comment\n>>> xxx # doctest: +SKIP\"\"\"\n )\n\n assert skip_doctest(None) == \"\"\n\n example = \"\"\"\n>>> 1 + 2 # doctest: +ELLIPSES\n3\"\"\"\n\n expected = \"\"\"\n>>> 1 + 2 # doctest: +ELLIPSES, +SKIP\n3\"\"\"\n res = skip_doctest(example)\n assert res == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_extra_titles_test_asciitable.assert_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_extra_titles_test_asciitable.assert_res_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 270, "span_ids": ["test_extra_titles", "test_asciitable"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_extra_titles():\n example = \"\"\"\n\n Notes\n -----\n hello\n\n Foo\n ---\n\n Notes\n -----\n bar\n \"\"\"\n\n expected = \"\"\"\n\n Notes\n -----\n hello\n\n Foo\n ---\n\n Extra Notes\n -----------\n bar\n \"\"\"\n\n assert extra_titles(example) == expected\n\n\ndef test_asciitable():\n res = asciitable(\n [\"fruit\", \"color\"],\n [(\"apple\", \"red\"), (\"banana\", \"yellow\"), (\"tomato\", \"red\"), (\"pear\", \"green\")],\n )\n assert res == (\n \"+--------+--------+\\n\"\n \"| fruit | color |\\n\"\n \"+--------+--------+\\n\"\n \"| apple | red |\\n\"\n \"| banana | yellow |\\n\"\n \"| tomato | red |\\n\"\n \"| pear | green |\\n\"\n \"+--------+--------+\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_test_SerializableLock.None_4.for_y_in_b_b2_b3_.with_y_.with_x_.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_test_SerializableLock.None_4.for_y_in_b_b2_b3_.with_y_.with_x_.pass", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 273, "end_line": 305, "span_ids": ["test_SerializableLock"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SerializableLock():\n a = SerializableLock()\n b = SerializableLock()\n with a:\n pass\n\n with a:\n with b:\n pass\n\n with a:\n assert not a.acquire(False)\n\n a2 = pickle.loads(pickle.dumps(a))\n a3 = pickle.loads(pickle.dumps(a))\n a4 = pickle.loads(pickle.dumps(a2))\n\n for x in [a, a2, a3, a4]:\n for y in [a, a2, a3, a4]:\n with x:\n assert not y.acquire(False)\n\n b2 = pickle.loads(pickle.dumps(b))\n b3 = pickle.loads(pickle.dumps(b2))\n\n for x in [a, a2, a3, a4]:\n for y in [b, b2, b3]:\n with x:\n with y:\n pass\n with y:\n with x:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_name_collision_test_funcname_numpy_vectorize.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_SerializableLock_name_collision_test_funcname_numpy_vectorize.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 314, "end_line": 397, "span_ids": ["test_funcname_toolz", "test_funcname_long", "test_SerializableLock_locked", "test_funcname_numpy_vectorize", "test_SerializableLock_acquire_blocking", "test_funcname_multipledispatch", "test_SerializableLock_name_collision", "test_funcname"], "tokens": 554}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SerializableLock_name_collision():\n a = SerializableLock(\"a\")\n b = SerializableLock(\"b\")\n c = SerializableLock(\"a\")\n d = SerializableLock()\n\n assert a.lock is not b.lock\n assert a.lock is c.lock\n assert d.lock not in (a.lock, b.lock, c.lock)\n\n\ndef test_SerializableLock_locked():\n a = SerializableLock(\"a\")\n assert not a.locked()\n with a:\n assert a.locked()\n assert not a.locked()\n\n\ndef test_SerializableLock_acquire_blocking():\n a = SerializableLock(\"a\")\n assert a.acquire(blocking=True)\n assert not a.acquire(blocking=False)\n a.release()\n\n\ndef test_funcname():\n def foo(a, b, c):\n pass\n\n assert funcname(foo) == \"foo\"\n assert funcname(functools.partial(foo, a=1)) == \"foo\"\n assert funcname(M.sum) == \"sum\"\n assert funcname(lambda: 1) == \"lambda\"\n\n class Foo:\n pass\n\n assert funcname(Foo) == \"Foo\"\n assert \"Foo\" in funcname(Foo())\n\n\ndef test_funcname_long():\n def a_long_function_name_11111111111111111111111111111111111111111111111():\n pass\n\n result = funcname(\n a_long_function_name_11111111111111111111111111111111111111111111111\n )\n assert \"a_long_function_name\" in result\n assert len(result) < 60\n\n\ndef test_funcname_toolz():\n @curry\n def foo(a, b, c):\n pass\n\n assert funcname(foo) == \"foo\"\n assert funcname(foo(1)) == \"foo\"\n\n\ndef test_funcname_multipledispatch():\n md = pytest.importorskip(\"multipledispatch\")\n\n @md.dispatch(int, int, int)\n def foo(a, b, c):\n pass\n\n 
assert funcname(foo) == \"foo\"\n assert funcname(functools.partial(foo, a=1)) == \"foo\"\n\n\ndef test_funcname_numpy_vectorize():\n np = pytest.importorskip(\"numpy\")\n\n vfunc = np.vectorize(int)\n assert funcname(vfunc) == \"vectorize_int\"\n\n # Regression test for https://github.com/pydata/xarray/issues/3303\n # Partial functions don't have a __name__ attribute\n func = functools.partial(np.add, out=None)\n vfunc = np.vectorize(func)\n assert funcname(vfunc) == \"vectorize_add\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ndeepmap_test_ndeepmap.assert_ndeepmap_3_inc_L": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ndeepmap_test_ndeepmap.assert_ndeepmap_3_inc_L", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 396, "end_line": 410, "span_ids": ["test_ndeepmap"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ndeepmap():\n L = 1\n assert ndeepmap(0, inc, L) == 2\n\n L = [1]\n assert ndeepmap(0, inc, L) == 2\n\n L = [1, 2, 3]\n assert ndeepmap(1, inc, L) == [2, 3, 4]\n\n L = [[1, 2], [3, 4]]\n assert ndeepmap(2, inc, L) == [[2, 3], [4, 5]]\n\n L = [[[1, 2], [3, 4, 5]], [[6], []]]\n assert ndeepmap(3, inc, L) == [[[2, 3], [4, 5, 6]], [[7], []]]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ensure_dict_test_has_keyword.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_ensure_dict_test_has_keyword.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 453, "end_line": 497, "span_ids": ["test_ensure_dict", "test_has_keyword", "test_itemgetter", "test_partial_by_order"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ensure_dict():\n d = {\"x\": 1}\n assert ensure_dict(d) is d\n\n class mydict(dict):\n pass\n\n d2 = ensure_dict(d, copy=True)\n d3 = ensure_dict(HighLevelGraph.from_collections(\"x\", d))\n d4 = ensure_dict(mydict(d))\n\n for di in (d2, d3, d4):\n assert type(di) is dict\n assert di is not d\n assert di == d\n\n\ndef test_itemgetter():\n data = [1, 2, 3]\n g = itemgetter(1)\n assert g(data) == 2\n g2 = 
pickle.loads(pickle.dumps(g))\n assert g2(data) == 2\n assert g2.index == 1\n\n assert itemgetter(1) == itemgetter(1)\n assert itemgetter(1) != itemgetter(2)\n assert itemgetter(1) != 123\n\n\ndef test_partial_by_order():\n assert partial_by_order(5, function=operator.add, other=[(1, 20)]) == 25\n\n\ndef test_has_keyword():\n def foo(a, b, c=None):\n pass\n\n assert has_keyword(foo, \"a\")\n assert has_keyword(foo, \"b\")\n assert has_keyword(foo, \"c\")\n\n bar = functools.partial(foo, a=1)\n assert has_keyword(bar, \"b\")\n assert has_keyword(bar, \"c\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_test_derived_from.assert_extra_docstring": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_test_derived_from.assert_extra_docstring", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 455, "end_line": 489, "span_ids": ["test_derived_from"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_derived_from():\n class Foo:\n def f(a, b):\n \"\"\"A super docstring\n\n An explanation\n\n Parameters\n ----------\n a: int\n an explanation of a\n b: float\n an explanation of b\n \"\"\"\n\n class Bar:\n @derived_from(Foo)\n def f(a, c):\n pass\n\n class Zap:\n @derived_from(Foo)\n def f(a, c):\n \"extra docstring\"\n pass\n\n assert Bar.f.__doc__.strip().startswith(\"A super docstring\")\n assert \"Foo.f\" in Bar.f.__doc__\n assert any(\"inconsistencies\" in line for line in Bar.f.__doc__.split(\"\\n\")[:7])\n\n [b_arg] = [line for line in Bar.f.__doc__.split(\"\\n\") if \"b:\" in line]\n assert \"not supported\" in b_arg.lower()\n assert \"dask\" in b_arg.lower()\n\n assert \" extra docstring\\n\\n\" in Zap.f.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_bytes_test_parse_bytes.assert_parse_bytes_5GB_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_bytes_test_parse_bytes.assert_parse_bytes_5GB_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 563, "end_line": 575, "span_ids": ["test_parse_bytes"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_parse_bytes():\n assert parse_bytes(\"100\") == 100\n assert parse_bytes(\"100 MB\") == 100000000\n assert parse_bytes(\"100M\") == 100000000\n assert parse_bytes(\"5kB\") == 5000\n assert parse_bytes(\"5.4 kB\") == 5400\n assert parse_bytes(\"1kiB\") == 1024\n assert parse_bytes(\"1Mi\") == 2**20\n assert parse_bytes(\"1e6\") == 1000000\n assert parse_bytes(\"1e6 kB\") == 1000000000\n assert parse_bytes(\"MB\") == 1000000\n assert parse_bytes(123) == 123\n assert parse_bytes(\".5GB\") == 500000000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_timedelta_test_parse_timedelta.assert_parse_timedelta_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_parse_timedelta_test_parse_timedelta.assert_parse_timedelta_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 532, "end_line": 557, "span_ids": ["test_parse_timedelta"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_timedelta():\n for text, value in [\n (\"1s\", 1),\n (\"100ms\", 0.1),\n (\"5S\", 5),\n (\"5.5s\", 5.5),\n (\"5.5 s\", 5.5),\n (\"1 second\", 1),\n (\"3.3 seconds\", 3.3),\n (\"3.3 milliseconds\", 0.0033),\n (\"3500 us\", 0.0035),\n (\"1 ns\", 1e-9),\n (\"2m\", 120),\n (\"2 minutes\", 120),\n (None, None),\n (3, 3),\n (datetime.timedelta(seconds=2), 2),\n (datetime.timedelta(milliseconds=100), 0.1),\n ]:\n result = parse_timedelta(text)\n assert result == value or abs(result - value) < 1e-14\n\n assert parse_timedelta(\"1ms\", default=\"seconds\") == 0.001\n assert parse_timedelta(\"1\", default=\"seconds\") == 1\n assert parse_timedelta(\"1\", default=\"ms\") == 0.001\n assert parse_timedelta(1, default=\"ms\") == 0.001", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py___pack_exception.return.e_sys_exc_info_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py___pack_exception.return.e_sys_exc_info_2_", "embedding": null, "metadata": {"file_path": "dask/threaded.py", "file_name": "threaded.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 33, "span_ids": ["pack_exception", "_thread_get_id", "impl", "docstring"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "\"\"\"\nA threaded shared-memory scheduler\n\nSee local.py\n\"\"\"\nfrom __future__ import annotations\n\nimport atexit\nimport multiprocessing.pool\nimport sys\nimport threading\nfrom collections import defaultdict\nfrom concurrent.futures import Executor, ThreadPoolExecutor\nfrom threading import Lock, current_thread\n\nfrom . import config\nfrom .local import MultiprocessingPoolExecutor, get_async\nfrom .system import CPU_COUNT\nfrom .utils_test import add, inc # noqa: F401\n\n\ndef _thread_get_id():\n return current_thread().ident\n\n\nmain_thread = current_thread()\ndefault_pool: Executor | None = None\npools: defaultdict[threading.Thread, dict[int, Executor]] = defaultdict(dict)\npools_lock = Lock()\n\n\ndef pack_exception(e, dumps):\n return e, sys.exc_info()[2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py_get_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/threaded.py_get_", "embedding": null, "metadata": {"file_path": "dask/threaded.py", "file_name": "threaded.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 100, "span_ids": ["get"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get(dsk, result, cache=None, num_workers=None, pool=None, **kwargs):\n \"\"\"Threaded cached implementation of dask.get\n\n Parameters\n ----------\n\n dsk: dict\n A dask dictionary specifying a workflow\n result: key or list of keys\n Keys corresponding to desired data\n num_workers: integer of thread count\n The number of threads to use in the ThreadPool that will actually execute tasks\n cache: dict-like (optional)\n Temporary storage of results\n\n Examples\n --------\n\n >>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}\n >>> get(dsk, 'w')\n 4\n >>> get(dsk, ['w', 'y'])\n (4, 2)\n \"\"\"\n global default_pool\n pool = pool or config.get(\"pool\", None)\n num_workers = num_workers or config.get(\"num_workers\", None)\n thread = current_thread()\n\n with pools_lock:\n if pool is None:\n if num_workers is None and thread is main_thread:\n if default_pool is None:\n default_pool = ThreadPoolExecutor(CPU_COUNT)\n atexit.register(default_pool.shutdown)\n pool = default_pool\n elif thread in pools and num_workers in pools[thread]:\n pool = pools[thread][num_workers]\n else:\n pool = ThreadPoolExecutor(num_workers)\n atexit.register(pool.shutdown)\n pools[thread][num_workers] = pool\n elif isinstance(pool, multiprocessing.pool.Pool):\n pool = MultiprocessingPoolExecutor(pool)\n\n results = get_async(\n pool.submit,\n pool._max_workers,\n dsk,\n result,\n cache=cache,\n get_id=_thread_get_id,\n pack_exception=pack_exception,\n **kwargs,\n )\n\n # Cleanup pools associated to dead threads\n with pools_lock:\n active_threads = set(threading.enumerate())\n if thread is not main_thread:\n for t in list(pools):\n if t not in active_threads:\n for p in pools.pop(t).values():\n p.shutdown()\n\n return results", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_deepmap_deepmap.if_isinstance_seqs_0_l.else_.return.func_seqs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_deepmap_deepmap.if_isinstance_seqs_0_l.else_.return.func_seqs_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 48, "span_ids": ["deepmap"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def deepmap(func, *seqs):\n \"\"\"Apply function inside nested lists\n\n >>> inc = lambda x: x + 1\n >>> deepmap(inc, [[1, 2], [3, 4]])\n [[2, 3], [4, 5]]\n\n >>> add = lambda x, y: x + y\n >>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])\n [[11, 22], [33, 44]]\n \"\"\"\n if isinstance(seqs[0], (list, Iterator)):\n return [deepmap(func, *items) for items in zip(*seqs)]\n else:\n return func(*seqs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetexts_concrete.return.seq": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetexts_concrete.return.seq", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 260, "end_line": 308, "span_ids": ["filetexts", "concrete"], "tokens": 304}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef filetexts(d, open=open, mode=\"t\", use_tmpdir=True):\n \"\"\"Dumps a number of textfiles to disk\n\n Parameters\n ----------\n d : dict\n a mapping from filename to text like {'a.csv': '1,1\\n2,2'}\n\n Since this is meant for use in tests, this context manager will\n automatically switch to a temporary current directory, to avoid\n race conditions when running tests in parallel.\n \"\"\"\n with (tmp_cwd() if use_tmpdir else nullcontext()):\n for filename, text in d.items():\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError:\n pass\n f = open(filename, \"w\" + mode)\n try:\n f.write(text)\n finally:\n try:\n f.close()\n except AttributeError:\n pass\n\n yield list(d)\n\n for filename in d:\n if os.path.exists(filename):\n with suppress(OSError):\n os.remove(filename)\n\n\ndef concrete(seq):\n \"\"\"Make nested iterators concrete lists\n\n >>> data = [[1, 2], [3, 4]]\n >>> seq = iter(map(iter, data))\n >>> concrete(seq)\n [[1, 2], [3, 4]]\n \"\"\"\n if 
isinstance(seq, Iterator):\n seq = list(seq)\n if isinstance(seq, (tuple, list)):\n seq = list(map(concrete, seq))\n return seq", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_pseudorandom_pseudorandom.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_pseudorandom_pseudorandom.return.out", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 241, "end_line": 265, "span_ids": ["pseudorandom"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pseudorandom(n, p, random_state=None):\n \"\"\"Pseudorandom array of integer indexes\n\n >>> pseudorandom(5, [0.5, 0.5], random_state=123)\n array([1, 0, 0, 1, 1], dtype=int8)\n\n >>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)\n array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)\n \"\"\"\n import numpy as np\n\n p = list(p)\n cp = np.cumsum([0] + p)\n assert np.allclose(1, cp[-1])\n assert len(p) < 256\n\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n x = random_state.random_sample(n)\n out = np.empty(n, dtype=\"i1\")\n\n for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):\n out[(x >= low) & (x < high)] = i\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_random_state_data_random_state_data.return.l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_random_state_data_random_state_data.return.l", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 268, "end_line": 289, "span_ids": ["random_state_data"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def random_state_data(n, random_state=None):\n \"\"\"Return a list of arrays that can initialize\n ``np.random.RandomState``.\n\n Parameters\n ----------\n n : int\n Number of arrays to return.\n random_state : int or np.random.RandomState, optional\n If an int, is used to seed a new ``RandomState``.\n \"\"\"\n import numpy as np\n\n if not all(\n hasattr(random_state, attr) for attr in [\"normal\", \"beta\", \"bytes\", \"uniform\"]\n ):\n random_state = np.random.RandomState(random_state)\n\n random_data = random_state.bytes(624 * n * 4) # `n * 624` 32-bit integers\n l 
= list(np.frombuffer(random_data, dtype=np.uint32).reshape((n, -1)))\n assert len(l) == n\n return l", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_integer_getargspec.if_isinstance_func_type_.else_.return.inspect_getfullargspec_fu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_integer_getargspec.if_isinstance_func_type_.else_.return.inspect_getfullargspec_fu", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 416, "end_line": 500, "span_ids": ["impl:10", "is_integer", "getargspec"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_integer(i):\n \"\"\"\n >>> is_integer(6)\n True\n >>> is_integer(42.0)\n True\n >>> is_integer('abc')\n False\n \"\"\"\n return isinstance(i, Integral) or (isinstance(i, float) and i.is_integer())\n\n\nONE_ARITY_BUILTINS = {\n abs,\n all,\n any,\n ascii,\n bool,\n bytearray,\n bytes,\n callable,\n chr,\n classmethod,\n complex,\n dict,\n dir,\n enumerate,\n eval,\n float,\n format,\n frozenset,\n hash,\n hex,\n id,\n int,\n iter,\n len,\n list,\n max,\n min,\n next,\n oct,\n open,\n ord,\n range,\n repr,\n reversed,\n round,\n set,\n slice,\n sorted,\n staticmethod,\n str,\n sum,\n tuple,\n type,\n vars,\n zip,\n memoryview,\n}\nMULTI_ARITY_BUILTINS = {\n compile,\n delattr,\n divmod,\n filter,\n getattr,\n hasattr,\n isinstance,\n issubclass,\n map,\n pow,\n setattr,\n}\n\n\ndef getargspec(func):\n \"\"\"Version of inspect.getargspec that works with partial and warps.\"\"\"\n if isinstance(func, functools.partial):\n return getargspec(func.func)\n\n func = getattr(func, \"__wrapped__\", func)\n if isinstance(func, type):\n return inspect.getfullargspec(func.__init__)\n else:\n return inspect.getfullargspec(func)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_takes_multiple_arguments_takes_multiple_arguments.return.len_spec_args_ndefault": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_takes_multiple_arguments_takes_multiple_arguments.return.len_spec_args_ndefault", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 391, "end_line": 435, "span_ids": ["takes_multiple_arguments"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def takes_multiple_arguments(func, varargs=True):\n \"\"\"Does this function take multiple arguments?\n\n >>> def f(x, y): pass\n >>> takes_multiple_arguments(f)\n True\n\n >>> def f(x): pass\n >>> takes_multiple_arguments(f)\n False\n\n >>> def f(x, y=None): pass\n >>> takes_multiple_arguments(f)\n False\n\n >>> def f(*args): pass\n >>> takes_multiple_arguments(f)\n True\n\n >>> class Thing:\n ... def __init__(self, a): pass\n >>> takes_multiple_arguments(Thing)\n False\n\n \"\"\"\n if func in ONE_ARITY_BUILTINS:\n return False\n elif func in MULTI_ARITY_BUILTINS:\n return True\n\n try:\n spec = getargspec(func)\n except Exception:\n return False\n\n try:\n is_constructor = spec.args[0] == \"self\" and isinstance(func, type)\n except Exception:\n is_constructor = False\n\n if varargs and spec.varargs:\n return True\n\n ndefaults = 0 if spec.defaults is None else len(spec.defaults)\n return len(spec.args) - ndefaults - is_constructor > 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_named_args_Dispatch.register_lazy.return.wrapper_func_if_func_is_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_named_args_Dispatch.register_lazy.return.wrapper_func_if_func_is_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 438, "end_line": 480, "span_ids": ["Dispatch", "get_named_args", "Dispatch.__init__", "Dispatch.register", "Dispatch.register_lazy"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_named_args(func):\n \"\"\"Get all non ``*args/**kwargs`` arguments for a function\"\"\"\n s = inspect.signature(func)\n return [\n n\n for n, p in s.parameters.items()\n if p.kind in [p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY, p.KEYWORD_ONLY]\n ]\n\n\nclass Dispatch:\n \"\"\"Simple single dispatch.\"\"\"\n\n def __init__(self, name=None):\n self._lookup = {}\n self._lazy = {}\n if name:\n self.__name__ = name\n\n def register(self, type, func=None):\n \"\"\"Register dispatch of `func` on arguments of type `type`\"\"\"\n\n def wrapper(func):\n if isinstance(type, tuple):\n for t in type:\n self.register(t, func)\n else:\n self._lookup[type] = func\n return func\n\n return wrapper(func) if func is not None else wrapper\n\n def register_lazy(self, toplevel, func=None):\n \"\"\"\n Register a registration function which will be called if the\n *toplevel* module (e.g. 
'pandas') is ever loaded.\n \"\"\"\n\n def wrapper(func):\n self._lazy[toplevel] = func\n return func\n\n return wrapper(func) if func is not None else wrapper", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_Dispatch.dispatch_Dispatch.__doc__.try_.except_TypeError_.return._Single_Dispatch_for_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_Dispatch.dispatch_Dispatch.__doc__.try_.except_TypeError_.return._Single_Dispatch_for_s_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 594, "end_line": 631, "span_ids": ["Dispatch.__doc__", "Dispatch.dispatch", "Dispatch.__call__"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Dispatch:\n\n def dispatch(self, cls):\n \"\"\"Return the function implementation for the given ``cls``\"\"\"\n lk = self._lookup\n for cls2 in cls.__mro__:\n try:\n impl = lk[cls2]\n except KeyError:\n pass\n else:\n if cls is not cls2:\n # Cache lookup\n lk[cls] = impl\n return impl\n # Is a lazy registration function present?\n toplevel, _, _ = cls2.__module__.partition(\".\")\n try:\n register = self._lazy.pop(toplevel)\n except KeyError:\n pass\n else:\n register()\n return self.dispatch(cls) # recurse\n raise TypeError(f\"No dispatch for {cls}\")\n\n def __call__(self, arg, *args, **kwargs):\n \"\"\"\n Call the corresponding method based on type of argument.\n \"\"\"\n meth = self.dispatch(type(arg))\n return meth(arg, *args, **kwargs)\n\n @property\n def __doc__(self):\n try:\n func = self.dispatch(object)\n return func.__doc__\n except TypeError:\n return \"Single Dispatch for %s\" % self.__name__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_not_exists_skip_doctest.return._n_join__skip_doctest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_not_exists_skip_doctest.return._n_join__skip_doctest_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 516, "end_line": 544, "span_ids": ["ensure_not_exists", "skip_doctest", "_skip_doctest"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ensure_not_exists(filename):\n \"\"\"\n Ensure that a file does not exist.\n \"\"\"\n try:\n 
os.unlink(filename)\n except OSError as e:\n if e.errno != ENOENT:\n raise\n\n\ndef _skip_doctest(line):\n # NumPy docstring contains cursor and comment only example\n stripped = line.strip()\n if stripped == \">>>\" or stripped.startswith(\">>> #\"):\n return line\n elif \">>>\" in stripped and \"+SKIP\" not in stripped:\n if \"# doctest:\" in line:\n return line + \", +SKIP\"\n else:\n return line + \" # doctest: +SKIP\"\n else:\n return line\n\n\ndef skip_doctest(doc):\n if doc is None:\n return \"\"\n return \"\\n\".join([_skip_doctest(line) for line in doc.split(\"\\n\")])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_extra_titles_extra_titles.return._n_join_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_extra_titles_extra_titles.return._n_join_lines_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 547, "end_line": 564, "span_ids": ["extra_titles"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def extra_titles(doc):\n lines = doc.split(\"\\n\")\n titles = {\n i: lines[i].strip()\n for i in range(len(lines) - 1)\n if lines[i + 1].strip() and all(c == \"-\" for c in lines[i + 1].strip())\n }\n\n seen = set()\n for i, title in sorted(titles.items()):\n if title in seen:\n new_title = \"Extra \" + title\n lines[i] = lines[i].replace(title, new_title)\n lines[i + 1] = lines[i + 1].replace(\"-\" * len(title), \"-\" * len(new_title))\n else:\n seen.add(title)\n\n return \"\\n\".join(lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignore_warning_ignore_warning.return.doc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ignore_warning_ignore_warning.return.doc", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 685, "end_line": 719, "span_ids": ["ignore_warning"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ignore_warning(doc, cls, name, extra=\"\", skipblocks=0):\n \"\"\"Expand docstring by adding disclaimer and extra text\"\"\"\n import inspect\n\n if inspect.isclass(cls):\n l1 = \"This docstring was copied from {}.{}.{}.\\n\\n\".format(\n cls.__module__,\n cls.__name__,\n name,\n )\n else:\n l1 = f\"This docstring 
was copied from {cls.__name__}.{name}.\\n\\n\"\n l2 = \"Some inconsistencies with the Dask version may exist.\"\n\n i = doc.find(\"\\n\\n\")\n if i != -1:\n # Insert our warning\n head = doc[: i + 2]\n tail = doc[i + 2 :]\n while skipblocks > 0:\n i = tail.find(\"\\n\\n\")\n head = tail[: i + 2]\n tail = tail[i + 2 :]\n skipblocks -= 1\n # Indentation of next line\n indent = re.match(r\"\\s*\", tail).group(0)\n # Insert the warning, indented, with a blank line before and after\n if extra:\n more = [indent, extra.rstrip(\"\\n\") + \"\\n\\n\"]\n else:\n more = []\n bits = [head, indent, l1, indent, l2, \"\\n\\n\"] + more + [tail]\n doc = \"\".join(bits)\n\n return doc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_unsupported_arguments_unsupported_arguments.return._n_join_lines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_unsupported_arguments_unsupported_arguments.return._n_join_lines_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 674, "end_line": 686, "span_ids": ["unsupported_arguments"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unsupported_arguments(doc, args):\n \"\"\"Mark unsupported arguments with a disclaimer\"\"\"\n lines = doc.split(\"\\n\")\n for arg in args:\n subset = [\n (i, line)\n for i, line in enumerate(lines)\n if re.match(r\"^\\s*\" + arg + \" ?:\", line)\n ]\n if len(subset) == 1:\n [(i, line)] = subset\n lines[i] = line + \" (Not supported in Dask)\"\n return \"\\n\".join(lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__derived_from__derived_from.return.doc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__derived_from__derived_from.return.doc", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 719, "end_line": 769, "span_ids": ["_derived_from"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _derived_from(cls, method, ua_args=None, extra=\"\", skipblocks=0):\n \"\"\"Helper function for derived_from to ease testing\"\"\"\n ua_args = ua_args or []\n\n # do not use wraps here, as it hides keyword arguments displayed\n # in the doc\n original_method = getattr(cls, method.__name__)\n\n doc = 
getattr(original_method, \"__doc__\", None)\n\n if isinstance(original_method, property):\n # some things like SeriesGroupBy.unique are generated.\n original_method = original_method.fget\n if not doc:\n doc = getattr(original_method, \"__doc__\", None)\n\n if doc is None:\n doc = \"\"\n\n # pandas DataFrame/Series sometimes override methods without setting __doc__\n if not doc and cls.__name__ in {\"DataFrame\", \"Series\"}:\n for obj in cls.mro():\n obj_method = getattr(obj, method.__name__, None)\n if obj_method is not None and obj_method.__doc__:\n doc = obj_method.__doc__\n break\n\n # Insert disclaimer that this is a copied docstring\n if doc:\n doc = ignore_warning(\n doc, cls, method.__name__, extra=extra, skipblocks=skipblocks\n )\n elif extra:\n doc += extra.rstrip(\"\\n\") + \"\\n\\n\"\n\n # Mark unsupported arguments\n try:\n method_args = get_named_args(method)\n original_args = get_named_args(original_method)\n not_supported = [m for m in original_args if m not in method_args]\n except ValueError:\n not_supported = []\n if len(ua_args) > 0:\n not_supported.extend(ua_args)\n if len(not_supported) > 0:\n doc = unsupported_arguments(doc, not_supported)\n\n doc = skip_doctest(doc)\n doc = extra_titles(doc)\n\n return doc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from.wrapper_derived_from.return.wrapper": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from.wrapper_derived_from.return.wrapper", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 802, "end_line": 827, "span_ids": ["derived_from"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def derived_from(original_klass, version=None, ua_args=None, skipblocks=0):\n # ... 
other code\n\n def wrapper(method):\n try:\n extra = getattr(method, \"__doc__\", None) or \"\"\n method.__doc__ = _derived_from(\n original_klass,\n method,\n ua_args=ua_args,\n extra=extra,\n skipblocks=skipblocks,\n )\n return method\n\n except AttributeError:\n module_name = original_klass.__module__.split(\".\")[0]\n\n @functools.wraps(method)\n def wrapped(*args, **kwargs):\n msg = f\"Base package doesn't support '{method.__name__}'.\"\n if version is not None:\n msg2 = \" Use {0} {1} or later to use this method.\"\n msg += msg2.format(module_name, version)\n raise NotImplementedError(msg)\n\n return wrapped\n\n return wrapper", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_funcname_funcname.try_.except_AttributeError_.return.str_func_50_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_funcname_funcname.try_.except_AttributeError_.return.str_func_50_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 709, "end_line": 738, "span_ids": ["funcname"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n # functools.partial\n if isinstance(func, functools.partial):\n return funcname(func.func)\n # methodcaller\n if isinstance(func, methodcaller):\n return func.method[:50]\n\n module_name = getattr(func, \"__module__\", None) or \"\"\n type_name = getattr(type(func), \"__name__\", None) or \"\"\n\n # toolz.curry\n if \"toolz\" in module_name and \"curry\" == type_name:\n return func.func_name[:50]\n # multipledispatch objects\n if \"multipledispatch\" in module_name and \"Dispatcher\" == type_name:\n return func.name[:50]\n # numpy.vectorize objects\n if \"numpy\" in module_name and \"vectorize\" == type_name:\n return (\"vectorize_\" + funcname(func.pyfunc))[:50]\n\n # All other callables\n try:\n name = func.__name__\n if name == \"<lambda>\":\n return \"lambda\"\n return name[:50]\n except AttributeError:\n return str(func)[:50]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_asciitable_asciitable.return._n_join_bar_header_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_asciitable_asciitable.return._n_join_bar_header_b", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 843, "end_line": 861, "span_ids": ["asciitable"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line",
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asciitable(columns, rows):\n \"\"\"Formats an ascii table for given columns and rows.\n\n Parameters\n ----------\n columns : list\n The column names\n rows : list of tuples\n The rows in the table. Each tuple must be the same length as\n ``columns``.\n \"\"\"\n rows = [tuple(str(i) for i in r) for r in rows]\n columns = tuple(str(i) for i in columns)\n widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))\n row_template = (\"|\" + (\" %%-%ds |\" * len(columns))) % widths\n header = row_template % tuple(columns)\n bar = \"+%s+\" % \"+\".join(\"-\" * (w + 2) for w in widths)\n data = \"\\n\".join(row_template % r for r in rows)\n return \"\\n\".join([bar, header, bar, data, bar])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_put_lines_methodcaller.__repr__.__str__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_put_lines_methodcaller.__repr__.__str__", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 983, "end_line": 1026, "span_ids": ["methodcaller.__reduce__", "methodcaller.__call__", "methodcaller.__str__", "methodcaller.func", "methodcaller.__new__", "impl:14", "methodcaller:6", "put_lines", "methodcaller"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def put_lines(buf, lines):\n if any(not isinstance(x, str) for x in lines):\n lines = [str(x) for x in lines]\n buf.write(\"\\n\".join(lines))\n\n\n_method_cache: dict[str, methodcaller] = {}\n\n\nclass methodcaller:\n \"\"\"\n Return a callable object that calls the given method on its operand.\n\n Unlike the builtin `operator.methodcaller`, instances of this class are\n cached and arguments are passed at call time instead of build time.\n \"\"\"\n\n __slots__ = (\"method\",)\n method: str\n\n @property\n def func(self) -> str:\n # For `funcname` to work\n return self.method\n\n def __new__(cls, method: str):\n try:\n return _method_cache[method]\n except KeyError:\n self = object.__new__(cls)\n self.method = method\n _method_cache[method] = self\n return self\n\n def __call__(self, __obj, *args, **kwargs):\n return getattr(__obj, self.method)(*args, **kwargs)\n\n def __reduce__(self):\n return (methodcaller, (self.method,))\n\n def __str__(self):\n return f\"<{self.__class__.__name__}: {self.method}>\"\n\n __repr__ = __str__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_itemgetter_M.MethodCache_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_itemgetter_M.MethodCache_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1029, "end_line": 1065, "span_ids": ["itemgetter.__eq__", "MethodCache", "impl:16", "MethodCache.__dir__", "itemgetter.__call__", "itemgetter.__init__", "MethodCache.__getattr__", "itemgetter.__reduce__", "itemgetter"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class itemgetter:\n \"\"\"Variant of operator.itemgetter that supports equality tests\"\"\"\n\n __slots__ = (\"index\",)\n\n def __init__(self, index):\n self.index = index\n\n def __call__(self, x):\n return x[self.index]\n\n def __reduce__(self):\n return (itemgetter, (self.index,))\n\n def __eq__(self, other):\n return type(self) is type(other) and self.index == other.index\n\n\nclass MethodCache:\n \"\"\"Attribute access on this object returns a methodcaller for that\n attribute.\n\n Examples\n --------\n >>> a = [1, 3, 3]\n >>> M.count(a, 3) == a.count(3)\n True\n \"\"\"\n\n def __getattr__(self, item):\n return methodcaller(item)\n\n def __dir__(self):\n return list(_method_cache)\n\n\nM = MethodCache()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_SerializableLock_SerializableLock.__repr__.__str__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_SerializableLock_SerializableLock.__repr__.__str__", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1068, "end_line": 1132, "span_ids": ["SerializableLock.__exit__", "SerializableLock.__getstate__", "SerializableLock:7", "SerializableLock.__setstate__", "SerializableLock.__init__", "SerializableLock.release", "SerializableLock.__str__", "SerializableLock", "SerializableLock.locked", "SerializableLock.__enter__", "SerializableLock.acquire"], "tokens": 440}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SerializableLock:\n \"\"\"A Serializable per-process Lock\n\n This wraps a normal ``threading.Lock`` object and satisfies the same\n interface. However, this lock can also be serialized and sent to different\n processes. 
It will not block concurrent operations between processes (for\n this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)\n but will consistently deserialize into the same lock.\n\n So if we make a lock in one process::\n\n lock = SerializableLock()\n\n And then send it over to another process multiple times::\n\n bytes = pickle.dumps(lock)\n a = pickle.loads(bytes)\n b = pickle.loads(bytes)\n\n Then the deserialized objects will operate as though they were the same\n lock, and collide as appropriate.\n\n This is useful for consistently protecting resources on a per-process\n level.\n\n The creation of locks is itself not threadsafe.\n \"\"\"\n\n _locks: ClassVar[WeakValueDictionary[Hashable, Lock]] = WeakValueDictionary()\n token: Hashable\n lock: Lock\n\n def __init__(self, token: Hashable | None = None):\n self.token = token or str(uuid.uuid4())\n if self.token in SerializableLock._locks:\n self.lock = SerializableLock._locks[self.token]\n else:\n self.lock = Lock()\n SerializableLock._locks[self.token] = self.lock\n\n def acquire(self, *args, **kwargs):\n return self.lock.acquire(*args, **kwargs)\n\n def release(self, *args, **kwargs):\n return self.lock.release(*args, **kwargs)\n\n def __enter__(self):\n self.lock.__enter__()\n\n def __exit__(self, *args):\n self.lock.__exit__(*args)\n\n def locked(self):\n return self.lock.locked()\n\n def __getstate__(self):\n return self.token\n\n def __setstate__(self, token):\n self.__init__(token)\n\n def __str__(self):\n return f\"<{self.__class__.__name__}: {self.token}>\"\n\n __repr__ = __str__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_natural_sort_key_factors.return.set_functools_reduce_list": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_natural_sort_key_factors.return.set_functools_reduce_list", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1145, "end_line": 1180, "span_ids": ["factors", "natural_sort_key"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def natural_sort_key(s):\n \"\"\"\n Sorting `key` function for performing a natural sort on a collection of\n strings\n\n See https://en.wikipedia.org/wiki/Natural_sort_order\n\n Parameters\n ----------\n s : str\n A string that is an element of the collection being sorted\n\n Returns\n -------\n tuple[str or int]\n Tuple of the parts of the input string where each part is either a\n string or an integer\n\n Examples\n --------\n >>> a = ['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']\n >>> sorted(a)\n ['f0', 'f1', 'f10', 'f11', 'f19', 'f2', 'f20', 'f21', 'f8', 'f9']\n >>> sorted(a, key=natural_sort_key)\n ['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']\n \"\"\"\n return [int(part) if part.isdigit() else part for part in re.split(r\"(\\d+)\", s)]\n\n\ndef factors(n):\n \"\"\"Return the factors of an 
integer\n\n https://stackoverflow.com/a/6800214/616616\n \"\"\"\n seq = ([i, n // i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)\n return set(functools.reduce(list.__add__, seq))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_bytes_parse_bytes.return.int_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_bytes_parse_bytes.return.int_result_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1348, "end_line": 1402, "span_ids": ["parse_bytes"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_bytes(s):\n \"\"\"Parse byte string to numbers\n\n >>> from dask.utils import parse_bytes\n >>> parse_bytes('100')\n 100\n >>> parse_bytes('100 MB')\n 100000000\n >>> parse_bytes('100M')\n 100000000\n >>> parse_bytes('5kB')\n 5000\n >>> parse_bytes('5.4 kB')\n 5400\n >>> parse_bytes('1kiB')\n 1024\n >>> parse_bytes('1e6')\n 1000000\n >>> parse_bytes('1e6 kB')\n 1000000000\n >>> parse_bytes('MB')\n 1000000\n >>> parse_bytes(123)\n 123\n >>> parse_bytes('5 foos')\n Traceback (most recent call last):\n ...\n ValueError: Could not interpret 'foos' as a byte unit\n \"\"\"\n if isinstance(s, (int, float)):\n return int(s)\n s = s.replace(\" \", \"\")\n if not any(char.isdigit() for char in s):\n s = \"1\" + s\n\n for i in range(len(s) - 1, -1, -1):\n if not s[i].isalpha():\n break\n index = i + 1\n\n prefix = s[:index]\n suffix = s[index:]\n\n try:\n n = float(prefix)\n except ValueError as e:\n raise ValueError(\"Could not interpret '%s' as a number\" % prefix) from e\n\n try:\n multiplier = byte_sizes[suffix.lower()]\n except KeyError as e:\n raise ValueError(\"Could not interpret '%s' as a byte unit\" % suffix) from e\n\n result = n * multiplier\n return int(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_byte_sizes_byte_sizes_update_k_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_byte_sizes_byte_sizes_update_k_1_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1406, "end_line": 1422, "span_ids": ["impl:18"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "byte_sizes = {\n \"kB\": 10**3,\n \"MB\": 10**6,\n 
\"GB\": 10**9,\n \"TB\": 10**12,\n \"PB\": 10**15,\n \"KiB\": 2**10,\n \"MiB\": 2**20,\n \"GiB\": 2**30,\n \"TiB\": 2**40,\n \"PiB\": 2**50,\n \"B\": 1,\n \"\": 1,\n}\nbyte_sizes = {k.lower(): v for k, v in byte_sizes.items()}\nbyte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and \"i\" not in k})\nbyte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and \"i\" in k})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_format_time.return._2f_us_n_1e6_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_format_time.return._2f_us_n_1e6_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1257, "end_line": 1274, "span_ids": ["format_time"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_time(n):\n \"\"\"format integers as time\n\n >>> from dask.utils import format_time\n >>> format_time(1)\n '1.00 s'\n >>> format_time(0.001234)\n '1.23 ms'\n >>> format_time(0.00012345)\n '123.45 us'\n >>> format_time(123.456)\n '123.46 s'\n \"\"\"\n if n >= 1:\n return \"%.2f s\" % n\n if n >= 1e-3:\n return \"%.2f ms\" % (n * 1e3)\n return \"%.2f us\" % (n * 1e6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_timedelta_sizes_timedelta_sizes_update_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_timedelta_sizes_timedelta_sizes_update_k", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1397, "end_line": 1418, "span_ids": ["impl:24"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "timedelta_sizes = {\n \"s\": 1,\n \"ms\": 1e-3,\n \"us\": 1e-6,\n \"ns\": 1e-9,\n \"m\": 60,\n \"h\": 3600,\n \"d\": 3600 * 24,\n}\n\ntds2 = {\n \"second\": 1,\n \"minute\": 60,\n \"hour\": 60 * 60,\n \"day\": 60 * 60 * 24,\n \"millisecond\": 1e-3,\n \"microsecond\": 1e-6,\n \"nanosecond\": 1e-9,\n}\ntds2.update({k + \"s\": v for k, v in tds2.items()})\ntimedelta_sizes.update(tds2)\ntimedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_timedelta_parse_timedelta.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_parse_timedelta_parse_timedelta.return.result", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1331, "end_line": 1373, "span_ids": ["parse_timedelta"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_timedelta(s, default=\"seconds\"):\n \"\"\"Parse timedelta string to number of seconds\n\n Examples\n --------\n >>> from datetime import timedelta\n >>> from dask.utils import parse_timedelta\n >>> parse_timedelta('3s')\n 3\n >>> parse_timedelta('3.5 seconds')\n 3.5\n >>> parse_timedelta('300ms')\n 0.3\n >>> parse_timedelta(timedelta(seconds=3)) # also supports timedeltas\n 3\n \"\"\"\n if s is None:\n return None\n if isinstance(s, timedelta):\n s = s.total_seconds()\n return int(s) if int(s) == s else s\n if isinstance(s, Number):\n s = str(s)\n s = s.replace(\" \", \"\")\n if not s[0].isdigit():\n s = \"1\" + s\n\n for i in range(len(s) - 1, -1, -1):\n if not s[i].isalpha():\n break\n index = i + 1\n\n prefix = s[:index]\n suffix = s[index:] or default\n\n n = float(prefix)\n\n multiplier = timedelta_sizes[suffix.lower()]\n\n result = n * multiplier\n if int(result) == result:\n result = int(result)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_has_keyword_hex_pattern.re_compile_a_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_has_keyword_hex_pattern.re_compile_a_f_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1466, "end_line": 1510, "span_ids": ["has_keyword", "ndimlist", "iter_chunks", "impl:31"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def has_keyword(func, keyword):\n try:\n return keyword in inspect.signature(func).parameters\n except Exception:\n return False\n\n\ndef ndimlist(seq):\n if not isinstance(seq, (list, tuple)):\n return 0\n elif not seq:\n return 1\n else:\n return 1 + ndimlist(seq[0])\n\n\ndef iter_chunks(sizes, max_size):\n \"\"\"Split sizes into chunks of total max_size each\n\n Parameters\n ----------\n sizes : iterable of numbers\n The sizes to be chunked\n max_size : number\n Maximum total size per chunk.\n It must be greater or equal than each size in sizes\n \"\"\"\n chunk, chunk_sum = [], 0\n iter_sizes = 
iter(sizes)\n size = next(iter_sizes, None)\n while size is not None:\n assert size <= max_size\n if chunk_sum + size <= max_size:\n chunk.append(size)\n chunk_sum += size\n size = next(iter_sizes, None)\n else:\n assert chunk\n yield chunk\n chunk, chunk_sum = [], 0\n if chunk:\n yield chunk\n\n\nhex_pattern = re.compile(\"[a-f]+\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_list_top_level_GetFunctionTestMixin.test_get_with_list_top_level.assert_self_get_d_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_list_top_level_GetFunctionTestMixin.test_get_with_list_top_level.assert_self_get_d_f_", "embedding": null, "metadata": {"file_path": "dask/utils_test.py", "file_name": "utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 73, "end_line": 87, "span_ids": ["GetFunctionTestMixin.test_get_with_list_top_level"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class GetFunctionTestMixin:\n\n def test_get_with_list_top_level(self):\n d = {\n \"a\": [1, 2, 3],\n \"b\": \"a\",\n \"c\": [1, (inc, 1)],\n \"d\": [(sum, \"a\")],\n \"e\": [\"a\", \"b\"],\n \"f\": [[[(sum, \"a\"), \"c\"], (sum, \"b\")], 2],\n }\n assert self.get(d, \"a\") == [1, 2, 3]\n assert self.get(d, \"b\") == [1, 2, 3]\n assert self.get(d, \"c\") == [1, 2]\n assert self.get(d, \"d\") == [6]\n assert self.get(d, \"e\") == [[1, 2, 3], [1, 2, 3]]\n assert self.get(d, \"f\") == [[[6, [1, 2]], 6], 2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_crosstalk_crosstalk.return.d_x_height_1_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_crosstalk_crosstalk.return.d_x_height_1_i_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 25, "end_line": 38, "span_ids": ["crosstalk"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def crosstalk(width, height, connections):\n \"\"\"Natural looking dask with some inter-connections\"\"\"\n d = {(\"x\", 0, i): i for i in range(width)}\n for j in range(1, height):\n d.update(\n {\n (\"x\", j, i): (\n noop,\n [(\"x\", j - 1, randint(0, width)) for _ in range(connections)],\n )\n for i in 
range(width)\n }\n )\n return d, [(\"x\", height - 1, i) for i in range(width)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/setup.py__usr_bin_env_python_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/setup.py__usr_bin_env_python_", "embedding": null, "metadata": {"file_path": "setup.py", "file_name": "setup.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 88, "span_ids": ["impl:17", "docstring"], "tokens": 638}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#!/usr/bin/env python\n\nfrom __future__ import annotations\n\nimport sys\nfrom os.path import exists\n\nfrom setuptools import setup\n\nimport versioneer\n\n# NOTE: These are tested in `continuous_integration/test_imports.sh` If\n# you modify these, make sure to change the corresponding line there.\nextras_require: dict[str, list[str]] = {\n \"array\": [\"numpy >= 1.18\"],\n \"bag\": [], # keeping for backwards compatibility\n \"dataframe\": [\"numpy >= 1.18\", \"pandas >= 1.0\"],\n \"distributed\": [\"distributed == 2022.02.1\"],\n \"diagnostics\": [\n \"bokeh >= 2.4.2\",\n \"jinja2\",\n ],\n \"delayed\": [], # keeping for backwards compatibility\n}\nextras_require[\"complete\"] = sorted({v for req in extras_require.values() for v in req})\n# after complete is set, add in test\nextras_require[\"test\"] = [\n \"pytest\",\n \"pytest-rerunfailures\",\n \"pytest-xdist\",\n \"pre-commit\",\n]\n\ninstall_requires = [\n \"cloudpickle >= 1.1.1\",\n \"fsspec >= 0.6.0\",\n \"packaging >= 20.0\",\n \"partd >= 0.3.10\",\n \"pyyaml >= 5.3.1\",\n \"toolz >= 0.8.2\",\n]\n\npackages = [\n \"dask\",\n \"dask.array\",\n \"dask.bag\",\n \"dask.bytes\",\n \"dask.dataframe\",\n \"dask.dataframe.io\",\n \"dask.dataframe.tseries\",\n \"dask.diagnostics\",\n]\n\ntests = [p + \".tests\" for p in packages]\n\n# Only include pytest-runner in setup_requires if we're invoking tests\nif {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv):\n setup_requires = [\"pytest-runner\"]\nelse:\n setup_requires = []\n\nsetup(\n name=\"dask\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Parallel PyData with Task Scheduling\",\n url=\"https://github.com/dask/dask/\",\n maintainer=\"Matthew Rocklin\",\n maintainer_email=\"mrocklin@gmail.com\",\n license=\"BSD\",\n keywords=\"task-scheduling parallel numpy pandas pydata\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: BSD License\",\n ],\n packages=packages + tests,\n long_description=open(\"README.rst\").read() if exists(\"README.rst\") else \"\",\n python_requires=\">=3.8\",\n install_requires=install_requires,\n setup_requires=setup_requires,\n tests_require=[\"pytest\"],\n extras_require=extras_require,\n include_package_data=True,\n zip_safe=False,\n)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py__Version_0_16_get_root.return.root": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py__Version_0_16_get_root.return.root", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 398, "span_ids": ["VersioneerConfig", "imports", "get_root", "docstring"], "tokens": 475}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Version: 0.16\n\n\nimport configparser\nimport errno\nimport json\nimport os\nimport re\nimport subprocess\nimport sys\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_root():\n \"\"\"Get the project root directory.\n\n We require that all commands are run from the project root, i.e. the\n directory that contains setup.py, setup.cfg, and versioneer.py .\n \"\"\"\n root = os.path.realpath(os.path.abspath(os.getcwd()))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n # allow 'python path/to/setup.py COMMAND'\n root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n err = (\"Versioneer was unable to run the project root directory. \"\n \"Versioneer requires setup.py to be executed from \"\n \"its immediate directory (like 'python setup.py COMMAND'), \"\n \"or in a way that lets it use sys.argv[0] to find the root \"\n \"(like 'python path/to/setup.py COMMAND').\")\n raise VersioneerBadRootError(err)\n try:\n # Certain runtime workflows (setup.py install/develop in a setuptools\n # tree) execute all dependencies in a single python process, so\n # \"versioneer\" may be imported multiple times, and python's shared\n # module-import table will cache the first one. 
So we can't use\n # os.path.dirname(__file__), as that will find whichever\n # versioneer.py was first imported, even in later projects.\n me = os.path.realpath(os.path.abspath(__file__))\n if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:\n print(\"Warning: build in %s is using versioneer.py from %s\"\n % (os.path.dirname(me), versioneer_py))\n except NameError:\n pass\n return root", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_config_from_root_get_config_from_root.return.cfg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_config_from_root_get_config_from_root.return.cfg", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 405, "end_line": 431, "span_ids": ["get_config_from_root"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_config_from_root(root):\n \"\"\"Read the project setup.cfg file to determine Versioneer config.\"\"\"\n # This might raise EnvironmentError (if setup.cfg is missing), or\n # configparser.NoSectionError (if it lacks a [versioneer] section), or\n # configparser.NoOptionError (if it lacks \"VCS=\"). 
See the docstring at\n # the top of versioneer.py for instructions on writing your setup.cfg .\n setup_cfg = os.path.join(root, \"setup.cfg\")\n parser = configparser.SafeConfigParser()\n with open(setup_cfg) as f:\n parser.readfp(f)\n VCS = parser.get(\"versioneer\", \"VCS\") # mandatory\n\n def get(parser, name):\n if parser.has_option(\"versioneer\", name):\n return parser.get(\"versioneer\", name)\n return None\n cfg = VersioneerConfig()\n cfg.VCS = VCS\n cfg.style = get(parser, \"style\") or \"\"\n cfg.versionfile_source = get(parser, \"versionfile_source\")\n cfg.versionfile_build = get(parser, \"versionfile_build\")\n cfg.tag_prefix = get(parser, \"tag_prefix\")\n if cfg.tag_prefix in (\"''\", '\"\"'):\n cfg.tag_prefix = \"\"\n cfg.parentdir_prefix = get(parser, \"parentdir_prefix\")\n cfg.verbose = get(parser, \"verbose\")\n return cfg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_NotThisMethod_register_vcs_handler.return.decorate": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_NotThisMethod_register_vcs_handler.return.decorate", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 430, "end_line": 446, "span_ids": ["register_vcs_handler", "NotThisMethod", "impl"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n# these dictionaries contain VCS-specific tools\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n return decorate", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_run_command_run_command.return.stdout": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_run_command_run_command.return.stdout", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 453, "end_line": 484, "span_ids": ["run_command"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None))\n break\n except OSError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None\n else:\n if verbose:\n print(f\"unable to find command, tried {commands}\")\n return None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n return None\n return stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_get_keywords_git_get_keywords.return.keywords": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_get_keywords_git_get_keywords.return.keywords", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 972, "end_line": 994, "span_ids": ["git_get_keywords"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. 
This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs)\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n f.close()\n except OSError:\n pass\n return keywords", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_versions_from_keywords_git_versions_from_keywords.return._version_0_unknown_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_versions_from_keywords_git_versions_from_keywords.return._version_0_unknown_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 997, "end_line": 1040, "span_ids": ["git_versions_from_keywords"], "tokens": 554}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = {r.strip() for r in refnames.strip(\"()\").split(\",\")}\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"main\".\n tags = {r for r in refs if re.search(r'\\d', r)}\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs-tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. 
\"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix):]\n if verbose:\n print(\"picking %s\" % r)\n return {\"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": None\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": \"no suitable tags\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_git_pieces_from_vcs_git_pieces_from_vcs.return.pieces", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1043, "end_line": 1124, "span_ids": ["git_pieces_from_vcs"], "tokens": 772}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n if not os.path.exists(os.path.join(root, \".git\")):\n if verbose:\n print(\"no .git in %s\" % root)\n raise NotThisMethod(\"no .git directory\")\n\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out = run_command(GITS, [\"describe\", \"--tags\", \"--dirty\",\n \"--always\", \"--long\",\n \"--match\", \"%s*\" % tag_prefix],\n cwd=root)\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[:git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r'^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)\n if not mo:\n # unparseable. 
Maybe git-describe is misbehaving?\n pieces[\"error\"] = (\"unable to parse git-describe output: '%s'\"\n % describe_out)\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = (\"tag '%s' doesn't start with prefix '%s'\"\n % (full_tag, tag_prefix))\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix):]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"],\n cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n return pieces", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_vcs_install_do_vcs_install.run_command_GITS_add_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_vcs_install_do_vcs_install.run_command_GITS_add_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1127, "end_line": 1162, "span_ids": ["do_vcs_install"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def do_vcs_install(manifest_in, versionfile_source, ipy):\n \"\"\"Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-time keyword substitution.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n files = [manifest_in, versionfile_source]\n if ipy:\n files.append(ipy)\n try:\n me = __file__\n if me.endswith(\".pyc\") or me.endswith(\".pyo\"):\n me = os.path.splitext(me)[0] + \".py\"\n versioneer_file = os.path.relpath(me)\n except NameError:\n versioneer_file = \"versioneer.py\"\n files.append(versioneer_file)\n present = False\n try:\n f = open(\".gitattributes\")\n for line in f.readlines():\n if line.strip().startswith(versionfile_source):\n if \"export-subst\" in line.strip().split()[1:]:\n present = True\n f.close()\n except OSError:\n pass\n if not present:\n f = open(\".gitattributes\", \"a+\")\n f.write(\"%s export-subst\\n\" % versionfile_source)\n f.close()\n files.append(\".gitattributes\")\n run_command(GITS, [\"add\", \"--\"] + files)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_versions_from_parentdir_versions_from_parentdir.return._version_dirname_len_p": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_versions_from_parentdir_versions_from_parentdir.return._version_dirname_len_p", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1165, "end_line": 1179, "span_ids": ["versions_from_parentdir"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes\n both the project name and a version string.\n \"\"\"\n dirname = os.path.basename(root)\n if not dirname.startswith(parentdir_prefix):\n if verbose:\n print(\"guessing rootdir is '%s', but '%s' doesn't start with \"\n \"prefix '%s'\" % (root, dirname, parentdir_prefix))\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n return {\"version\": dirname[len(parentdir_prefix):],\n \"full-revisionid\": None,\n \"dirty\": False, \"error\": None}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_SHORT_VERSION_PY_versions_from_file.return.json_loads_mo_group_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_SHORT_VERSION_PY_versions_from_file.return.json_loads_mo_group_1_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1177, "end_line": 1207, "span_ids": ["versions_from_file", "impl:6"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "SHORT_VERSION_PY = \"\"\"\n# This file was generated by 'versioneer.py' (0.16) from\n# revision-control system data, or from the parent directory name of an\n# unpacked source archive. 
Distribution tarballs contain a pre-generated copy\n# of this file.\n\nimport json\nimport sys\n\nversion_json = '''\n%s\n''' # END VERSION_JSON\n\n\ndef get_versions():\n return json.loads(version_json)\n\"\"\"\n\n\ndef versions_from_file(filename):\n \"\"\"Try to determine the version from _version.py if present.\"\"\"\n try:\n with open(filename) as f:\n contents = f.read()\n except OSError:\n raise NotThisMethod(\"unable to read _version.py\")\n mo = re.search(r\"version_json = '''\\n(.*)''' # END VERSION_JSON\",\n contents, re.M | re.S)\n if not mo:\n raise NotThisMethod(\"no version_json in _version.py\")\n return json.loads(mo.group(1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_write_to_version_file_plus_or_dot.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_write_to_version_file_plus_or_dot.return._", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1214, "end_line": 1229, "span_ids": ["plus_or_dot", "write_to_version_file"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def write_to_version_file(filename, versions):\n \"\"\"Write the given version number to the given _version.py file.\"\"\"\n os.unlink(filename)\n contents = json.dumps(versions, sort_keys=True,\n indent=1, separators=(\",\", \": \"))\n with open(filename, \"w\") as f:\n f.write(SHORT_VERSION_PY % contents)\n\n print(\"set {} to '{}'\".format(filename, versions[\"version\"]))\n\n\ndef plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_render_pep440_pre.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_render_pep440_pre.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1232, "end_line": 1270, "span_ids": ["render_pep440_pre", "render_pep440"], "tokens": 317}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: 
TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"],\n pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n\ndef render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_post_render_pep440_post.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_post_render_pep440_post.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1273, "end_line": 1297, "span_ids": ["render_pep440_post"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n    \"\"\"\n    if pieces[\"closest-tag\"]:\n        rendered = pieces[\"closest-tag\"]\n        if pieces[\"distance\"] or pieces[\"dirty\"]:\n            rendered += \".post%d\" % pieces[\"distance\"]\n            if pieces[\"dirty\"]:\n                rendered += \".dev0\"\n            rendered += plus_or_dot(pieces)\n            rendered += \"g%s\" % pieces[\"short\"]\n    else:\n        # exception #1\n        rendered = \"0.post%d\" % pieces[\"distance\"]\n        if pieces[\"dirty\"]:\n            rendered += \".dev0\"\n        rendered += \"+g%s\" % pieces[\"short\"]\n    return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_old_render_pep440_old.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_pep440_old_render_pep440_old.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1300, "end_line": 1319, "span_ids": ["render_pep440_old"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_pep440_old(pieces):\n    \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n    The \".dev0\" means dirty.\n\n    Exceptions:\n    1: no tags. 0.postDISTANCE[.dev0]\n    \"\"\"\n    if pieces[\"closest-tag\"]:\n        rendered = pieces[\"closest-tag\"]\n        if pieces[\"distance\"] or pieces[\"dirty\"]:\n            rendered += \".post%d\" % pieces[\"distance\"]\n            if pieces[\"dirty\"]:\n                rendered += \".dev0\"\n    else:\n        # exception #1\n        rendered = \"0.post%d\" % pieces[\"distance\"]\n        if pieces[\"dirty\"]:\n            rendered += \".dev0\"\n    return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_render_git_describe.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_render_git_describe.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1322, "end_line": 1339, "span_ids": ["render_git_describe"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe(pieces):\n    \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n    Like 'git describe --tags --dirty --always'.\n\n    Exceptions:\n    1: no tags. 
HEX[-dirty] (note: no 'g' prefix)\n    \"\"\"\n    if pieces[\"closest-tag\"]:\n        rendered = pieces[\"closest-tag\"]\n        if pieces[\"distance\"]:\n            rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n    else:\n        # exception #1\n        rendered = pieces[\"short\"]\n    if pieces[\"dirty\"]:\n        rendered += \"-dirty\"\n    return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_long_render_git_describe_long.return.rendered": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_git_describe_long_render_git_describe_long.return.rendered", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1342, "end_line": 1359, "span_ids": ["render_git_describe_long"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render_git_describe_long(pieces):\n    \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n    Like 'git describe --tags --dirty --always --long'.\n    The distance/hash is unconditional.\n\n    Exceptions:\n    1: no tags. HEX[-dirty] (note: no 'g' prefix)\n    \"\"\"\n    if pieces[\"closest-tag\"]:\n        rendered = pieces[\"closest-tag\"]\n        rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n    else:\n        # exception #1\n        rendered = pieces[\"short\"]\n    if pieces[\"dirty\"]:\n        rendered += \"-dirty\"\n    return rendered", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_VersioneerBadRootError._The_project_root_direc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_render_VersioneerBadRootError._The_project_root_direc", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1362, "end_line": 1393, "span_ids": ["VersioneerBadRootError", "render"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def render(pieces, style):\n    \"\"\"Render the given version pieces into the requested style.\"\"\"\n    if pieces[\"error\"]:\n        return {\"version\": \"unknown\",\n                \"full-revisionid\": pieces.get(\"long\"),\n                \"dirty\": None,\n                \"error\": pieces[\"error\"]}\n\n    if not style or style == \"default\":\n        style = \"pep440\"  # the default\n\n    if style == \"pep440\":\n        rendered = render_pep440(pieces)\n    elif style == \"pep440-pre\":\n        rendered = render_pep440_pre(pieces)\n    
elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\"version\": rendered, \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"], \"error\": None}\n\n\nclass VersioneerBadRootError(Exception):\n \"\"\"The project root directory is unknown or missing key files.\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_versions_get_versions.return._version_0_unknown_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_versions_get_versions.return._version_0_unknown_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1396, "end_line": 1468, "span_ids": ["get_versions"], "tokens": 605}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_versions(verbose=False):\n \"\"\"Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.\n \"\"\"\n if \"versioneer\" in sys.modules:\n # see the discussion in cmdclass.py:get_cmdclass()\n del sys.modules[\"versioneer\"]\n\n root = get_root()\n cfg = get_config_from_root(root)\n\n assert cfg.VCS is not None, \"please set [versioneer]VCS= in setup.cfg\"\n handlers = HANDLERS.get(cfg.VCS)\n assert handlers, \"unrecognized VCS '%s'\" % cfg.VCS\n verbose = verbose or cfg.verbose\n assert cfg.versionfile_source is not None, \\\n \"please set versioneer.versionfile_source\"\n assert cfg.tag_prefix is not None, \"please set versioneer.tag_prefix\"\n\n versionfile_abs = os.path.join(root, cfg.versionfile_source)\n\n # extract version from first of: _version.py, VCS command (e.g. 'git\n # describe'), parentdir. 
This is meant to work for developers using a\n    # source checkout, for users of a tarball created by 'setup.py sdist',\n    # and for users of a tarball/zipball created by 'git archive' or github's\n    # download-from-tag feature or the equivalent in other VCSes.\n\n    get_keywords_f = handlers.get(\"get_keywords\")\n    from_keywords_f = handlers.get(\"keywords\")\n    if get_keywords_f and from_keywords_f:\n        try:\n            keywords = get_keywords_f(versionfile_abs)\n            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)\n            if verbose:\n                print(\"got version from expanded keyword %s\" % ver)\n            return ver\n        except NotThisMethod:\n            pass\n\n    try:\n        ver = versions_from_file(versionfile_abs)\n        if verbose:\n            print(f\"got version from file {versionfile_abs} {ver}\")\n        return ver\n    except NotThisMethod:\n        pass\n\n    from_vcs_f = handlers.get(\"pieces_from_vcs\")\n    if from_vcs_f:\n        try:\n            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)\n            ver = render(pieces, cfg.style)\n            if verbose:\n                print(\"got version from VCS %s\" % ver)\n            return ver\n        except NotThisMethod:\n            pass\n\n    try:\n        if cfg.parentdir_prefix:\n            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n            if verbose:\n                print(\"got version from parentdir %s\" % ver)\n            return ver\n    except NotThisMethod:\n        pass\n\n    if verbose:\n        print(\"unable to compute version\")\n\n    return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n            \"dirty\": None, \"error\": \"unable to compute version\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_version_get_cmdclass.from_distutils_core_impor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_version_get_cmdclass.from_distutils_core_impor", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1471, "end_line": 1496, "span_ids": ["get_cmdclass", "get_version"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_version():\n    \"\"\"Get the short version string for this project.\"\"\"\n    return get_versions()[\"version\"]\n\n\ndef get_cmdclass():\n    \"\"\"Get the custom setuptools/distutils subclasses used by Versioneer.\"\"\"\n    if \"versioneer\" in sys.modules:\n        del sys.modules[\"versioneer\"]\n        # this fixes the \"python setup.py develop\" case (also 'install' and\n        # 'easy_install .'), in which subdependencies of the main project are\n        # built (using setup.py bdist_egg) in the same python process. Assume\n        # a main project A and a dependency B, which use different versions\n        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in\n        # sys.modules by the time B's setup.py is executed, causing B to run\n        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a\n        # sandbox that restores sys.modules to its pre-build state, so the\n        # parent is protected against the child's \"import versioneer\". 
By\n # removing ourselves from sys.modules here, before the child build\n # happens, we protect the child from the parent's versioneer too.\n # Also see https://github.com/warner/python-versioneer/issues/52\n\n cmds = {}\n\n # we add \"version\" to both distutils and setuptools\n from distutils.core import Command\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_version_get_cmdclass.cmd_version.run.if_vers_error_.print_error_s_vers": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_version_get_cmdclass.cmd_version.run.if_vers_error_.print_error_s_vers", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1498, "end_line": 1515, "span_ids": ["get_cmdclass"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... other code\n\n class cmd_version(Command):\n description = \"report generated version string\"\n user_options = []\n boolean_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n vers = get_versions(verbose=True)\n print(\"Version: %s\" % vers[\"version\"])\n print(\" full-revisionid: %s\" % vers.get(\"full-revisionid\"))\n print(\" dirty: %s\" % vers.get(\"dirty\"))\n if vers[\"error\"]:\n print(\" error: %s\" % vers[\"error\"])\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_version_cmd_ver_get_cmdclass.if_setuptools_in_sys_mo.else_.from_distutils_command_bu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_version_cmd_ver_get_cmdclass.if_setuptools_in_sys_mo.else_.from_distutils_command_bu", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1516, "end_line": 1532, "span_ids": ["get_cmdclass"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... 
other code\n cmds[\"version\"] = cmd_version\n\n # we override \"build_py\" in both distutils and setuptools\n #\n # most invocation pathways end up running build_py:\n # distutils/build -> build_py\n # distutils/install -> distutils/build ->..\n # setuptools/bdist_wheel -> distutils/install ->..\n # setuptools/bdist_egg -> distutils/install_lib -> build_py\n # setuptools/install -> bdist_egg ->..\n # setuptools/develop -> ?\n\n # we override different \"build_py\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.build_py import build_py as _build_py\n else:\n from distutils.command.build_py import build_py as _build_py\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_build_py_get_cmdclass.cmd_build_py.run.if_cfg_versionfile_build_.write_to_version_file_tar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_build_py_get_cmdclass.cmd_build_py.run.if_cfg_versionfile_build_.write_to_version_file_tar", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1534, "end_line": 1546, "span_ids": ["get_cmdclass"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... other code\n\n class cmd_build_py(_build_py):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n _build_py.run(self)\n # now locate _version.py in the new build/ directory and replace\n # it with an updated value\n if cfg.versionfile_build:\n target_versionfile = os.path.join(self.build_lib,\n cfg.versionfile_build)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_build_py_cmd_bu_get_cmdclass.None_3.else_.from_distutils_command_sd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmds_build_py_cmd_bu_get_cmdclass.None_3.else_.from_distutils_command_sd", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1547, "end_line": 1579, "span_ids": ["get_cmdclass"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... other code\n cmds[\"build_py\"] = cmd_build_py\n\n if \"cx_Freeze\" in sys.modules: # cx_freeze enabled?\n from cx_Freeze.dist import build_exe as _build_exe\n\n class cmd_build_exe(_build_exe):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n target_versionfile = cfg.versionfile_source\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n _build_exe.run(self)\n os.unlink(target_versionfile)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(LONG %\n {\"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n })\n cmds[\"build_exe\"] = cmd_build_exe\n del cmds[\"build_py\"]\n\n # we override different \"sdist\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.sdist import sdist as _sdist\n else:\n from distutils.command.sdist import sdist as _sdist\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_sdist_get_cmdclass.return.cmds": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_get_cmdclass.cmd_sdist_get_cmdclass.return.cmds", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1581, "end_line": 1603, "span_ids": ["get_cmdclass"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_cmdclass():\n # ... 
other code\n\n class cmd_sdist(_sdist):\n def run(self):\n versions = get_versions()\n self._versioneer_generated_versions = versions\n # unless we update this, the command will keep using the old\n # version\n self.distribution.metadata.version = versions[\"version\"]\n return _sdist.run(self)\n\n def make_release_tree(self, base_dir, files):\n root = get_root()\n cfg = get_config_from_root(root)\n _sdist.make_release_tree(self, base_dir, files)\n # now locate _version.py in the new base_dir directory\n # (remembering that it may be a hardlink) and replace it with an\n # updated value\n target_versionfile = os.path.join(base_dir, cfg.versionfile_source)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile,\n self._versioneer_generated_versions)\n cmds[\"sdist\"] = cmd_sdist\n\n return cmds", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_CONFIG_ERROR_INIT_PY_SNIPPET._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_CONFIG_ERROR_INIT_PY_SNIPPET._", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1602, "end_line": 1643, "span_ids": ["impl:8"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "CONFIG_ERROR = \"\"\"\nsetup.cfg is missing the necessary Versioneer configuration. You need\na section like:\n\n [versioneer]\n VCS = git\n style = pep440\n versionfile_source = src/myproject/_version.py\n versionfile_build = myproject/_version.py\n tag_prefix =\n parentdir_prefix = myproject-\n\nYou will also need to edit your setup.py to use the results:\n\n import versioneer\n setup(version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(), ...)\n\nPlease read the docstring in ./versioneer.py for configuration instructions,\nedit setup.cfg, and re-run the installer or 'python versioneer.py setup'.\n\"\"\"\n\nSAMPLE_CONFIG = \"\"\"\n# See the docstring in versioneer.py for instructions. 
Note that you must\n# re-run 'versioneer.py setup' after changing this section, and commit the\n# resulting files.\n\n[versioneer]\n#VCS = git\n#style = pep440\n#versionfile_source =\n#versionfile_build =\n#tag_prefix =\n#parentdir_prefix =\n\n\"\"\"\n\nINIT_PY_SNIPPET = \"\"\"\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_setup_do_setup.return.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_do_setup_do_setup.return.0", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1650, "end_line": 1728, "span_ids": ["do_setup"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def do_setup():\n \"\"\"Main VCS-independent setup function for installing Versioneer.\"\"\"\n root = get_root()\n try:\n cfg = get_config_from_root(root)\n except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:\n if isinstance(e, (EnvironmentError, configparser.NoSectionError)):\n print(\"Adding sample versioneer config to setup.cfg\",\n file=sys.stderr)\n with open(os.path.join(root, \"setup.cfg\"), \"a\") as f:\n f.write(SAMPLE_CONFIG)\n print(CONFIG_ERROR, file=sys.stderr)\n return 1\n\n print(\" creating %s\" % cfg.versionfile_source)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(LONG % {\"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n })\n\n ipy = os.path.join(os.path.dirname(cfg.versionfile_source),\n \"__init__.py\")\n if os.path.exists(ipy):\n try:\n with open(ipy) as f:\n old = f.read()\n except OSError:\n old = \"\"\n if INIT_PY_SNIPPET not in old:\n print(\" appending to %s\" % ipy)\n with open(ipy, \"a\") as f:\n f.write(INIT_PY_SNIPPET)\n else:\n print(\" %s unmodified\" % ipy)\n else:\n print(\" %s doesn't exist, ok\" % ipy)\n ipy = None\n\n # Make sure both the top-level \"versioneer.py\" and versionfile_source\n # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so\n # they'll be copied into source distributions. Pip won't be able to\n # install the package without this.\n manifest_in = os.path.join(root, \"MANIFEST.in\")\n simple_includes = set()\n try:\n with open(manifest_in) as f:\n for line in f:\n if line.startswith(\"include \"):\n for include in line.split()[1:]:\n simple_includes.add(include)\n except OSError:\n pass\n # That doesn't cover everything MANIFEST.in can do\n # (https://docs.python.org/2/distutils/sourcedist.html#commands), so\n # it might give some false negatives. 
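The CONFIG_ERROR text above spells out the wiring a project needs in setup.py; a minimal sketch of such a file, assuming a placeholder package name ("myproject" is illustrative, not taken from this repository):

import versioneer
from setuptools import find_packages, setup

setup(
    name="myproject",                    # placeholder project name
    version=versioneer.get_version(),    # version derived from git tags / _version.py
    cmdclass=versioneer.get_cmdclass(),  # injects the build_py/build_exe/sdist overrides shown above
    packages=find_packages(),
)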
Appending redundant 'include'\n # lines is safe, though.\n if \"versioneer.py\" not in simple_includes:\n print(\" appending 'versioneer.py' to MANIFEST.in\")\n with open(manifest_in, \"a\") as f:\n f.write(\"include versioneer.py\\n\")\n else:\n print(\" 'versioneer.py' already in MANIFEST.in\")\n if cfg.versionfile_source not in simple_includes:\n print(\" appending versionfile_source ('%s') to MANIFEST.in\" %\n cfg.versionfile_source)\n with open(manifest_in, \"a\") as f:\n f.write(\"include %s\\n\" % cfg.versionfile_source)\n else:\n print(\" versionfile_source already in MANIFEST.in\")\n\n # Make VCS-specific changes. For git, this means creating/changing\n # .gitattributes to mark _version.py for export-time keyword\n # substitution.\n do_vcs_install(manifest_in, cfg.versionfile_source, ipy)\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_scan_setup_py_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/versioneer.py_scan_setup_py_", "embedding": null, "metadata": {"file_path": "versioneer.py", "file_name": "versioneer.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1727, "end_line": 1770, "span_ids": ["scan_setup_py", "impl:14"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def scan_setup_py():\n \"\"\"Validate the contents of setup.py against Versioneer's expectations.\"\"\"\n found = set()\n setters = False\n errors = 0\n with open(\"setup.py\") as f:\n for line in f.readlines():\n if \"import versioneer\" in line:\n found.add(\"import\")\n if \"versioneer.get_cmdclass()\" in line:\n found.add(\"cmdclass\")\n if \"versioneer.get_version()\" in line:\n found.add(\"get_version\")\n if \"versioneer.VCS\" in line:\n setters = True\n if \"versioneer.versionfile_source\" in line:\n setters = True\n if len(found) != 3:\n print(\"\")\n print(\"Your setup.py appears to be missing some important items\")\n print(\"(but I might be wrong). Please make sure it has something\")\n print(\"roughly like the following:\")\n print(\"\")\n print(\" import versioneer\")\n print(\" setup( version=versioneer.get_version(),\")\n print(\" cmdclass=versioneer.get_cmdclass(), ...)\")\n print(\"\")\n errors += 1\n if setters:\n print(\"You should remove lines like 'versioneer.VCS = ' and\")\n print(\"'versioneer.versionfile_source = ' . 
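A standalone sketch of the "simple_includes" scan that do_setup performs above; it only recognizes plain ``include`` lines, which is why the comment warns about false negatives for fancier MANIFEST.in directives:

def scan_simple_includes(path="MANIFEST.in"):
    # Collect targets of plain "include" lines; other directives
    # (recursive-include, graft, ...) are deliberately ignored.
    includes = set()
    try:
        with open(path) as f:
            for line in f:
                if line.startswith("include "):
                    includes.update(line.split()[1:])
    except OSError:
        pass  # missing MANIFEST.in: appending redundant includes later is still safe
    return includes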
This configuration\")\n print(\"now lives in setup.cfg, and should be removed from setup.py\")\n print(\"\")\n errors += 1\n return errors\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"setup\":\n errors = do_setup()\n errors += scan_setup_py()\n if errors:\n sys.exit(1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_np_register_chunk_type._HANDLED_CHUNK_TYPES_appe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk_types.py_np_register_chunk_type._HANDLED_CHUNK_TYPES_appe", "embedding": null, "metadata": {"file_path": "dask/array/chunk_types.py", "file_name": "chunk_types.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 104, "span_ids": ["imports", "register_chunk_type"], "tokens": 1011}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\n\n# Start list of valid chunk types, to be added to with guarded imports\n_HANDLED_CHUNK_TYPES = [np.ndarray, np.ma.MaskedArray]\n\n\ndef register_chunk_type(type):\n \"\"\"Register the given type as a valid chunk and downcast array type\n\n Parameters\n ----------\n type : type\n Duck array type to be registered as a type Dask can safely wrap as a chunk and\n to which Dask does not defer in arithmetic operations and NumPy\n functions/ufuncs.\n\n Notes\n -----\n A :py:class:`dask.array.Array` can contain any sufficiently \"NumPy-like\" array in\n its chunks. These are also referred to as \"duck arrays\" since they match the most\n important parts of NumPy's array API, and so, behave the same way when relying on\n duck typing.\n\n However, for multiple duck array types to interoperate properly, they need to\n properly defer to each other in arithmetic operations and NumPy functions/ufuncs\n according to a well-defined type casting hierarchy (\n `see NEP 13 `__\n ). In an effort to maintain this hierarchy, Dask defers to all other duck array\n types except those in its internal registry. By default, this registry contains\n\n * :py:class:`numpy.ndarray`\n * :py:class:`numpy.ma.MaskedArray`\n * :py:class:`cupy.ndarray`\n * :py:class:`sparse.SparseArray`\n * :py:class:`scipy.sparse.spmatrix`\n\n This function exists to append any other types to this registry. If a type is not\n in this registry, and yet is a downcast type (it comes below\n :py:class:`dask.array.Array` in the type casting hierarchy), a ``TypeError`` will\n be raised due to all operand types returning ``NotImplemented``.\n\n Examples\n --------\n Using a mock ``FlaggedArray`` class as an example chunk type unknown to Dask with\n minimal duck array API:\n\n >>> import numpy.lib.mixins\n >>> class FlaggedArray(numpy.lib.mixins.NDArrayOperatorsMixin):\n ... def __init__(self, a, flag=False):\n ... self.a = a\n ... self.flag = flag\n ... def __repr__(self):\n ... return f\"Flag: {self.flag}, Array: \" + repr(self.a)\n ... def __array__(self):\n ... return np.asarray(self.a)\n ... 
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n ... if method == '__call__':\n ... downcast_inputs = []\n ... flag = False\n ... for input in inputs:\n ... if isinstance(input, self.__class__):\n ... flag = flag or input.flag\n ... downcast_inputs.append(input.a)\n ... elif isinstance(input, np.ndarray):\n ... downcast_inputs.append(input)\n ... else:\n ... return NotImplemented\n ... return self.__class__(ufunc(*downcast_inputs, **kwargs), flag)\n ... else:\n ... return NotImplemented\n ... @property\n ... def shape(self):\n ... return self.a.shape\n ... @property\n ... def ndim(self):\n ... return self.a.ndim\n ... @property\n ... def dtype(self):\n ... return self.a.dtype\n ... def __getitem__(self, key):\n ... return type(self)(self.a[key], self.flag)\n ... def __setitem__(self, key, value):\n ... self.a[key] = value\n\n Before registering ``FlaggedArray``, both types will attempt to defer to the\n other:\n\n >>> import dask.array as da\n >>> da.ones(5) - FlaggedArray(np.ones(5), True)\n Traceback (most recent call last):\n ...\n TypeError: operand type(s) all returned NotImplemented ...\n\n However, once registered, Dask will be able to handle operations with this new\n type:\n\n >>> da.register_chunk_type(FlaggedArray)\n >>> x = da.ones(5) - FlaggedArray(np.ones(5), True)\n >>> x\n dask.array\n >>> x.compute()\n Flag: True, Array: array([0., 0., 0., 0., 0.])\n \"\"\"\n _HANDLED_CHUNK_TYPES.append(type)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__should_delegate_check_if_handled_given_other.return.wrapper": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__should_delegate_check_if_handled_given_other.return.wrapper", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 173, "end_line": 203, "span_ids": ["check_if_handled_given_other", "_should_delegate"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _should_delegate(other) -> bool:\n \"\"\"Check whether Dask should delegate to the other.\n This implementation follows NEP-13:\n https://numpy.org/neps/nep-0013-ufunc-overrides.html#behavior-in-combination-with-python-s-binary-operations\n \"\"\"\n if hasattr(other, \"__array_ufunc__\") and other.__array_ufunc__ is None:\n return True\n elif (\n hasattr(other, \"__array_ufunc__\")\n and not is_valid_array_chunk(other)\n and type(other).__array_ufunc__ is not Array.__array_ufunc__\n ):\n return True\n return False\n\n\ndef check_if_handled_given_other(f):\n \"\"\"Check if method is handled by Dask given type of other\n\n Ensures proper deferral to upcast types in dunder operations without\n assuming unknown types are automatically downcast types.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, other):\n if _should_delegate(other):\n return NotImplemented\n else:\n return f(self, other)\n\n return wrapper", "start_char_idx": null, 
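A small sketch of the first NEP-13 branch that ``_should_delegate`` above handles: an object that sets ``__array_ufunc__ = None`` opts out of ufunc dispatch, so Dask's wrapped dunder returns ``NotImplemented`` and Python falls through to the reflected method. ``OptOut`` is a made-up class for illustration:

import dask.array as da

class OptOut:
    # NEP-13 opt-out: binary ops with arrays must defer to this object
    __array_ufunc__ = None

    def __rsub__(self, other):
        return "OptOut wins"

x = da.ones(5)
print(x - OptOut())  # Dask returns NotImplemented, so Python calls OptOut.__rsub__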
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_arg_normalize_arg.if_is_dask_collection_x_.else_.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_normalize_arg_normalize_arg.if_is_dask_collection_x_.else_.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 409, "end_line": 428, "span_ids": ["normalize_arg"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def normalize_arg(x):\n \"\"\"Normalize user provided arguments to blockwise or map_blocks\n\n We do a few things:\n\n 1. If they are string literals that might collide with blockwise_token then we\n quote them\n 2. IF they are large (as defined by sizeof) then we put them into the\n graph on their own by using dask.delayed\n \"\"\"\n if is_dask_collection(x):\n return x\n elif isinstance(x, str) and re.match(r\"_\\d+\", x):\n return delayed(x)\n elif isinstance(x, list) and len(x) >= 10:\n return delayed(x)\n elif sizeof(x) > 1e6:\n return delayed(x)\n else:\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__pass_extra_kwargs_map_blocks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__pass_extra_kwargs_map_blocks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 434, "end_line": 847, "span_ids": ["map_blocks", "_pass_extra_kwargs"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _pass_extra_kwargs(func, keys, *args, **kwargs):\n \"\"\"Helper for :func:`dask.array.map_blocks` to pass `block_info` or `block_id`.\n\n For each element of `keys`, a corresponding element of args is changed\n to a keyword argument with that key, before all arguments re passed on\n to `func`.\n \"\"\"\n kwargs.update(zip(keys, args))\n return func(*args[len(keys) :], **kwargs)\n\n\ndef map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._Map_a_function_across__map_blocks._Map_a_function_across_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._Map_a_function_across__map_blocks._Map_a_function_across_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 479, "end_line": 695, "span_ids": ["map_blocks"], "tokens": 2362}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n \"\"\"Map a function across all blocks of a dask array.\n\n Note that ``map_blocks`` will attempt to automatically determine the output\n array type by calling ``func`` on 0-d versions of the inputs. Please refer to\n the ``meta`` keyword argument below if you expect that the function will not\n succeed when operating on 0-d arrays.\n\n Parameters\n ----------\n func : callable\n Function to apply to every block in the array.\n If ``func`` accepts ``block_info=`` or ``block_id=``\n as keyword arguments, these will be passed dictionaries\n containing information about input and output chunks/arrays\n during computation. See examples for details.\n args : dask arrays or other objects\n dtype : np.dtype, optional\n The ``dtype`` of the output array. It is recommended to provide this.\n If not provided, will be inferred by applying the function to a small\n set of fake data.\n chunks : tuple, optional\n Chunk shape of resulting blocks if the function does not preserve\n shape. If not provided, the resulting array is assumed to have the same\n block structure as the first input array.\n drop_axis : number or iterable, optional\n Dimensions lost by the function.\n new_axis : number or iterable, optional\n New dimensions created by the function. Note that these are applied\n after ``drop_axis`` (if present).\n token : string, optional\n The key prefix to use for the output array. If not provided, will be\n determined from the function name.\n name : string, optional\n The key name to use for the output array. Note that this fully\n specifies the output key name, and must be unique. If not provided,\n will be determined by a hash of the arguments.\n meta : array-like, optional\n The ``meta`` of the output array, when specified is expected to be an\n array of the same type and dtype of that returned when calling ``.compute()``\n on the array returned by this function. When not provided, ``meta`` will be\n inferred by applying the function to a small set of fake data, usually a\n 0-d array. It's important to ensure that ``func`` can successfully complete\n computation without raising exceptions when 0-d is passed to it, providing\n ``meta`` will be required otherwise. 
If the output type is known beforehand\n (e.g., ``np.ndarray``, ``cupy.ndarray``), an empty array of such type dtype\n can be passed, for example: ``meta=np.array((), dtype=np.int32)``.\n **kwargs :\n Other keyword arguments to pass to function. Values must be constants\n (not dask.arrays)\n\n See Also\n --------\n dask.array.blockwise : Generalized operation with control over block alignment.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n\n >>> x.map_blocks(lambda x: x * 2).compute()\n array([ 0, 2, 4, 6, 8, 10])\n\n The ``da.map_blocks`` function can also accept multiple arrays.\n\n >>> d = da.arange(5, chunks=2)\n >>> e = da.arange(5, chunks=2)\n\n >>> f = da.map_blocks(lambda a, b: a + b**2, d, e)\n >>> f.compute()\n array([ 0, 2, 6, 12, 20])\n\n If the function changes shape of the blocks then you must provide chunks\n explicitly.\n\n >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))\n\n You have a bit of freedom in specifying chunks. If all of the output chunk\n sizes are the same, you can provide just that chunk size as a single tuple.\n\n >>> a = da.arange(18, chunks=(6,))\n >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))\n\n If the function changes the dimension of the blocks you must specify the\n created or destroyed dimensions.\n\n >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),\n ... new_axis=[0, 2])\n\n If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to\n add the necessary number of axes on the left.\n\n Map_blocks aligns blocks by block positions without regard to shape. In the\n following example we have two arrays with the same number of blocks but\n with different shape and chunk sizes.\n\n >>> x = da.arange(1000, chunks=(100,))\n >>> y = da.arange(100, chunks=(10,))\n\n The relevant attribute to match is numblocks.\n\n >>> x.numblocks\n (10,)\n >>> y.numblocks\n (10,)\n\n If these match (up to broadcasting rules) then we can map arbitrary\n functions across blocks\n\n >>> def func(a, b):\n ... return np.array([a.max(), b.max()])\n\n >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')\n dask.array\n\n >>> _.compute()\n array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,\n 69, 799, 79, 899, 89, 999, 99])\n\n Your block function can get information about where it is in the array by\n accepting a special ``block_info`` or ``block_id`` keyword argument.\n During computation, they will contain information about each of the input\n and output chunks (and dask arrays) relevant to each call of ``func``.\n\n >>> def func(block_info=None):\n ... 
pass\n\n This will receive the following information:\n\n >>> block_info # doctest: +SKIP\n {0: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)]},\n None: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)],\n 'chunk-shape': (100,),\n 'dtype': dtype('float64')}}\n\n The keys to the ``block_info`` dictionary indicate which is the input and\n output Dask array:\n\n - **Input Dask array(s):** ``block_info[0]`` refers to the first input Dask array.\n The dictionary key is ``0`` because that is the argument index corresponding\n to the first input Dask array.\n In cases where multiple Dask arrays have been passed as input to the function,\n you can access them with the number corresponding to the input argument,\n eg: ``block_info[1]``, ``block_info[2]``, etc.\n (Note that if you pass multiple Dask arrays as input to map_blocks,\n the arrays must match each other by having matching numbers of chunks,\n along corresponding dimensions up to broadcasting rules.)\n - **Output Dask array:** ``block_info[None]`` refers to the output Dask array,\n and contains information about the output chunks.\n The output chunk shape and dtype may be different than the input chunks.\n\n For each dask array, ``block_info`` describes:\n\n - ``shape``: the shape of the full Dask array,\n - ``num-chunks``: the number of chunks of the full array in each dimension,\n - ``chunk-location``: the chunk location (for example the fourth chunk over\n in the first dimension), and\n - ``array-location``: the array location within the full Dask array\n (for example the slice corresponding to ``40:50``).\n\n In addition to these, there are two extra parameters described by\n ``block_info`` for the output array (in ``block_info[None]``):\n\n - ``chunk-shape``: the output chunk shape, and\n - ``dtype``: the output dtype.\n\n These features can be combined to synthesize an array from scratch, for\n example:\n\n >>> def func(block_info=None):\n ... loc = block_info[None]['array-location'][0]\n ... return np.arange(loc[0], loc[1])\n\n >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)\n dask.array\n\n >>> _.compute()\n array([0, 1, 2, 3, 4, 5, 6, 7])\n\n ``block_id`` is similar to ``block_info`` but contains only the ``chunk_location``:\n\n >>> def func(block_id=None):\n ... pass\n\n This will receive the following information:\n\n >>> block_id # doctest: +SKIP\n (4, 3)\n\n You may specify the key name prefix of the resulting task in the graph with\n the optional ``token`` keyword argument.\n\n >>> x.map_blocks(lambda x: x + 1, name='increment')\n dask.array\n\n For functions that may not handle 0-d arrays, it's also possible to specify\n ``meta`` with an empty array matching the type of the expected result. In\n the example below, ``func`` will result in an ``IndexError`` when computing\n ``meta``:\n\n >>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))\n dask.array\n\n Similarly, it's possible to specify a non-NumPy array to ``meta``, and provide\n a ``dtype``:\n\n >>> import cupy # doctest: +SKIP\n >>> rs = da.random.RandomState(RandomState=cupy.random.RandomState) # doctest: +SKIP\n >>> dt = np.float32\n >>> da.map_blocks(lambda x: x[2], rs.random(5, dtype=dt), meta=cupy.array((), dtype=dt)) # doctest: +SKIP\n dask.array\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.None_11_map_blocks.None_11.extra_names_append_block": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.None_11_map_blocks.None_11.extra_names_append_block", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 735, "end_line": 809, "span_ids": ["map_blocks"], "tokens": 696}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # ... other code\n if has_keyword(func, \"block_info\"):\n starts = {}\n num_chunks = {}\n shapes = {}\n\n for i, (arg, in_ind) in enumerate(argpairs):\n if in_ind is not None:\n shapes[i] = arg.shape\n if drop_axis:\n # We concatenate along dropped axes, so we need to treat them\n # as if there is only a single chunk.\n starts[i] = [\n (\n cached_cumsum(arg.chunks[j], initial_zero=True)\n if ind in out_ind\n else [0, arg.shape[j]]\n )\n for j, ind in enumerate(in_ind)\n ]\n num_chunks[i] = tuple(len(s) - 1 for s in starts[i])\n else:\n starts[i] = [\n cached_cumsum(c, initial_zero=True) for c in arg.chunks\n ]\n num_chunks[i] = arg.numblocks\n out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]\n\n block_info_name = \"block-info-\" + out.name\n block_info_dsk = {}\n for block_id in product(*(range(len(c)) for c in out.chunks)):\n # Get position of chunk, indexed by axis labels\n location = {out_ind[i]: loc for i, loc in enumerate(block_id)}\n info = {}\n for i, shape in shapes.items():\n # Compute chunk key in the array, taking broadcasting into\n # account. We don't directly know which dimensions are\n # broadcast, but any dimension with only one chunk can be\n # treated as broadcast.\n arr_k = tuple(\n location.get(ind, 0) if num_chunks[i][j] > 1 else 0\n for j, ind in enumerate(argpairs[i][1])\n )\n info[i] = {\n \"shape\": shape,\n \"num-chunks\": num_chunks[i],\n \"array-location\": [\n (starts[i][ij][j], starts[i][ij][j + 1])\n for ij, j in enumerate(arr_k)\n ],\n \"chunk-location\": arr_k,\n }\n\n info[None] = {\n \"shape\": out.shape,\n \"num-chunks\": out.numblocks,\n \"array-location\": [\n (out_starts[ij][j], out_starts[ij][j + 1])\n for ij, j in enumerate(block_id)\n ],\n \"chunk-location\": block_id,\n \"chunk-shape\": tuple(\n out.chunks[ij][j] for ij, j in enumerate(block_id)\n ),\n \"dtype\": dtype,\n }\n block_info_dsk[(block_info_name,) + block_id] = info\n\n block_info = Array(\n block_info_dsk,\n block_info_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_info, out_ind))\n extra_names.append(\"block_info\")\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumsum_Array.cumsum.return.cumsum_self_axis_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumsum_Array.cumsum.return.cumsum_self_axis_dtype_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2332, "end_line": 2347, "span_ids": ["Array.cumsum"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def cumsum(self, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the cumsum of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by\n first taking the sum of each block and combines the sums via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n from .reductions import cumsum\n\n return cumsum(self, axis, dtype, out=out, method=method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumprod_Array.cumprod.return.cumprod_self_axis_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.cumprod_Array.cumprod.return.cumprod_self_axis_dtype", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2349, "end_line": 2364, "span_ids": ["Array.cumprod"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def cumprod(self, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumprod. Default is 'sequential'.\n\n * 'sequential' performs the cumprod of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumprod. 
It exposes parallelism by first\n taking the product of each block and combines the products via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n from .reductions import cumprod\n\n return cumprod(self, axis, dtype, out=out, method=method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.squeeze_Array.clip.return.clip_self_min_max_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.squeeze_Array.clip.return.clip_self_min_max_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2561, "end_line": 2596, "span_ids": ["Array.conj", "Array.squeeze", "Array.real", "Array.rechunk", "Array.clip", "Array.imag"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @derived_from(np.ndarray)\n def squeeze(self, axis=None):\n from .routines import squeeze\n\n return squeeze(self, axis)\n\n def rechunk(\n self, chunks=\"auto\", threshold=None, block_size_limit=None, balance=False\n ):\n \"\"\"See da.rechunk for docstring\"\"\"\n from .rechunk import rechunk # avoid circular import\n\n return rechunk(self, chunks, threshold, block_size_limit, balance)\n\n @property\n def real(self):\n from .ufunc import real\n\n return real(self)\n\n @property\n def imag(self):\n from .ufunc import imag\n\n return imag(self)\n\n def conj(self):\n from .ufunc import conj\n\n return conj(self)\n\n @derived_from(np.ndarray)\n def clip(self, min=None, max=None):\n from .ufunc import clip\n\n return clip(self, min, max)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_npy_stack_from_npy_stack.return.Array_dsk_name_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_npy_stack_from_npy_stack.return.Array_dsk_name_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5187, "end_line": 5214, "span_ids": ["from_npy_stack"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_npy_stack(dirname, mmap_mode=\"r\"):\n \"\"\"Load dask array from stack of npy files\n\n See 
:func:`dask.array.to_npy_stack` for docstring.\n\n Parameters\n ----------\n dirname: string\n Directory of .npy files\n mmap_mode: (None or 'r')\n Read data in memory map mode\n \"\"\"\n with open(os.path.join(dirname, \"info\"), \"rb\") as f:\n info = pickle.load(f)\n\n dtype = info[\"dtype\"]\n chunks = info[\"chunks\"]\n axis = info[\"axis\"]\n\n name = \"from-npy-stack-%s\" % dirname\n keys = list(product([name], *[range(len(c)) for c in chunks]))\n values = [\n (np.load, os.path.join(dirname, \"%d.npy\" % i), mmap_mode)\n for i in range(len(chunks[axis]))\n ]\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_empty_like_empty_like.return.empty_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_empty_like_empty_like.return.empty_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 30, "end_line": 83, "span_ids": ["empty_like"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def empty_like(a, dtype=None, order=\"C\", chunks=None, name=None, shape=None):\n \"\"\"\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of the\n returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `a`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Notes\n -----\n This function does *not* initialize the returned array; to do that use\n `zeros_like` or `ones_like` instead. 
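Round-trip sketch for the npy-stack loader above, using its documented counterpart ``da.to_npy_stack``; the temporary directory is just a stand-in path:

import os
import tempfile

import dask.array as da

x = da.arange(10, chunks=4)
dirname = os.path.join(tempfile.mkdtemp(), "stack")
da.to_npy_stack(dirname, x, axis=0)  # writes 0.npy, 1.npy, ... plus the pickled "info"
y = da.from_npy_stack(dirname)       # memory-mapped with mmap_mode="r" by default
print(y.compute())                   # array([0, 1, ..., 9])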
It may be marginally faster than\n the functions that do set the array values.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return empty(\n shape,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_ones_like_ones_like.return.ones_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_ones_like_ones_like.return.ones_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 132, "span_ids": ["ones_like"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ones_like(a, dtype=None, order=\"C\", chunks=None, name=None, shape=None):\n \"\"\"\n Return an array of ones with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. 
Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of ones with the same shape and type as `a`.\n\n See Also\n --------\n zeros_like : Return an array of zeros with shape and type of input.\n empty_like : Return an empty array with shape and type of input.\n zeros : Return a new array setting values to zero.\n ones : Return a new array setting values to one.\n empty : Return a new uninitialized array.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return ones(\n shape,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_zeros_like_zeros_like.return.zeros_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_zeros_like_zeros_like.return.zeros_", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 135, "end_line": 181, "span_ids": ["zeros_like"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def zeros_like(a, dtype=None, order=\"C\", chunks=None, name=None, shape=None):\n \"\"\"\n Return an array of zeros with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n chunks : sequence of ints\n The number of samples on each block. Note that the last block will have\n fewer samples if ``len(array) % chunks != 0``.\n name : str, optional\n An optional keyname for the array. 
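Usage sketch for the ``*_like`` creation routines in this file: the ``dtype``, ``chunks`` and ``shape`` overrides behave the same way across ``empty_like``, ``ones_like`` and ``zeros_like``:

import dask.array as da

a = da.ones((4, 4), chunks=2, dtype="f8")
z = da.zeros_like(a, dtype="i4", chunks=(4, 2))  # override dtype and chunking
print(z.dtype, z.chunks)                         # int32 ((4,), (2, 2))
print(z.compute().sum())                         # 0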
Defaults to hashing the input\n keyword arguments.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result.\n\n Returns\n -------\n out : ndarray\n Array of zeros with the same shape and type as `a`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n empty_like : Return an empty array with shape and type of input.\n zeros : Return a new array setting values to zero.\n ones : Return a new array setting values to one.\n empty : Return a new uninitialized array.\n \"\"\"\n\n a = asarray(a, name=False)\n shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)\n return zeros(\n shape,\n dtype=(dtype or a.dtype),\n order=order,\n chunks=chunks,\n name=name,\n meta=a._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_optimize.return.optimize_slices_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/optimization.py_optimize_optimize.return.optimize_slices_dsk_", "embedding": null, "metadata": {"file_path": "dask/array/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 23, "end_line": 77, "span_ids": ["optimize"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize(\n dsk,\n keys,\n fuse_keys=None,\n fast_functions=None,\n inline_functions_fast_functions=(getter_inline,),\n rename_fused_keys=True,\n **kwargs,\n):\n \"\"\"Optimize dask for array computation\n\n 1. Cull tasks not necessary to evaluate keys\n 2. Remove full slicing, e.g. x[:]\n 3. 
Inline fast functions like getitem and np.transpose\n \"\"\"\n if not isinstance(keys, (list, set)):\n keys = [keys]\n keys = list(flatten(keys))\n\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(id(dsk), dsk, dependencies=())\n\n dsk = optimize_blockwise(dsk, keys=keys)\n dsk = fuse_roots(dsk, keys=keys)\n dsk = dsk.cull(set(keys))\n\n # Perform low-level fusion unless the user has\n # specified False explicitly.\n if config.get(\"optimization.fuse.active\") is False:\n return dsk\n\n dependencies = dsk.get_all_dependencies()\n dsk = ensure_dict(dsk)\n\n # Low level task optimizations\n if fast_functions is not None:\n inline_functions_fast_functions = fast_functions\n\n hold = hold_keys(dsk, dependencies)\n\n dsk, dependencies = fuse(\n dsk,\n hold + keys + (fuse_keys or []),\n dependencies,\n rename_keys=rename_fused_keys,\n )\n if inline_functions_fast_functions:\n dsk = inline_functions(\n dsk,\n keys,\n dependencies=dependencies,\n fast_functions=inline_functions_fast_functions,\n )\n\n return optimize_slices(dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_add_dummy_padding_map_overlap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_add_dummy_padding_map_overlap", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 497, "end_line": 756, "span_ids": ["map_overlap", "add_dummy_padding"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def add_dummy_padding(x, depth, boundary):\n \"\"\"\n Pads an array which has 'none' as the boundary type.\n Used to simplify trimming arrays which use 'none'.\n\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n >>> add_dummy_padding(x, {0: 1}, {0: 'none'}).compute() # doctest: +NORMALIZE_WHITESPACE\n array([..., 0, 1, 2, 3, 4, 5, ...])\n \"\"\"\n for k, v in boundary.items():\n d = depth.get(k, 0)\n if v == \"none\" and d > 0:\n empty_shape = list(x.shape)\n empty_shape[k] = d\n\n empty_chunks = list(x.chunks)\n empty_chunks[k] = (d,)\n\n empty = empty_like(\n getattr(x, \"_meta\", x),\n shape=empty_shape,\n chunks=empty_chunks,\n dtype=x.dtype,\n )\n\n out_chunks = list(x.chunks)\n ax_chunks = list(out_chunks[k])\n ax_chunks[0] += d\n ax_chunks[-1] += d\n out_chunks[k] = tuple(ax_chunks)\n\n x = concatenate([empty, x, empty], axis=k)\n x = x.rechunk(out_chunks)\n return x\n\n\ndef map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n # ... 
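The early return in ``optimize`` above is driven by dask's config system; a sketch of toggling low-level fusion off for one computation with ``dask.config.set``:

import dask
import dask.array as da

x = (da.ones(10, chunks=5) + 1).sum()
with dask.config.set({"optimization.fuse.active": False}):
    print(x.compute())  # optimize() stops after cull/optimize_blockwise/fuse_roots
print(x.compute())      # default path: low-level fuse and inline_functions also run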
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Map_a_function_over_bl_map_overlap._Map_a_function_over_bl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Map_a_function_over_bl_map_overlap._Map_a_function_over_bl", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 475, "end_line": 624, "span_ids": ["map_overlap"], "tokens": 1857}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n \"\"\"Map a function over blocks of arrays with some overlap\n\n We share neighboring zones between blocks of the array, map a\n function, and then trim away the neighboring strips. If depth is\n larger than any chunk along a particular axis, then the array is\n rechunked.\n\n Note that this function will attempt to automatically determine the output\n array type before computing it, please refer to the ``meta`` keyword argument\n in ``map_blocks`` if you expect that the function will not succeed when\n operating on 0-d arrays.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block.\n If multiple arrays are provided, then the function should expect to\n receive chunks of each array in the same order.\n args : dask arrays\n depth: int, tuple, dict or list\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis.\n If a list then each element of that list must be an int, tuple or dict\n defining depth for the corresponding array in `args`.\n Asymmetric depths may be specified using a dict value of (-/+) tuples.\n Note that asymmetric depths are currently only supported when\n ``boundary`` is 'none'.\n The default value is 0.\n boundary: str, tuple, dict or list\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan.\n If a list then each element must be a str, tuple or dict defining the\n boundary for the corresponding array in `args`.\n The default value is 'reflect'.\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n align_arrays: bool\n Whether or not to align chunks along equally sized dimensions when\n multiple arrays are provided. This allows for larger chunks in some\n arrays to be broken into smaller ones that match chunk sizes in other\n arrays such that they are compatible for block function mapping. 
If\n this is false, then an error will be thrown if arrays do not already\n have the same number of blocks in each dimension.\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> import numpy as np\n >>> import dask.array as da\n\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = da.from_array(x, chunks=5)\n >>> def derivative(x):\n ... return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect').compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n\n The ``da.map_overlap`` function can also accept multiple arrays.\n\n >>> func = lambda x, y: x + y\n >>> x = da.arange(8).reshape(2, 4).rechunk((1, 2))\n >>> y = da.arange(4).rechunk(2)\n >>> da.map_overlap(func, x, y, depth=1, boundary='reflect').compute() # doctest: +NORMALIZE_WHITESPACE\n array([[ 0, 2, 4, 6],\n [ 4, 6, 8, 10]])\n\n When multiple arrays are given, they do not need to have the\n same number of dimensions but they must broadcast together.\n Arrays are aligned block by block (just as in ``da.map_blocks``)\n so the blocks must have a common chunk size. This common chunking\n is determined automatically as long as ``align_arrays`` is True.\n\n >>> x = da.arange(8, chunks=4)\n >>> y = da.arange(8, chunks=2)\n >>> r = da.map_overlap(func, x, y, depth=1, boundary='reflect', align_arrays=True)\n >>> len(r.to_delayed())\n 4\n\n >>> da.map_overlap(func, x, y, depth=1, boundary='reflect', align_arrays=False).compute()\n Traceback (most recent call last):\n ...\n ValueError: Shapes do not align {'.0': {2, 4}}\n\n Note also that this function is equivalent to ``map_blocks``\n by default. A non-zero ``depth`` must be defined for any\n overlap to appear in the arrays provided to ``func``.\n\n >>> func = lambda x: x.sum()\n >>> x = da.ones(10, dtype='int')\n >>> block_args = dict(chunks=(), drop_axis=0)\n >>> da.map_blocks(func, x, **block_args).compute()\n 10\n >>> da.map_overlap(func, x, **block_args, boundary='reflect').compute()\n 10\n >>> da.map_overlap(func, x, **block_args, depth=1, boundary='reflect').compute()\n 12\n\n For functions that may not handle 0-d arrays, it's also possible to specify\n ``meta`` with an empty array matching the type of the expected result. 
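One more minimal illustration of the plumbing described above: with an identity function, the halo added by ``depth`` and ``boundary`` is trimmed away again (``trim=True`` is the default), so the input round-trips unchanged:

import numpy as np
import dask.array as da

x = da.from_array(np.arange(8), chunks=4)
y = x.map_overlap(lambda b: b, depth=1, boundary="periodic")  # halo of 1, wrapped ends
assert (y == x).all().compute()  # halos trimmed: identity round-trips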
In\n the example below, ``func`` will result in an ``IndexError`` when computing\n ``meta``:\n\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(()))\n >>> y\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>\n >>> y.compute()\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n\n Similarly, it's possible to specify a non-NumPy array to ``meta``:\n\n >>> import cupy # doctest: +SKIP\n >>> x = cupy.arange(16).reshape((4, 4)) # doctest: +SKIP\n >>> d = da.from_array(x, chunks=(2, 2)) # doctest: +SKIP\n >>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(())) # doctest: +SKIP\n >>> y # doctest: +SKIP\n dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>\n >>> y.compute() # doctest: +SKIP\n array([[ 4, 6, 8, 10],\n [ 8, 10, 12, 14],\n [20, 22, 24, 26],\n [24, 26, 28, 30]])\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_chunks__get_chunks.return.tuple_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py_format_chunks__get_chunks.return.tuple_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 674, "end_line": 698, "span_ids": ["format_plan", "format_chunks", "_get_chunks"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_chunks(chunks):\n \"\"\"\n >>> format_chunks((10 * (3,), 3 * (10,)))\n (10*[3], 3*[10])\n \"\"\"\n assert isinstance(chunks, tuple)\n return tuple(format_blocks(c) for c in chunks)\n\n\ndef format_plan(plan):\n \"\"\"\n >>> format_plan([((10, 10, 10), (15, 15)), ((30,), (10, 10, 10))])\n [(3*[10], 2*[15]), ([30], 3*[10])]\n \"\"\"\n return [format_chunks(c) for c in plan]\n\n\ndef _get_chunks(n, chunksize):\n leftover = n % chunksize\n n_chunks = n // chunksize\n\n chunks = [chunksize] * n_chunks\n if leftover:\n chunks.append(leftover)\n return tuple(chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__balance_chunksizes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__balance_chunksizes_", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 742, "end_line": 777, "span_ids": ["_balance_chunksizes"], "tokens": 258}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _balance_chunksizes(chunks: tuple[int, ...]) -> tuple[int, ...]:\n \"\"\"\n Balance the chunk sizes\n\n Parameters\n ----------\n chunks : tuple[int, ...]\n Chunk sizes for Dask array.\n\n Returns\n -------\n new_chunks : tuple[int, ...]\n New chunks for Dask array with balanced sizes.\n \"\"\"\n median_len = np.median(chunks).astype(int)\n n_chunks = len(chunks)\n eps = median_len // 2\n if min(chunks) <= 0.5 * max(chunks):\n n_chunks -= 1\n\n new_chunks = [\n _get_chunks(sum(chunks), chunk_len)\n for chunk_len in range(median_len - eps, median_len + eps + 1)\n ]\n possible_chunks = [c for c in new_chunks if len(c) == n_chunks]\n if not len(possible_chunks):\n warn(\n \"chunk size balancing not possible with given chunks. \"\n \"Try increasing the chunk size.\"\n )\n return chunks\n\n diffs = [max(c) - min(c) for c in possible_chunks]\n best_chunk_size = np.argmin(diffs)\n return possible_chunks[best_chunk_size]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__prefixscan_combine__prefixscan_first.return.func_x_axis_axis_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__prefixscan_combine__prefixscan_first.return.func_x_axis_axis_dtype_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1121, "end_line": 1164, "span_ids": ["_prefixscan_combine", "_prefixscan_first"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _prefixscan_combine(func, binop, pre, x, axis, dtype):\n \"\"\"Combine results of a parallel prefix scan such as cumsum\n\n Parameters\n ----------\n func : callable\n Cumulative function (e.g. ``np.cumsum``)\n binop : callable\n Associative function (e.g. ``add``)\n pre : np.array\n The value calculated in parallel from ``preop``.\n For example, the sum of all the previous blocks.\n x : np.array\n Current block\n axis : int\n dtype : dtype\n\n Returns\n -------\n np.array\n \"\"\"\n # We could compute this in two tasks.\n # This would allow us to do useful work (i.e., func), while waiting on `pre`.\n # Using one task may guide the scheduler to do better and reduce scheduling overhead.\n return binop(pre, func(x, axis=axis, dtype=dtype))\n\n\ndef _prefixscan_first(func, x, axis, dtype):\n \"\"\"Compute the prefix scan (e.g., cumsum) on the first block\n\n Parameters\n ----------\n func : callable\n Cumulative function (e.g. 
``np.cumsum``)\n x : np.array\n Current block\n axis : int\n dtype : dtype\n\n Returns\n -------\n np.array\n \"\"\"\n return func(x, axis=axis, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch_prefixscan_blelloch.level.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch_prefixscan_blelloch.level.0", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1208, "end_line": 1261, "span_ids": ["prefixscan_blelloch"], "tokens": 524}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def prefixscan_blelloch(func, preop, binop, x, axis=None, dtype=None, out=None):\n \"\"\"Generic function to perform parallel cumulative scan (a.k.a prefix scan)\n\n The Blelloch prefix scan is work-efficient and exposes parallelism.\n A parallel cumsum works by first taking the sum of each block, then do a binary tree\n merge followed by a fan-out (i.e., the Brent-Kung pattern). We then take the cumsum\n of each block and add the sum of the previous blocks.\n\n When performing a cumsum across N chunks, this method has 2 * lg(N) levels of dependencies.\n In contrast, the sequential method has N levels of dependencies.\n\n Floating point operations should be more accurate with this method compared to sequential.\n\n Parameters\n ----------\n func : callable\n Cumulative function (e.g. ``np.cumsum``)\n preop : callable\n Function to get the final value of a cumulative function (e.g., ``np.sum``)\n binop : callable\n Associative function (e.g. ``add``)\n x : dask array\n axis : int\n dtype : dtype\n\n Returns\n -------\n dask array\n \"\"\"\n if axis is None:\n x = x.flatten().rechunk(chunks=x.npartitions)\n axis = 0\n if dtype is None:\n dtype = getattr(func(np.empty((0,), dtype=x.dtype)), \"dtype\", object)\n assert isinstance(axis, Integral)\n axis = validate_axis(axis, x.ndim)\n name = f\"{func.__name__}-{tokenize(func, axis, preop, binop, x, dtype)}\"\n base_key = (name,)\n\n # Right now, the metadata for batches is incorrect, but this should be okay\n batches = x.map_blocks(preop, axis=axis, keepdims=True, dtype=dtype)\n # We don't need the last index until the end\n *indices, last_index = full_indices = [\n list(\n product(\n *[range(nb) if j != axis else [i] for j, nb in enumerate(x.numblocks)]\n )\n )\n for i in range(x.numblocks[axis])\n ]\n prefix_vals = [[(batches.name,) + index for index in vals] for vals in indices]\n dsk = {}\n n_vals = len(prefix_vals)\n level = 0\n # ... 
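The two helpers compose per block as `binop(pre, func(x))`: the first block gets a plain scan, and every later block adds the combined `preop` results of the blocks before it. A NumPy sketch over two blocks:

```python
import numpy as np

# Two-block cumsum using the helper contract documented above.
blocks = [np.array([1, 2, 3]), np.array([4, 5, 6])]

first = np.cumsum(blocks[0])          # _prefixscan_first: func(x)
pre = blocks[0].sum()                 # preop over the earlier block
second = pre + np.cumsum(blocks[1])   # _prefixscan_combine: binop(pre, func(x))

print(np.concatenate([first, second]))    # [ 1  3  6 10 15 21]
print(np.cumsum(np.concatenate(blocks)))  # identical
```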
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch.if_n_vals_2__prefixscan_blelloch.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_prefixscan_blelloch.if_n_vals_2__prefixscan_blelloch.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1221, "end_line": 1284, "span_ids": ["prefixscan_blelloch"], "tokens": 577}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def prefixscan_blelloch(func, preop, binop, x, axis=None, dtype=None, out=None):\n # ... other code\n if n_vals >= 2:\n # Upsweep\n stride = 1\n stride2 = 2\n while stride2 <= n_vals:\n for i in range(stride2 - 1, n_vals, stride2):\n new_vals = []\n for index, left_val, right_val in zip(\n indices[i], prefix_vals[i - stride], prefix_vals[i]\n ):\n key = base_key + index + (level, i)\n dsk[key] = (binop, left_val, right_val)\n new_vals.append(key)\n prefix_vals[i] = new_vals\n stride = stride2\n stride2 *= 2\n level += 1\n\n # Downsweep\n # With `n_vals == 3`, we would have `stride = 1` and `stride = 0`, but we need\n # to do a downsweep iteration, so make sure stride2 is at least 2.\n stride2 = builtins.max(2, 2 ** ceil(log2(n_vals // 2)))\n stride = stride2 // 2\n while stride > 0:\n for i in range(stride2 + stride - 1, n_vals, stride2):\n new_vals = []\n for index, left_val, right_val in zip(\n indices[i], prefix_vals[i - stride], prefix_vals[i]\n ):\n key = base_key + index + (level, i)\n dsk[key] = (binop, left_val, right_val)\n new_vals.append(key)\n prefix_vals[i] = new_vals\n stride2 = stride\n stride //= 2\n level += 1\n\n if full_indices:\n for index in full_indices[0]:\n dsk[base_key + index] = (\n _prefixscan_first,\n func,\n (x.name,) + index,\n axis,\n dtype,\n )\n for indexes, vals in zip(drop(1, full_indices), prefix_vals):\n for index, val in zip(indexes, vals):\n dsk[base_key + index] = (\n _prefixscan_combine,\n func,\n binop,\n val,\n (x.name,) + index,\n axis,\n dtype,\n )\n if len(full_indices) < 2:\n deps = [x]\n else:\n deps = [x, batches]\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)\n result = Array(graph, name, x.chunks, batches.dtype)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction_cumreduction.for_ind_in_indices_.dsk_name_ind_m_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction_cumreduction.for_ind_in_indices_.dsk_name_ind_m_n", "embedding": null, 
"metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1330, "end_line": 1407, "span_ids": ["cumreduction"], "tokens": 709}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cumreduction(\n func,\n binop,\n ident,\n x,\n axis=None,\n dtype=None,\n out=None,\n method=\"sequential\",\n preop=None,\n):\n \"\"\"Generic function for cumulative reduction\n\n Parameters\n ----------\n func: callable\n Cumulative function like np.cumsum or np.cumprod\n binop: callable\n Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``\n ident: Number\n Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``\n x: dask Array\n axis: int\n dtype: dtype\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the scan of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel scan. It exposes parallelism by first\n calling ``preop`` on each block and combines the values via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n preop: callable, optional\n Function used by 'blelloch' method like ``np.cumsum->np.sum`` or ``np.cumprod->np.prod``\n\n Returns\n -------\n dask array\n\n See also\n --------\n cumsum\n cumprod\n \"\"\"\n if method == \"blelloch\":\n if preop is None:\n raise TypeError(\n 'cumreduction with \"blelloch\" method required `preop=` argument'\n )\n return prefixscan_blelloch(func, preop, binop, x, axis, dtype, out=out)\n elif method != \"sequential\":\n raise ValueError(\n f'Invalid method for cumreduction. Expected \"sequential\" or \"blelloch\". Got: {method!r}'\n )\n\n if axis is None:\n x = x.flatten().rechunk(chunks=x.npartitions)\n axis = 0\n if dtype is None:\n dtype = getattr(func(np.empty((0,), dtype=x.dtype)), \"dtype\", object)\n assert isinstance(axis, Integral)\n axis = validate_axis(axis, x.ndim)\n\n m = x.map_blocks(func, axis=axis, dtype=dtype)\n\n name = f\"{func.__name__}-{tokenize(func, axis, binop, ident, x, dtype)}\"\n n = x.numblocks[axis]\n full = slice(None, None, None)\n slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)\n\n indices = list(\n product(*[range(nb) if i != axis else [0] for i, nb in enumerate(x.numblocks)])\n )\n dsk = dict()\n for ind in indices:\n shape = tuple(x.chunks[i][ii] if i != axis else 1 for i, ii in enumerate(ind))\n dsk[(name, \"extra\") + ind] = (np.full, shape, ident, m.dtype)\n dsk[(name,) + ind] = (m.name,) + ind\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction.for_i_in_range_1_n__cumreduction.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumreduction.for_i_in_range_1_n__cumreduction.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1366, "end_line": 1384, "span_ids": ["cumreduction"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cumreduction(\n func,\n binop,\n ident,\n x,\n axis=None,\n dtype=None,\n out=None,\n method=\"sequential\",\n preop=None,\n):\n # ... other code\n\n for i in range(1, n):\n last_indices = indices\n indices = list(\n product(\n *[range(nb) if ii != axis else [i] for ii, nb in enumerate(x.numblocks)]\n )\n )\n for old, ind in zip(last_indices, indices):\n this_slice = (name, \"extra\") + ind\n dsk[this_slice] = (\n binop,\n (name, \"extra\") + old,\n (operator.getitem, (m.name,) + old, slc),\n )\n dsk[(name,) + ind] = (binop, this_slice, (m.name,) + ind)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[m])\n result = Array(graph, name, x.chunks, m.dtype)\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__cumsum_merge__cumprod_merge.return.a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__cumsum_merge__cumprod_merge.return.a_b", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1387, "end_line": 1398, "span_ids": ["_cumsum_merge", "_cumprod_merge"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cumsum_merge(a, b):\n if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):\n values = np.ma.getdata(a) + np.ma.getdata(b)\n return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))\n return a + b\n\n\ndef _cumprod_merge(a, b):\n if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):\n values = np.ma.getdata(a) * np.ma.getdata(b)\n return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))\n return a * b", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumsum_cumsum.return.cumreduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumsum_cumsum.return.cumreduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1401, "end_line": 1424, "span_ids": ["cumsum"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef cumsum(x, axis=None, dtype=None, out=None, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the cumsum of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumsum. It exposes parallelism by\n first taking the sum of each block and combines the sums via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n np.cumsum,\n _cumsum_merge,\n 0,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.sum,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumprod_cumprod.return.cumreduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_cumprod_cumprod.return.cumreduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1427, "end_line": 1450, "span_ids": ["cumprod"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef cumprod(x, axis=None, dtype=None, out=None, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumprod. Default is 'sequential'.\n\n * 'sequential' performs the cumprod of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumprod. 
It exposes parallelism by first\n taking the product of each block and combines the products via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n np.cumprod,\n _cumprod_merge,\n 1,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.prod,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_chunk_type_test_direct_deferral_wrapping_override.assert_eq_res_2_np_ara": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_is_valid_chunk_type_test_direct_deferral_wrapping_override.assert_eq_res_2_np_ara", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 209, "end_line": 235, "span_ids": ["test_is_valid_chunk_type", "test_direct_deferral_wrapping_override"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"arr_type, result\",\n [\n (WrappedArray, False),\n (da.Array, False),\n (EncapsulateNDArray, True),\n (np.ma.MaskedArray, True),\n (np.ndarray, True),\n (float, False),\n (int, False),\n ],\n)\ndef test_is_valid_chunk_type(arr_type, result):\n \"\"\"Test is_valid_chunk_type for correctness\"\"\"\n assert is_valid_chunk_type(arr_type) is result\n\n\ndef test_direct_deferral_wrapping_override():\n \"\"\"Directly test Dask defering to an upcast type and the ability to still wrap it.\"\"\"\n a = da.from_array(np.arange(4))\n b = WrappedArray(np.arange(4))\n assert a.__add__(b) is NotImplemented\n # Note: remove dask_graph to be able to wrap b in a dask array\n setattr(b, \"__dask_graph__\", None)\n res = a + da.from_array(b)\n assert isinstance(res, da.Array)\n assert_eq(res, 2 * np.arange(4), check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalarThatUnderstandsArrayOps_UnknownScalarThatUnderstandsArrayOps.__array_ufunc__.return.UnknownScalarThatUndersta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalarThatUnderstandsArrayOps_UnknownScalarThatUnderstandsArrayOps.__array_ufunc__.return.UnknownScalarThatUndersta", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 237, "end_line": 246, "span_ids": ["UnknownScalarThatUnderstandsArrayOps", "UnknownScalarThatUnderstandsArrayOps.__array_ufunc__"], "tokens": 122}, 
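Both `cumsum` and `cumprod` accept the `method` keyword documented above, and the two methods agree numerically (up to floating-point reassociation; exactly, for integers). A quick check:

```python
import numpy as np
import dask.array as da

# Same result either way; 'blelloch' just builds a shallower task graph.
x = da.from_array(np.arange(1, 9), chunks=2)
seq = da.cumsum(x, method="sequential").compute()
ble = da.cumsum(x, method="blelloch").compute()
np.testing.assert_array_equal(seq, ble)
print(ble)  # [ 1  3  6 10 15 21 28 36]
```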
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class UnknownScalarThatUnderstandsArrayOps(np.lib.mixins.NDArrayOperatorsMixin):\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n outputs = kwargs.get(\"out\", ())\n for item in inputs + outputs:\n if hasattr(item, \"__array_ufunc__\") and not isinstance(\n item, (np.ndarray, Array, UnknownScalarThatUnderstandsArrayOps)\n ):\n return NotImplemented\n # This is a dummy scalar that just returns a new object for every op\n return UnknownScalarThatUnderstandsArrayOps()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_delegation_unknown_scalar_that_understands_arr_ops_test_delegation_unknown_scalar_that_understands_arr_ops.assert_type_np_multiply_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_test_delegation_unknown_scalar_that_understands_arr_ops_test_delegation_unknown_scalar_that_understands_arr_ops.assert_type_np_multiply_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 256, "span_ids": ["test_delegation_unknown_scalar_that_understands_arr_ops"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"arr\", [da.from_array([1, 2]), np.asarray([1, 2])])\ndef test_delegation_unknown_scalar_that_understands_arr_ops(arr):\n s = UnknownScalarThatUnderstandsArrayOps()\n assert type(arr * s) == UnknownScalarThatUnderstandsArrayOps\n assert type(s * arr) == UnknownScalarThatUnderstandsArrayOps\n # Explicit tests of numpy NEP-13 dispatching\n assert type(np.multiply(s, arr)) == UnknownScalarThatUnderstandsArrayOps\n assert type(np.multiply(arr, s)) == UnknownScalarThatUnderstandsArrayOps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalar_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_dispatch.py_UnknownScalar_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_dispatch.py", "file_name": "test_dispatch.py", "file_type": "text/x-python", "category": "test", "start_line": 259, "end_line": 286, "span_ids": ["UnknownScalar", "test_delegation_unknown_scalar", "test_delegation_specific_cases", "UnknownScalar:4", "UnknownScalar.__mul__"], "tokens": 200}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class UnknownScalar:\n __array_ufunc__ = None\n\n def __mul__(self, other):\n return 42\n\n __rmul__ = __mul__\n\n\n@pytest.mark.parametrize(\"arr\", [da.from_array([1, 2]), np.asarray([1, 2])])\ndef test_delegation_unknown_scalar(arr):\n s = UnknownScalar()\n assert arr * s == 42\n assert s * arr == 42\n with pytest.raises(\n TypeError, match=\"operand 'UnknownScalar' does not support ufuncs\"\n ):\n np.multiply(s, arr)\n\n\ndef test_delegation_specific_cases():\n a = da.from_array([\"a\", \"b\", \".\", \"d\"])\n # Fixes GH6631\n assert_eq(a == \".\", [False, False, True, False])\n assert_eq(\".\" == a, [False, False, True, False])\n # Fixes GH6611\n assert \"b\" in a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lstsq_test_lstsq.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_lstsq_test_lstsq.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 818, "end_line": 864, "span_ids": ["test_lstsq"], "tokens": 586}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"iscomplex\", [False, True])\n@pytest.mark.parametrize((\"nrow\", \"ncol\", \"chunk\"), [(20, 10, 5), (100, 10, 10)])\ndef test_lstsq(nrow, ncol, chunk, iscomplex):\n np.random.seed(1)\n A = np.random.randint(1, 20, (nrow, ncol))\n b = np.random.randint(1, 20, nrow)\n if iscomplex:\n A = A + 1.0j * np.random.randint(1, 20, A.shape)\n b = b + 1.0j * np.random.randint(1, 20, b.shape)\n\n dA = da.from_array(A, (chunk, ncol))\n db = da.from_array(b, chunk)\n\n x, r, rank, s = np.linalg.lstsq(A, b, rcond=-1)\n dx, dr, drank, ds = da.linalg.lstsq(dA, db)\n\n assert_eq(dx, x)\n assert_eq(dr, r)\n assert drank.compute() == rank\n assert_eq(ds, s)\n\n # reduce rank causes multicollinearity, only compare rank\n A[:, 1] = A[:, 2]\n dA = da.from_array(A, (chunk, ncol))\n db = da.from_array(b, chunk)\n x, r, rank, s = np.linalg.lstsq(\n A, b, rcond=np.finfo(np.double).eps * max(nrow, ncol)\n )\n assert rank == ncol - 1\n dx, dr, drank, ds = da.linalg.lstsq(dA, db)\n assert drank.compute() == rank\n\n # 2D case\n A = np.random.randint(1, 20, (nrow, ncol))\n b2D = np.random.randint(1, 20, (nrow, ncol // 2))\n if iscomplex:\n A = A + 1.0j * np.random.randint(1, 20, A.shape)\n b2D = b2D + 1.0j * np.random.randint(1, 20, b2D.shape)\n dA = da.from_array(A, (chunk, ncol))\n db2D = da.from_array(b2D, (chunk, ncol // 2))\n x, r, rank, s = np.linalg.lstsq(A, b2D, rcond=-1)\n dx, dr, drank, ds = da.linalg.lstsq(dA, db2D)\n\n 
assert_eq(dx, x)\n assert_eq(dr, r)\n assert drank.compute() == rank\n assert_eq(ds, s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_correction_test_svd_flip_correction.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_correction_test_svd_flip_correction.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 856, "end_line": 878, "span_ids": ["test_svd_flip_correction"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape\", [(10, 20), (10, 10), (20, 10)])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (10, -1), (-1, 10)])\n@pytest.mark.parametrize(\"dtype\", [\"f4\", \"f8\"])\ndef test_svd_flip_correction(shape, chunks, dtype):\n # Verify that sign-corrected SVD results can still\n # be used to reconstruct inputs\n x = da.random.random(size=shape, chunks=chunks).astype(dtype)\n u, s, v = da.linalg.svd(x)\n\n # Choose precision in evaluation based on float precision\n decimal = 9 if np.dtype(dtype).itemsize > 4 else 6\n\n # Validate w/ dask inputs\n uf, vf = svd_flip(u, v)\n assert uf.dtype == u.dtype\n assert vf.dtype == v.dtype\n np.testing.assert_almost_equal(np.asarray(np.dot(uf * s, vf)), x, decimal=decimal)\n\n # Validate w/ numpy inputs\n uc, vc = svd_flip(*da.compute(u, v))\n assert uc.dtype == u.dtype\n assert vc.dtype == v.dtype\n np.testing.assert_almost_equal(np.asarray(np.dot(uc * s, vc)), x, decimal=decimal)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_sign_test_svd_flip_sign.assert_eq_v_y_T_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_flip_sign_test_svd_flip_sign.assert_eq_v_y_T_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 881, "end_line": 899, "span_ids": ["test_svd_flip_sign"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [\"f2\", \"f4\", \"f8\", \"f16\", \"c8\", \"c16\", \"c32\"])\n@pytest.mark.parametrize(\"u_based\", [True, False])\ndef test_svd_flip_sign(dtype, u_based):\n try:\n x 
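`test_lstsq` above checks that `da.linalg.lstsq` returns the same `(x, residuals, rank, s)` tuple as NumPy. A minimal version of the same comparison, with the chunking the test uses (rows chunked, single column chunk):

```python
import numpy as np
import dask.array as da

rng = np.random.RandomState(1)
A = rng.randint(1, 20, (20, 10)).astype(float)
b = rng.randint(1, 20, 20).astype(float)

dA = da.from_array(A, chunks=(5, 10))  # rows chunked, columns whole
db = da.from_array(b, chunks=5)

x, r, rank, s = np.linalg.lstsq(A, b, rcond=-1)
dx, dr, drank, ds = da.linalg.lstsq(dA, db)
np.testing.assert_allclose(dx.compute(), x)
assert drank.compute() == rank
```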
= np.array(\n [[1, -1, 1, -1], [1, -1, 1, -1], [-1, 1, 1, -1], [-1, 1, 1, -1]],\n dtype=dtype,\n )\n except TypeError:\n pytest.skip(\"128-bit floats not supported by NumPy\")\n u, v = svd_flip(x, x.T, u_based_decision=u_based)\n assert u.dtype == x.dtype\n assert v.dtype == x.dtype\n # Verify that all singular vectors have same\n # sign except for the last one (i.e. last column)\n y = x.copy()\n y[:, -1] *= y.dtype.type(-1)\n assert_eq(u, y)\n assert_eq(v, y.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_disable_lowlevel_fusion_test_disable_lowlevel_fusion.with_dask_config_set_op.assert_eq_y_1_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_disable_lowlevel_fusion_test_disable_lowlevel_fusion.with_dask_config_set_op.assert_eq_y_1_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 375, "end_line": 388, "span_ids": ["test_disable_lowlevel_fusion"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_disable_lowlevel_fusion():\n \"\"\"Check that by disabling fusion, the HLG survives through optimizations\"\"\"\n\n with dask.config.set({\"optimization.fuse.active\": False}):\n y = da.ones(3, chunks=(3,), dtype=\"int\")\n optimize = y.__dask_optimize__\n dsk1 = y.__dask_graph__()\n dsk2 = optimize(dsk1, y.__dask_keys__())\n assert isinstance(dsk1, HighLevelGraph)\n assert isinstance(dsk2, HighLevelGraph)\n assert dsk1 == dsk2\n y = y.persist()\n assert isinstance(y.__dask_graph__(), HighLevelGraph)\n assert_eq(y, [1] * 3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false.assert_z2_shape_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false_test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false.assert_z2_shape_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 520, "end_line": 532, "span_ids": ["test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
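The sign convention `svd_flip` enforces can be sketched in NumPy, assuming the u-based decision: take the sign of each column's largest-magnitude entry of `u` and apply the compensating sign to the rows of `v`, leaving the product unchanged. This is an illustration of the convention, not dask's exact code:

```python
import numpy as np

u = np.array([[0.6, -0.8],
              [-0.8, -0.6]])
v = np.eye(2)

# Sign of each column's largest-|.| entry in u (u-based decision).
signs = np.sign(u[np.argmax(np.abs(u), axis=0), np.arange(u.shape[1])])
u_f = u * signs            # flip columns of u
v_f = v * signs[:, None]   # flip matching rows of v
np.testing.assert_allclose(u @ v, u_f @ v_f)  # product is preserved
```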
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_assumes_shape_matches_first_array_if_trim_is_false():\n # https://github.com/dask/dask/issues/6681\n x1 = da.ones((10,), chunks=(5, 5))\n x2 = x1.rechunk(10)\n\n def oversum(x):\n return x[2:-2]\n\n z1 = da.map_overlap(oversum, x1, depth=2, trim=False, boundary=\"none\")\n assert z1.shape == (10,)\n\n z2 = da.map_overlap(oversum, x2, depth=2, trim=False, boundary=\"none\")\n assert z2.shape == (10,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_deprecated_signature_test_map_overlap_deprecated_signature.None_2.assert_y_shape_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_deprecated_signature_test_map_overlap_deprecated_signature.None_2.assert_y_shape_3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 459, "end_line": 479, "span_ids": ["test_map_overlap_deprecated_signature"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_deprecated_signature():\n def func(x):\n return np.array(x.sum())\n\n x = da.ones(3)\n\n # Old positional signature: func, depth, boundary, trim\n with pytest.warns(FutureWarning):\n y = da.map_overlap(x, func, 0, \"reflect\", True)\n assert y.compute() == 3\n assert y.shape == (3,)\n\n with pytest.warns(FutureWarning):\n y = da.map_overlap(x, func, 1, \"reflect\", True)\n assert y.compute() == 5\n assert y.shape == (3,)\n\n with pytest.warns(FutureWarning):\n y = da.map_overlap(x, func, 1, \"reflect\", False)\n assert y.compute() == 5\n assert y.shape == (3,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_heterogeneous_test_plan_rechunk_heterogeneous.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_plan_rechunk_heterogeneous_test_plan_rechunk_heterogeneous.None_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 403, "end_line": 430, "span_ids": ["test_plan_rechunk_heterogeneous"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_plan_rechunk_heterogeneous():\n c = (10,) * 1 # coarse\n f = (1,) * 10 # fine\n cf = c + f\n cc = c + c\n ff = f + f\n fc = f + c\n\n # No intermediate required\n steps = _plan((cc, cf), (ff, ff))\n _assert_steps(steps, [(ff, ff)])\n steps = _plan((cf, fc), (ff, cf))\n _assert_steps(steps, [(ff, cf)])\n\n # An intermediate is used to reduce graph size\n steps = _plan((cc, cf), (ff, cc))\n _assert_steps(steps, [(cc, cc), (ff, cc)])\n\n steps = _plan((cc, cf, cc), (ff, cc, cf))\n _assert_steps(steps, [(cc, cc, cc), (ff, cc, cf)])\n\n # Imposing a memory limit => the first intermediate is constrained:\n # * cc -> ff would increase the graph size: no\n # * ff -> cf would increase the block size too much: no\n # * cf -> cc fits the bill (graph size /= 10, block size neutral)\n # * cf -> fc also fits the bill (graph size and block size neutral)\n steps = _plan((cc, ff, cf), (ff, cf, cc), block_size_limit=100)\n _assert_steps(steps, [(cc, ff, cc), (ff, cf, cc)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_test_rechunk_bad_keys.assert_100_in_str_info": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_zero_test_rechunk_bad_keys.assert_100_in_str_info", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 754, "end_line": 779, "span_ids": ["test_rechunk_bad_keys", "test_rechunk_zero"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_zero():\n with dask.config.set({\"array.chunk-size\": \"1B\"}):\n x = da.ones(10, chunks=(5,))\n y = x.rechunk(\"auto\")\n assert y.chunks == ((1,) * 10,)\n\n\ndef test_rechunk_bad_keys():\n x = da.zeros((2, 3, 4), chunks=1)\n assert x.rechunk({-1: 4}).chunks == ((1, 1), (1, 1, 1), (4,))\n assert x.rechunk({-x.ndim: 2}).chunks == ((2,), (1, 1, 1), (1, 1, 1, 1))\n\n with pytest.raises(TypeError) as info:\n x.rechunk({\"blah\": 4})\n\n assert \"blah\" in str(info.value)\n\n with pytest.raises(ValueError) as info:\n x.rechunk({100: 4})\n\n assert \"100\" in str(info.value)\n\n with pytest.raises(ValueError) as info:\n x.rechunk({-100: 4})\n\n assert \"-100\" in str(info.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_test_balance_chunks_unchanged.assert_balanced_chunks_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_test_balance_chunks_unchanged.assert_balanced_chunks_0_", "embedding": 
null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 782, "end_line": 799, "span_ids": ["test_balance_chunks_unchanged", "test_balance_basics"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_basics():\n arr_len = 220\n\n x = da.from_array(np.arange(arr_len), chunks=100)\n balanced = x.rechunk(chunks=100, balance=True)\n unbalanced = x.rechunk(chunks=100, balance=False)\n assert unbalanced.chunks[0] == (100, 100, 20)\n assert balanced.chunks[0] == (110, 110)\n\n\ndef test_balance_chunks_unchanged():\n arr_len = 220\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=100, balance=True)\n unbalanced = x.rechunk(chunks=100, balance=False)\n assert unbalanced.chunks[0] == (100, 100, 20)\n assert balanced.chunks[0] == (110, 110)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_small_test_balance_small.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_small_test_balance_small.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 802, "end_line": 817, "span_ids": ["test_balance_small"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_small():\n arr_len = 13\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=4, balance=True)\n unbalanced = x.rechunk(chunks=4, balance=False)\n assert balanced.chunks[0] == (5, 5, 3)\n assert unbalanced.chunks[0] == (4, 4, 4, 1)\n\n arr_len = 7\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=3, balance=True)\n unbalanced = x.rechunk(chunks=3, balance=False)\n assert balanced.chunks[0] == (4, 3)\n assert unbalanced.chunks[0] == (3, 3, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_n_chunks_size_test_balance_raises.x_rechunk_chunks_arr_len_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_n_chunks_size_test_balance_raises.x_rechunk_chunks_arr_len_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": 
"text/x-python", "category": "test", "start_line": 820, "end_line": 842, "span_ids": ["test_balance_n_chunks_size", "test_balance_raises"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_n_chunks_size():\n arr_len = 100\n n_chunks = 8\n\n x = da.from_array(np.arange(arr_len))\n balanced = x.rechunk(chunks=arr_len // n_chunks, balance=True)\n unbalanced = x.rechunk(chunks=arr_len // n_chunks, balance=False)\n assert balanced.chunks[0] == (13,) * 7 + (9,)\n assert unbalanced.chunks[0] == (12,) * 8 + (4,)\n\n\ndef test_balance_raises():\n arr_len = 100\n n_chunks = 11\n\n x = da.from_array(np.arange(arr_len))\n with pytest.warns(UserWarning, match=\"Try increasing the chunk size\"):\n balanced = x.rechunk(chunks=arr_len // n_chunks, balance=True)\n unbalanced = x.rechunk(chunks=arr_len // n_chunks, balance=False)\n assert balanced.chunks == unbalanced.chunks\n\n n_chunks = 10\n x.rechunk(chunks=arr_len // n_chunks, balance=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_2d_test_balance_different_inputs.assert_balanced_chunks_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_basics_2d_test_balance_different_inputs.assert_balanced_chunks_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 845, "end_line": 872, "span_ids": ["test_balance_different_inputs", "test_balance_2d_negative_dimension", "test_balance_basics_2d"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_basics_2d():\n N = 210\n\n x = da.from_array(np.random.uniform(size=(N, N)))\n balanced = x.rechunk(chunks=(100, 100), balance=True)\n unbalanced = x.rechunk(chunks=(100, 100), balance=False)\n assert unbalanced.chunks == ((100, 100, 10), (100, 100, 10))\n assert balanced.chunks == ((105, 105), (105, 105))\n\n\ndef test_balance_2d_negative_dimension():\n N = 210\n\n x = da.from_array(np.random.uniform(size=(N, N)))\n balanced = x.rechunk(chunks=(100, -1), balance=True)\n unbalanced = x.rechunk(chunks=(100, -1), balance=False)\n assert unbalanced.chunks == ((100, 100, 10), (N,))\n assert balanced.chunks == ((105, 105), (N,))\n\n\ndef test_balance_different_inputs():\n N = 210\n\n x = da.from_array(np.random.uniform(size=(N, N)))\n balanced = x.rechunk(chunks=(\"10MB\", -1), balance=True)\n unbalanced = x.rechunk(chunks=(\"10MB\", -1), balance=False)\n assert balanced.chunks == unbalanced.chunks\n assert balanced.chunks[1] == (N,)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_test_median.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_test_median.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 688, "end_line": 697, "span_ids": ["test_median"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"median\", \"nanmedian\"])\n@pytest.mark.parametrize(\"axis\", [0, [0, 1], 1, -1])\n@pytest.mark.parametrize(\"keepdims\", [True, False])\ndef test_median(axis, keepdims, func):\n x = np.arange(100).reshape((2, 5, 10))\n d = da.from_array(x, chunks=2)\n assert_eq(\n getattr(da, func)(d, axis=axis, keepdims=keepdims),\n getattr(np, func)(x, axis=axis, keepdims=keepdims),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_unknown_sizes_test_reshape_unknown_sizes.None_1.A_reshape_60_1_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_unknown_sizes_test_reshape_unknown_sizes.None_1.A_reshape_60_1_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 68, "end_line": 81, "span_ids": ["test_reshape_unknown_sizes"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reshape_unknown_sizes():\n a = np.random.random((10, 6, 6))\n A = da.from_array(a, chunks=(5, 2, 3))\n\n a2 = a.reshape((60, -1))\n A2 = A.reshape((60, -1))\n\n assert A2.shape == (60, 6)\n assert_eq(A2, a2)\n\n with pytest.raises(ValueError):\n a.reshape((60, -1, -1))\n with pytest.raises(ValueError):\n A.reshape((60, -1, -1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_chunked_no_merge_test_reshape_all_chunked_no_merge.assert_eq_result_base_re": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_chunked_no_merge_test_reshape_all_chunked_no_merge.assert_eq_result_base_re", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 85, "end_line": 130, "span_ids": ["test_reshape_all_chunked_no_merge"], "tokens": 768}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"inshape, inchunks, outshape, outchunks\",\n [\n # (2, 3, 4) -> (6, 4)\n ((2, 3, 4), ((1, 1), (1, 2), (2, 2)), (6, 4), ((1, 2, 1, 2), (2, 2))),\n # (1, 2, 3, 4) -> (12, 4)\n ((1, 2, 3, 4), ((1,), (1, 1), (1, 2), (2, 2)), (6, 4), ((1, 2, 1, 2), (2, 2))),\n # (2, 2, 3, 4) -> (12, 4)\n (\n (2, 2, 3, 4),\n ((1, 1), (1, 1), (1, 2), (2, 2)),\n (12, 4),\n ((1, 2, 1, 2, 1, 2, 1, 2), (2, 2)),\n ),\n # (2, 2, 3, 4) -> (4, 3, 4)\n (\n (2, 2, 3, 4),\n ((1, 1), (1, 1), (1, 2), (2, 2)),\n (4, 3, 4),\n ((1, 1, 1, 1), (1, 2), (2, 2)),\n ),\n # (2, 2, 3, 4) -> (4, 3, 4)\n ((2, 2, 3, 4), ((1, 1), (2,), (1, 2), (4,)), (4, 3, 4), ((2, 2), (1, 2), (4,))),\n # (2, 3, 4) -> (24,).\n ((2, 3, 4), ((1, 1), (1, 1, 1), (2, 2)), (24,), ((2,) * 12,)),\n # (2, 3, 4) -> (2, 12)\n ((2, 3, 4), ((1, 1), (1, 1, 1), (4,)), (2, 12), ((1, 1), (4,) * 3)),\n ],\n)\ndef test_reshape_all_chunked_no_merge(inshape, inchunks, outshape, outchunks):\n # https://github.com/dask/dask/issues/5544#issuecomment-712280433\n # When the early axes are completely chunked then we are just moving blocks\n # and can avoid any rechunking. 
The result inchunks are the same as the\n # input chunks.\n base = np.arange(np.prod(inshape)).reshape(inshape)\n a = da.from_array(base, chunks=inchunks)\n\n # test directly\n inchunks2, outchunks2 = reshape_rechunk(a.shape, outshape, inchunks)\n assert inchunks2 == inchunks\n assert outchunks2 == outchunks\n\n # and via reshape\n result = a.reshape(outshape)\n assert result.chunks == outchunks\n assert_eq(result, base.reshape(outshape))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_test_getitem_avoids_large_chunks.with_dask_config_set_ar.None_2.assert_result_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_test_getitem_avoids_large_chunks.with_dask_config_set_ar.None_2.assert_result_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 875, "end_line": 903, "span_ids": ["test_getitem_avoids_large_chunks"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_avoids_large_chunks():\n with dask.config.set({\"array.chunk-size\": \"0.1Mb\"}):\n a = np.arange(2 * 128 * 128, dtype=\"int64\").reshape(2, 128, 128)\n arr = da.from_array(a, chunks=(1, 128, 128))\n indexer = [0] + [1] * 11\n expected = a[indexer]\n\n # By default, we warn\n with pytest.warns(da.PerformanceWarning):\n result = arr[indexer]\n\n assert_eq(result, expected)\n assert result.chunks == ((1, 11), (128,), (128,))\n\n # Users can silence the warning\n with dask.config.set({\"array.slicing.split-large-chunks\": False}):\n with warnings.catch_warnings(record=True) as record:\n result = arr[indexer]\n assert_eq(result, expected)\n assert not record\n\n # Users can silence the warning\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n with warnings.catch_warnings(record=True) as record:\n result = arr[indexer]\n assert_eq(result, expected)\n assert not record\n\n assert result.chunks == ((1,) * 12, (128,), (128,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_missing_test_getitem_avoids_large_chunks_missing.with_dask_config_set_ar.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_getitem_avoids_large_chunks_missing_test_getitem_avoids_large_chunks_missing.with_dask_config_set_ar.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", 
"file_type": "text/x-python", "category": "test", "start_line": 915, "end_line": 935, "span_ids": ["test_getitem_avoids_large_chunks_missing"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"chunks\",\n [\n ((1, 1, 1, 1), (np.nan,), (np.nan,)),\n pytest.param(\n ((np.nan, np.nan, np.nan, np.nan), (500,), (500,)),\n marks=pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/6586\"),\n ),\n ],\n)\ndef test_getitem_avoids_large_chunks_missing(chunks):\n # We cannot apply the \"avoid large chunks\" optimization when\n # the chunks have unknown sizes.\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n a = np.arange(4 * 500 * 500).reshape(4, 500, 500)\n arr = da.from_array(a, chunks=(1, 500, 500))\n arr._chunks = chunks\n indexer = [0, 1] + [2] * 100 + [3]\n expected = a[indexer]\n result = arr[indexer]\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_avoids_large_chunks_test_take_avoids_large_chunks.with_dask_config_set_ar.None_11": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_avoids_large_chunks_test_take_avoids_large_chunks.with_dask_config_set_ar.None_11", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 938, "end_line": 962, "span_ids": ["test_take_avoids_large_chunks"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_avoids_large_chunks():\n # unit test for https://github.com/dask/dask/issues/6270\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n chunks = ((1, 1, 1, 1), (500,), (500,))\n itemsize = 8\n index = np.array([0, 1] + [2] * 101 + [3])\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((1, 1, 51, 50, 1), (500,), (500,))\n assert len(dsk) == 5\n\n index = np.array([0] * 101 + [1, 2, 3])\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((51, 50, 1, 1, 1), (500,), (500,))\n assert len(dsk) == 5\n\n index = np.array([0, 1, 2] + [3] * 101)\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((1, 1, 1, 51, 50), (500,), (500,))\n assert len(dsk) == 5\n\n chunks = ((500,), (1, 1, 1, 1), (500,))\n index = np.array([0, 1, 2] + [3] * 101)\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize, axis=1)\n assert chunks2 == ((500,), (1, 1, 1, 51, 50), (500,))\n assert len(dsk) == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_uses_config_test_take_uses_config.with_dask_config_set_ar.assert_len_dsk_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_take_uses_config_test_take_uses_config.with_dask_config_set_ar.assert_len_dsk_4", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 965, "end_line": 973, "span_ids": ["test_take_uses_config"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_uses_config():\n with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n chunks = ((1, 1, 1, 1), (500,), (500,))\n index = np.array([0, 1] + [2] * 101 + [3])\n itemsize = 8\n with config.set({\"array.chunk-size\": \"10GB\"}):\n chunks2, dsk = take(\"a\", \"b\", chunks, index, itemsize)\n assert chunks2 == ((1, 1, 101, 1), (500,), (500,))\n assert len(dsk) == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_draw_sizes_test_draw_sizes.assert_b_c_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_draw_sizes_test_draw_sizes.assert_b_c_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 79, "span_ids": ["test_draw_sizes"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_draw_sizes():\n assert draw_sizes((10, 10), size=100) == (100, 100) # respect symmetry\n assert draw_sizes((10, 10), size=200) == (200, 200) # respect size keyword\n assert draw_sizes((10, 5), size=100) == (100, 50) # respect small ratios\n\n a, b, c = draw_sizes((1000, 100, 10))\n assert a > b\n assert b > c\n assert a < b * 5\n assert b < c * 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_too_many_lines_fills_sides_darker_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_test_too_many_lines_fills_sides_darker_", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 92, "span_ids": ["test_too_many_lines_fills_sides_darker", "test_3d"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_too_many_lines_fills_sides_darker():\n data = da.ones((16000, 2400, 3600), chunks=(1, 2400, 3600))\n text = data.to_svg()\n assert \"8B4903\" in text\n assert text.count(\"\\n\") < 300\n\n\ndef test_3d():\n text = da.ones((10, 10, 10, 10, 10)).to_svg()\n assert text.count(\" set:\n \"\"\"Returns all possible keys in `tasks` including hashable literals.\n\n The definition of a key in a Dask graph is any hashable object\n that is not a task. This function returns all such objects in\n `tasks` even if the object is in fact a literal.\n\n \"\"\"\n ret = set()\n while tasks:\n work = []\n for w in tasks:\n typ = type(w)\n if typ is tuple and w and callable(w[0]): # istask(w)\n work.extend(w[1:])\n elif typ is list:\n work.extend(w)\n elif typ is dict:\n work.extend(w.values())\n else:\n try:\n ret.add(w)\n except TypeError: # not hashable\n pass\n tasks = work\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.nlargest_Series.isin.return.super_isin_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.nlargest_Series.isin.return.super_isin_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3013, "end_line": 3040, "span_ids": ["Series.nsmallest", "Series.isin", "Series.nlargest"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def nlargest(self, n=5, split_every=None):\n return aca(\n self,\n chunk=M.nlargest,\n aggregate=M.nlargest,\n meta=self._meta,\n token=\"series-nlargest\",\n split_every=split_every,\n n=n,\n )\n\n @derived_from(pd.Series)\n def nsmallest(self, n=5, split_every=None):\n return aca(\n self,\n chunk=M.nsmallest,\n aggregate=M.nsmallest,\n meta=self._meta,\n token=\"series-nsmallest\",\n split_every=split_every,\n n=n,\n )\n\n @derived_from(pd.Series)\n def isin(self, values):\n # Added just to get the different docstring for Series\n return super().isin(values)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_list__build_agg_args_list.return.dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_list__build_agg_args_list.return.dict_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 826, "end_line": 850, "span_ids": ["_build_agg_args_list"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_list(result_column, func, input_column):\n intermediate = _make_agg_id(\"list\", input_column)\n\n return dict(\n chunk_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(column=input_column, func=lambda s: s.apply(list)),\n )\n ],\n aggregate_funcs=[\n (\n intermediate,\n _apply_func_to_column,\n dict(\n column=intermediate,\n func=lambda s0: s0.apply(\n lambda chunks: list(it.chain.from_iterable(chunks))\n ),\n ),\n )\n ],\n finalizer=(result_column, itemgetter(intermediate), dict()),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy_SeriesGroupBy.__init__.super___init___df_by_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy_SeriesGroupBy.__init__.super___init___df_by_b", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2011, "end_line": 2034, "span_ids": ["SeriesGroupBy.__init__", "SeriesGroupBy"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n _token_prefix = \"series-groupby-\"\n\n def __init__(self, df, by=None, slice=None, observed=None, **kwargs):\n # for any non series object, raise pandas-compat error message\n # Hold off on setting observed by default: https://github.com/dask/dask/issues/6951\n observed = {\"observed\": observed} if observed is not None else {}\n\n if isinstance(df, Series):\n if isinstance(by, Series):\n pass\n elif isinstance(by, list):\n if len(by) == 0:\n raise ValueError(\"No group keys passed!\")\n\n non_series_items = [item for item in by if not isinstance(item, Series)]\n # raise error from pandas, if applicable\n\n df._meta.groupby(non_series_items, **observed)\n else:\n # raise error from pandas, if applicable\n df._meta.groupby(by, **observed)\n\n super().__init__(df, by=by, slice=slice, **observed, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py__Implementation_of_Hype_compute_first_bit.return.33_bits_sum_axis_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/hyperloglog.py__Implementation_of_Hype_compute_first_bit.return.33_bits_sum_axis_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/hyperloglog.py", "file_name": "hyperloglog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 22, "span_ids": ["compute_first_bit", "docstring"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Implementation of HyperLogLog\n\nThis implements the HyperLogLog algorithm for cardinality estimation, found\nin\n\n Philippe Flajolet, \u00c9ric Fusy, Olivier Gandouet and Fr\u00e9d\u00e9ric Meunier.\n \"HyperLogLog: the analysis of a near-optimal cardinality estimation\n algorithm\". 2007 Conference on Analysis of Algorithms. Nice, France\n (2007)\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom pandas.util import hash_pandas_object\n\n\ndef compute_first_bit(a):\n \"Compute the position of the first nonzero bit for each int in an array.\"\n # TODO: consider making this less memory-hungry\n bits = np.bitwise_and.outer(a, 1 << np.arange(32))\n bits = bits.cumsum(axis=1).astype(bool)\n return 33 - bits.sum(axis=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__meta_from_array__meta_from_array.return.meta__constructor_data_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__meta_from_array__meta_from_array.return.meta__constructor_data_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 23, "end_line": 76, "span_ids": ["_meta_from_array"], "tokens": 509}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _meta_from_array(x, columns=None, index=None, meta=None):\n \"\"\"Create empty DataFrame or Series which has correct dtype\"\"\"\n\n if x.ndim > 2:\n raise ValueError(\n \"from_array does not input more than 2D array, got\"\n \" array with shape %r\" % (x.shape,)\n )\n\n if index is not None:\n if not isinstance(index, Index):\n raise ValueError(\"'index' must be an instance of dask.dataframe.Index\")\n index = index._meta\n\n if meta is None:\n meta = pd.DataFrame()\n\n if getattr(x.dtype, \"names\", None) is not None:\n # record 
array has named columns\n if columns is None:\n columns = list(x.dtype.names)\n elif np.isscalar(columns):\n raise ValueError(\"For a struct dtype, columns must be a list.\")\n elif not all(i in x.dtype.names for i in columns):\n extra = sorted(set(columns).difference(x.dtype.names))\n raise ValueError(f\"dtype {x.dtype} doesn't have fields {extra}\")\n fields = x.dtype.fields\n dtypes = [fields[n][0] if n in fields else \"f8\" for n in columns]\n elif x.ndim == 1:\n if np.isscalar(columns) or columns is None:\n return meta._constructor_sliced(\n [], name=columns, dtype=x.dtype, index=index\n )\n elif len(columns) == 1:\n return meta._constructor(\n np.array([], dtype=x.dtype), columns=columns, index=index\n )\n raise ValueError(\n \"For a 1d array, columns must be a scalar or single element list\"\n )\n else:\n if np.isnan(x.shape[1]):\n raise ValueError(\"Shape along axis 1 must be known\")\n if columns is None:\n columns = list(range(x.shape[1])) if x.ndim == 2 else [0]\n elif len(columns) != x.shape[1]:\n raise ValueError(\n \"Number of column names must match width of the array. \"\n f\"Got {len(columns)} names for {x.shape[1]} columns\"\n )\n dtypes = [x.dtype] * len(columns)\n\n data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}\n return meta._constructor(data, columns=columns, index=index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql.if_not_isinstance_uri_st_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_to_sql.if_not_isinstance_uri_st_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 804, "end_line": 864, "span_ids": ["_extra_deps", "to_sql"], "tokens": 450}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_sql(\n df,\n name: str,\n uri: str,\n schema=None,\n if_exists: str = \"fail\",\n index: bool = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n compute=True,\n parallel=False,\n engine_kwargs=None,\n):\n if not isinstance(uri, str):\n raise ValueError(f\"Expected URI to be a string, got {type(uri)}.\")\n\n # This is the only argument we add on top of what Pandas supports\n kwargs = dict(\n name=name,\n uri=uri,\n engine_kwargs=engine_kwargs,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n )\n\n meta_task = delayed(_to_sql_chunk)(df._meta, **kwargs)\n\n # Partitions should always append to the empty table created from `meta` above\n worker_kwargs = dict(kwargs, if_exists=\"append\")\n\n if parallel:\n # Perform the meta insert, then one task that inserts all blocks concurrently:\n result = [\n _extra_deps(\n _to_sql_chunk,\n d,\n extras=meta_task,\n **worker_kwargs,\n dask_key_name=\"to_sql-%s\" % tokenize(d, **worker_kwargs),\n )\n for d in df.to_delayed()\n ]\n else:\n # Chain the 
\"meta\" insert and each block's insert\n result = []\n last = meta_task\n for d in df.to_delayed():\n result.append(\n _extra_deps(\n _to_sql_chunk,\n d,\n extras=last,\n **worker_kwargs,\n dask_key_name=\"to_sql-%s\" % tokenize(d, **worker_kwargs),\n )\n )\n last = result[-1]\n result = dask.delayed(result)\n\n if compute:\n dask.compute(result)\n else:\n return result\n\n\n@delayed\ndef _extra_deps(func, *args, extras=None, **kwargs):\n return func(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_with_null_partition_test_divisions_with_null_partition.assert_ddf_read_divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_divisions_with_null_partition_test_divisions_with_null_partition.assert_ddf_read_divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2815, "end_line": 2821, "span_ids": ["test_divisions_with_null_partition"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divisions_with_null_partition(tmpdir, engine):\n df = pd.DataFrame({\"a\": [1, 2, None, None], \"b\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(str(tmpdir), engine=engine, write_index=False)\n\n ddf_read = dd.read_parquet(str(tmpdir), engine=engine, index=\"a\")\n assert ddf_read.divisions == (None, None, None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_passing_engine_as_uri_raises_helpful_error_test_passing_engine_as_uri_raises_helpful_error.with_tmpfile_as_f_.with_pytest_raises_ValueE.ddf_to_sql_test_engine": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_passing_engine_as_uri_raises_helpful_error_test_passing_engine_as_uri_raises_helpful_error.with_tmpfile_as_f_.with_pytest_raises_ValueE.ddf_to_sql_test_engine", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 73, "span_ids": ["test_passing_engine_as_uri_raises_helpful_error"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_passing_engine_as_uri_raises_helpful_error(db):\n # https://github.com/dask/dask/issues/6473\n from sqlalchemy import create_engine\n\n df = pd.DataFrame([{\"i\": i, \"s\": str(i) * 2} for i in range(4)])\n ddf = dd.from_pandas(df, npartitions=2)\n\n with tmpfile() as f:\n db = \"sqlite:///%s\" % f\n engine = create_engine(db)\n with pytest.raises(ValueError, match=\"Expected URI to be a string\"):\n ddf.to_sql(\"test\", engine, if_exists=\"replace\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_meta_test_meta_no_head_rows.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_meta_test_meta_no_head_rows.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 285, "span_ids": ["test_meta_no_head_rows", "test_meta"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta(db):\n data = read_sql_table(\n \"test\", db, index_col=\"number\", meta=dd.from_pandas(df, npartitions=1)\n ).compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)\n\n\ndef test_meta_no_head_rows(db):\n data = read_sql_table(\n \"test\",\n db,\n index_col=\"number\",\n meta=dd.from_pandas(df, npartitions=1),\n npartitions=2,\n head_rows=0,\n )\n assert len(data.divisions) == 3\n data = data.compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)\n\n data = read_sql_table(\n \"test\",\n db,\n index_col=\"number\",\n meta=dd.from_pandas(df, npartitions=1),\n divisions=[0, 3, 6],\n head_rows=0,\n )\n assert len(data.divisions) == 3\n data = data.compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_meta_no_head_rows_test_datetimes.with_tmpfile_as_f_.assert_eq_data_map_partit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_meta_no_head_rows_test_datetimes.with_tmpfile_as_f_.assert_eq_data_map_partit", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 335, "end_line": 361, "span_ids": ["test_limits", "test_no_meta_no_head_rows", "test_datetimes"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_meta_no_head_rows(db):\n with pytest.raises(ValueError):\n read_sql_table(\"test\", db, index_col=\"number\", head_rows=0, npartitions=1)\n\n\ndef test_limits(db):\n data = read_sql_table(\"test\", db, npartitions=2, index_col=\"number\", limits=[1, 4])\n assert data.index.min().compute() == 1\n assert data.index.max().compute() == 4\n\n\ndef test_datetimes():\n import datetime\n\n now = datetime.datetime.now()\n d = datetime.timedelta(seconds=1)\n df = pd.DataFrame(\n {\"a\": list(\"ghjkl\"), \"b\": [now + i * d for i in range(2, -3, -1)]}\n )\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n df.to_sql(\"test\", uri, index=False, if_exists=\"replace\")\n data = read_sql_table(\"test\", uri, npartitions=2, index_col=\"b\")\n assert data.index.dtype.kind == \"M\"\n assert data.divisions[0] == df.b.min()\n df2 = df.set_index(\"b\")\n assert_eq(data.map_partitions(lambda x: x.sort_index()), df2.sort_index())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_disk__noop.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_disk__noop.return.x", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 520, "end_line": 577, "span_ids": ["rearrange_by_column_disk", "_noop"], "tokens": 476}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_column_disk(df, column, npartitions=None, compute=False):\n \"\"\"Shuffle using local disk\n\n See Also\n --------\n rearrange_by_column_tasks:\n Same function, but using tasks rather than partd\n Has a more informative docstring\n \"\"\"\n if npartitions is None:\n npartitions = df.npartitions\n\n token = tokenize(df, column, npartitions)\n always_new_token = uuid.uuid1().hex\n\n p = (\"zpartd-\" + always_new_token,)\n dsk1 = {p: (maybe_buffered_partd(),)}\n\n # Partition data on disk\n name = \"shuffle-partition-\" + always_new_token\n dsk2 = {\n (name, i): (shuffle_group_3, key, column, npartitions, p)\n for i, key in enumerate(df.__dask_keys__())\n }\n\n dependencies = []\n if compute:\n graph = HighLevelGraph.merge(df.dask, dsk1, dsk2)\n graph = HighLevelGraph.from_collections(name, graph, dependencies=[df])\n keys = [p, sorted(dsk2)]\n pp, values = compute_as_if_collection(DataFrame, graph, keys)\n dsk1 = {p: pp}\n dsk2 = dict(zip(sorted(dsk2), values))\n else:\n dependencies.append(df)\n\n # Barrier\n barrier_token = \"barrier-\" + always_new_token\n dsk3 = {barrier_token: (barrier, list(dsk2))}\n\n # Collect groups\n name = \"shuffle-collect-\" + token\n dsk4 = {\n (name, i): (collect, p, i, df._meta, barrier_token) for i in range(npartitions)\n }\n\n 
divisions = (None,) * (npartitions + 1)\n\n layer = toolz.merge(dsk1, dsk2, dsk3, dsk4)\n graph = HighLevelGraph.from_collections(name, layer, dependencies=dependencies)\n return new_dd_object(graph, name, df._meta, divisions)\n\n\ndef _noop(x, cleanup_token):\n \"\"\"\n A task that does nothing.\n \"\"\"\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks_rearrange_by_column_tasks.max_branch.max_branch_or_32": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks_rearrange_by_column_tasks.max_branch.max_branch_or_32", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 894, "end_line": 953, "span_ids": ["rearrange_by_column_tasks"], "tokens": 703}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_column_tasks(\n df, column, max_branch=32, npartitions=None, ignore_index=False\n):\n \"\"\"Order divisions of DataFrame so that all values within column(s) align\n\n This enacts a task-based shuffle. It contains most of the tricky logic\n around the complex network of tasks. Typically before this function is\n called a new column, ``\"_partitions\"`` has been added to the dataframe,\n containing the output partition number of every row. This function\n produces a new dataframe where every row is in the proper partition. It\n accomplishes this by splitting each input partition into several pieces,\n and then concatenating pieces from different input partitions into output\n partitions. If there are enough partitions then it does this work in\n stages to avoid scheduling overhead.\n\n Let's explain the motivation for this further. Imagine that we have 1000\n input partitions and 1000 output partitions. In theory we could split each\n input into 1000 pieces, and then move the 1 000 000 resulting pieces\n around, and then concatenate them all into 1000 output groups. This would\n be fine, but the central scheduling overhead of 1 000 000 tasks would\n become a bottleneck. Instead we do this in stages so that we split each of\n the 1000 inputs into 30 pieces (we now have 30 000 pieces), move those\n around, concatenate back down to 1000, and then do the same process again.\n This has the same result as the full transfer, but now we've moved data\n twice (expensive) but done so with only 60 000 tasks (cheap).\n\n Note that the `column` input may correspond to a list of columns (rather\n than just a single column name). In this case, the `shuffle_group` and\n `shuffle_group_2` functions will use hashing to map each row to an output\n partition. 
This approach may require the same rows to be hashed multiple\n times, but avoids the need to assign a new \"_partitions\" column.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n column: str or list\n A column name on which we want to split, commonly ``\"_partitions\"``\n which is assigned by functions upstream. This could also be a list of\n columns (in which case shuffle_group will create a hash array/column).\n max_branch: int\n The maximum number of splits per input partition. Defaults to 32.\n If there are more partitions than this then the shuffling will occur in\n stages in order to avoid creating npartitions**2 tasks.\n Increasing this number increases scheduling overhead but decreases the\n number of full-dataset transfers that we have to make.\n npartitions: Optional[int]\n The desired number of output partitions\n\n Returns\n -------\n df3: dask.dataframe.DataFrame\n\n See also\n --------\n rearrange_by_column_disk: same operation, but uses partd\n rearrange_by_column: parent function that calls this or rearrange_by_column_disk\n shuffle_group: does the actual splitting per-partition\n \"\"\"\n\n max_branch = max_branch or 32\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks.if_npartitions_or_df_npa_rearrange_by_column_tasks.return.df2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_rearrange_by_column_tasks.if_npartitions_or_df_npa_rearrange_by_column_tasks.return.df2", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 668, "end_line": 753, "span_ids": ["rearrange_by_column_tasks"], "tokens": 683}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rearrange_by_column_tasks(\n df, column, max_branch=32, npartitions=None, ignore_index=False\n):\n # ... other code\n\n if (npartitions or df.npartitions) <= max_branch:\n # We are creating a small number of output partitions.\n # No need for staged shuffling. 
Staged shuffling will\n # sometimes require extra work/communication in this case.\n token = tokenize(df, column, npartitions)\n shuffle_name = f\"simple-shuffle-{token}\"\n npartitions = npartitions or df.npartitions\n shuffle_layer = SimpleShuffleLayer(\n shuffle_name,\n column,\n npartitions,\n df.npartitions,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n shuffle_name, shuffle_layer, dependencies=[df]\n )\n return new_dd_object(graph, shuffle_name, df._meta, [None] * (npartitions + 1))\n\n n = df.npartitions\n stages = int(math.ceil(math.log(n) / math.log(max_branch)))\n if stages > 1:\n k = int(math.ceil(n ** (1 / stages)))\n else:\n k = n\n\n inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k**stages)]\n\n npartitions_orig = df.npartitions\n token = tokenize(df, stages, column, n, k)\n for stage in range(stages):\n stage_name = f\"shuffle-{stage}-{token}\"\n stage_layer = ShuffleLayer(\n stage_name,\n column,\n inputs,\n stage,\n npartitions,\n n,\n k,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n stage_name, stage_layer, dependencies=[df]\n )\n df = new_dd_object(graph, stage_name, df._meta, df.divisions)\n\n if npartitions is not None and npartitions != npartitions_orig:\n token = tokenize(df, npartitions)\n repartition_group_token = \"repartition-group-\" + token\n\n dsk = {\n (repartition_group_token, i): (\n shuffle_group_2,\n k,\n column,\n ignore_index,\n npartitions,\n )\n for i, k in enumerate(df.__dask_keys__())\n }\n\n repartition_get_name = \"repartition-get-\" + token\n\n for p in range(npartitions):\n dsk[(repartition_get_name, p)] = (\n shuffle_group_get,\n (repartition_group_token, p % npartitions_orig),\n p,\n )\n\n graph2 = HighLevelGraph.from_collections(\n repartition_get_name, dsk, dependencies=[df]\n )\n df2 = new_dd_object(\n graph2, repartition_get_name, df._meta, [None] * (npartitions + 1)\n )\n else:\n df2 = df\n df2.divisions = (None,) * (npartitions_orig + 1)\n\n return df2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_groupby_concat_cudf_test_groupby_concat_cudf.assert_eq_res_dd_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_groupby_concat_cudf_test_groupby_concat_cudf.assert_eq_res_dd_compute_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2223, "end_line": 2270, "span_ids": ["test_groupby_concat_cudf"], "tokens": 426}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_groupby_concat_cudf(engine):\n\n # NOTE: Issue #5643 Reproducer\n\n size = 6\n npartitions = 3\n d1 = pd.DataFrame(\n {\n \"a\": np.random.permutation(np.arange(size)),\n \"b\": 
np.random.randint(100, size=size),\n }\n )\n d2 = pd.DataFrame(\n {\n \"c\": np.random.permutation(np.arange(size)),\n \"d\": np.random.randint(100, size=size),\n }\n )\n\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n d1 = cudf.from_pandas(d1)\n d2 = cudf.from_pandas(d2)\n dd1 = dask_cudf.from_cudf(d1, npartitions)\n dd2 = dask_cudf.from_cudf(d2, npartitions)\n else:\n dd1 = dd.from_pandas(d1, npartitions)\n dd2 = dd.from_pandas(d2, npartitions)\n\n grouped_d1 = d1.groupby([\"a\"]).sum()\n grouped_d2 = d2.groupby([\"c\"]).sum()\n res = concat([grouped_d1, grouped_d2], axis=1)\n\n grouped_dd1 = dd1.groupby([\"a\"]).sum()\n grouped_dd2 = dd2.groupby([\"c\"]).sum()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n res_dd = dd.concat([grouped_dd1, grouped_dd2], axis=1)\n\n assert_eq(res_dd.compute().sort_index(), res.sort_index())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_columns_missing_from_left_test_categorical_merge_with_columns_missing_from_left.assert_assert_eq_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_columns_missing_from_left_test_categorical_merge_with_columns_missing_from_left.assert_assert_eq_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2175, "end_line": 2189, "span_ids": ["test_categorical_merge_with_columns_missing_from_left"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_merge_with_columns_missing_from_left():\n df1 = pd.DataFrame({\"A\": [0, 1], \"B\": pd.Categorical([\"a\", \"b\"])})\n df2 = pd.DataFrame({\"C\": pd.Categorical([\"a\", \"b\"])})\n\n expected = pd.merge(df2, df1, left_index=True, right_on=\"A\")\n\n ddf1 = dd.from_pandas(df1, npartitions=2)\n ddf2 = dd.from_pandas(df2, npartitions=2)\n\n actual = dd.merge(ddf2, ddf1, left_index=True, right_on=\"A\").compute()\n assert actual.C.dtype == \"category\"\n assert actual.B.dtype == \"category\"\n assert actual.A.dtype == \"int64\"\n assert actual.index.dtype == \"int64\"\n assert assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts.assert_assert_eq_expected": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts_test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts.assert_assert_eq_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2192, "end_line": 2206, "span_ids": ["test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_merge_with_merge_column_cat_in_one_and_not_other_upcasts():\n df1 = pd.DataFrame({\"A\": pd.Categorical([0, 1]), \"B\": pd.Categorical([\"a\", \"b\"])})\n df2 = pd.DataFrame({\"C\": pd.Categorical([\"a\", \"b\"])})\n\n expected = pd.merge(df2, df1, left_index=True, right_on=\"A\")\n\n ddf1 = dd.from_pandas(df1, npartitions=2)\n ddf2 = dd.from_pandas(df2, npartitions=2)\n\n actual = dd.merge(ddf2, ddf1, left_index=True, right_on=\"A\").compute()\n assert actual.C.dtype == \"category\"\n assert actual.B.dtype == \"category\"\n assert actual.A.dtype == \"int64\"\n assert actual.index.dtype == \"int64\"\n assert assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_test_shuffle_hlg_layer.assert_dsk_dict_culled_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_test_shuffle_hlg_layer.assert_dsk_dict_culled_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1152, "end_line": 1189, "span_ids": ["test_shuffle_hlg_layer"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shuffle_hlg_layer():\n # This test checks that the `ShuffleLayer` HLG Layer\n # is used (as expected) for a multi-stage shuffle.\n ddf = dd.from_pandas(\n pd.DataFrame({\"a\": np.random.randint(0, 10, 100)}), npartitions=10\n )\n # Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks\n ddf_shuffled = ddf.shuffle(\"a\", max_branch=3, shuffle=\"tasks\")\n keys = [(ddf_shuffled._name, i) for i in range(ddf_shuffled.npartitions)]\n\n # Cull the HLG\n dsk = ddf_shuffled.__dask_graph__()\n dsk_culled = dsk.cull(set(keys))\n assert isinstance(dsk_culled, dask.highlevelgraph.HighLevelGraph)\n\n # Ensure we have ShuffleLayers\n assert any(\n isinstance(layer, dd.shuffle.ShuffleLayer) for layer in dsk.layers.values()\n 
)\n\n # Check that the ShuffleLayers are non-materialized\n for layer in dsk.layers.values():\n if isinstance(layer, dd.shuffle.ShuffleLayer):\n assert not hasattr(layer, \"_cached_dict\")\n\n # Make sure HLG culling reduces the graph size\n assert len(dsk_culled) < len(dsk)\n\n # Check ShuffleLayer names\n for name, layer in dsk.layers.items():\n if isinstance(layer, dd.shuffle.ShuffleLayer):\n assert name.startswith(\"shuffle-\")\n\n # Since we already culled the HLG,\n # culling the dictionary should not change the graph\n dsk_dict = dict(dsk_culled)\n dsk_dict_culled, _ = cull(dsk_dict, keys)\n assert dsk_dict_culled == dsk_dict", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._toposort_layers_HighLevelGraph._toposort_layers.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph._toposort_layers_HighLevelGraph._toposort_layers.return.ret", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 814, "end_line": 843, "span_ids": ["HighLevelGraph._toposort_layers"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def _toposort_layers(self):\n \"\"\"Sort the layers in a high level graph topologically\n\n Parameters\n ----------\n hlg : HighLevelGraph\n The high level graph's layers to sort\n\n Returns\n -------\n sorted: list\n List of layer names sorted topologically\n \"\"\"\n degree = {k: len(v) for k, v in self.dependencies.items()}\n reverse_deps = {k: [] for k in self.dependencies}\n ready = []\n for k, v in self.dependencies.items():\n for dep in v:\n reverse_deps[dep].append(k)\n if not v:\n ready.append(k)\n ret = []\n while len(ready) > 0:\n layer = ready.pop()\n ret.append(layer)\n for rdep in reverse_deps[layer]:\n degree[rdep] -= 1\n if degree[rdep] == 0:\n ready.append(rdep)\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_HighLevelGraph.cull.return.HighLevelGraph_ret_layers": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_HighLevelGraph.cull.return.HighLevelGraph_ret_layers", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 910, "end_line": 962, "span_ids": ["HighLevelGraph.cull"], "tokens": 465}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def cull(self, keys: Iterable) -> HighLevelGraph:\n \"\"\"Return new HighLevelGraph with only the tasks required to calculate keys.\n\n In other words, remove unnecessary tasks from dask.\n\n Parameters\n ----------\n keys\n iterable of keys or nested list of keys such as the output of\n ``__dask_keys__()``\n\n Returns\n -------\n hlg: HighLevelGraph\n Culled high level graph\n \"\"\"\n keys_set = set(flatten(keys))\n\n all_ext_keys = self.get_all_external_keys()\n ret_layers = {}\n ret_key_deps = {}\n for layer_name in reversed(self._toposort_layers()):\n layer = self.layers[layer_name]\n # Let's cull the layer to produce its part of `keys`.\n # Note: use .intersection rather than & because the RHS is\n # a collections.abc.Set rather than a real set, and using &\n # would take time proportional to the size of the LHS, which\n # if there is no culling can be much bigger than the RHS.\n output_keys = keys_set.intersection(layer.get_output_keys())\n if output_keys:\n culled_layer, culled_deps = layer.cull(output_keys, all_ext_keys)\n # Update `keys` with all layer's external key dependencies, which\n # are all the layer's dependencies (`culled_deps`) excluding\n # the layer's output keys.\n external_deps = set()\n for d in culled_deps.values():\n external_deps |= d\n external_deps -= culled_layer.get_output_keys()\n keys_set |= external_deps\n\n # Save the culled layer and its key dependencies\n ret_layers[layer_name] = culled_layer\n ret_key_deps.update(culled_deps)\n\n # Converting dict_keys to a real set lets Python optimise the set\n # intersection to iterate over the smaller of the two sets.\n ret_layers_keys = set(ret_layers.keys())\n ret_dependencies = {\n layer_name: self.dependencies[layer_name] & ret_layers_keys\n for layer_name in ret_layers\n }\n\n return HighLevelGraph(ret_layers, ret_dependencies, ret_key_deps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_with_numpy_test_SubgraphCallable_with_numpy.assert_f1_f4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_with_numpy_test_SubgraphCallable_with_numpy.assert_f1_f4", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1140, "end_line": 1156, "span_ids": ["test_SubgraphCallable_with_numpy"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SubgraphCallable_with_numpy():\n np = pytest.importorskip(\"numpy\")\n\n # Testing support of numpy arrays in `dsk`, which uses elementwise equalities.\n dsk1 = {\"a\": np.arange(10)}\n f1 = SubgraphCallable(dsk1, \"a\", [None], name=\"test\")\n f2 = 
SubgraphCallable(dsk1, \"a\", [None], name=\"test\")\n assert f1 == f2\n\n # Notice, even though `dsk1` and `dsk2` are not equal they compare equal because\n # SubgraphCallable.__eq__() only checks name, outkeys, and inkeys.\n dsk2 = {\"a\": np.arange(10) + 1}\n f3 = SubgraphCallable(dsk2, \"a\", [None], name=\"test\")\n assert f1 == f3\n\n f4 = SubgraphCallable(dsk1, \"a\", [None], name=\"test2\")\n assert f1 != f4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_dense_dense.return.d_x_height_1_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_dense_dense.return.d_x_height_1_i_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 51, "span_ids": ["dense"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def dense(width, height):\n \"\"\"Full barriers between each step\"\"\"\n d = {(\"x\", 0, i): i for i in range(width)}\n for j in range(1, height):\n d.update(\n {\n (\"x\", j, i): (noop, [(\"x\", j - 1, k) for k in range(width)])\n for i in range(width)\n }\n )\n return d, [(\"x\", height - 1, i) for i in range(width)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_np_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 54, "end_line": 127, "span_ids": ["impl:3", "impl:59"], "tokens": 534}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\n\nx = np.logspace(0, 4, 10)\ntrivial_results = dict()\nfor get in (dask.get, threaded.get, local.get_sync, multiprocessing.get):\n y = list()\n for n in x:\n dsk, keys = trivial(int(n), 5)\n start = time()\n get(dsk, keys) # type: ignore\n end = time()\n y.append(end - start)\n trivial_results[get] = np.array(y)\n\n\n########\n# Plot #\n########\n\nf, (left, right) = plt.subplots(\n nrows=1, ncols=2, sharex=True, figsize=(12, 5), squeeze=True\n)\n\nfor get in trivial_results:\n left.loglog(x * 5, trivial_results[get], label=get.__module__)\n right.loglog(x * 5, trivial_results[get] / x, 
label=get.__module__)\n\nleft.set_title(\"Cost for Entire graph\")\nright.set_title(\"Cost per task\")\nleft.set_ylabel(\"Duration (s)\")\nright.set_ylabel(\"Duration (s)\")\nleft.set_xlabel(\"Number of tasks\")\nright.set_xlabel(\"Number of tasks\")\n\nplt.legend()\nplt.savefig(\"images/scaling-nodes.png\")\n\n#####################\n# Crosstalk example #\n#####################\n\nx = np.linspace(1, 100, 10)\ncrosstalk_results = dict()\nfor get in [threaded.get, local.get_sync]:\n y = list()\n for n in x:\n dsk, keys = crosstalk(1000, 5, int(n))\n start = time()\n get(dsk, keys) # type: ignore\n end = time()\n y.append(end - start)\n crosstalk_results[get] = np.array(y)\n\n########\n# Plot #\n########\n\nf, (left, right) = plt.subplots(\n nrows=1, ncols=2, sharex=True, figsize=(12, 5), squeeze=True\n)\n\nfor get in crosstalk_results:\n left.plot(x, crosstalk_results[get], label=get.__module__)\n right.semilogy(x, crosstalk_results[get] / 5000.0 / x, label=get.__module__)\n\nleft.set_title(\"Cost for Entire graph\")\nright.set_title(\"Cost per edge\")\nleft.set_ylabel(\"Duration (s)\")\nright.set_ylabel(\"Duration (s)\")\nleft.set_xlabel(\"Number of edges per task\")\nright.set_xlabel(\"Number of edges per task\")\nplt.legend()\nplt.savefig(\"images/scaling-edges.png\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_ensure_minimum_chunksize_ensure_minimum_chunksize.return.tuple_output_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_ensure_minimum_chunksize_ensure_minimum_chunksize.return.tuple_output_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 469, "span_ids": ["ensure_minimum_chunksize"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ensure_minimum_chunksize(size, chunks):\n \"\"\"Determine new chunks to ensure that every chunk >= size\n\n Parameters\n ----------\n size: int\n The minimum size of any chunk.\n chunks: tuple\n Chunks along one axis, e.g. 
``(3, 3, 2)``\n\n Examples\n --------\n >>> ensure_minimum_chunksize(10, (20, 20, 1))\n (20, 11, 10)\n >>> ensure_minimum_chunksize(3, (1, 1, 3))\n (5,)\n\n See Also\n --------\n overlap\n \"\"\"\n if size <= min(chunks):\n return chunks\n\n # add too-small chunks to chunks before them\n output = []\n new = 0\n for c in chunks:\n if c < size:\n if new > size + (size - c):\n output.append(new - (size - c))\n new = size\n else:\n new += c\n if new >= size:\n output.append(new)\n new = 0\n if c >= size:\n new += c\n if new >= size:\n output.append(new)\n elif len(output) >= 1:\n output[-1] += new\n else:\n raise ValueError(\n f\"The overlapping depth {size} is larger than your \" f\"array {sum(chunks)}.\"\n )\n\n return tuple(output)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_greater_than_smallest_chunk_combines_chunks_test_depth_greater_than_dim.with_pytest_raises_ValueE.overlap_darr_depth_depth": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_depth_greater_than_smallest_chunk_combines_chunks_test_depth_greater_than_dim.with_pytest_raises_ValueE.overlap_darr_depth_depth", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 602, "end_line": 626, "span_ids": ["test_depth_greater_than_smallest_chunk_combines_chunks", "test_depth_greater_than_dim"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"chunks\",\n [\n ((5, 5, 2), (5, 5, 2)),\n ((3, 3, 3, 3), (11, 1)),\n ],\n)\ndef test_depth_greater_than_smallest_chunk_combines_chunks(chunks):\n a = np.arange(144).reshape(12, 12)\n darr = da.from_array(a, chunks=chunks)\n\n depth = {0: 4, 1: 2}\n output = overlap(darr, depth=depth, boundary=1)\n\n assert all(c >= depth[0] * 2 for c in output.chunks[0])\n assert all(c >= depth[1] * 2 for c in output.chunks[1])\n\n\ndef test_depth_greater_than_dim():\n a = np.arange(144).reshape(12, 12)\n darr = da.from_array(a, chunks=(3, 5))\n\n depth = {0: 13, 1: 4}\n with pytest.raises(ValueError, match=\"The overlapping depth\"):\n overlap(darr, depth=depth, boundary=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_none_boundaries_test_none_boundaries.assert_eq_exp_res_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_none_boundaries_test_none_boundaries.assert_eq_exp_res_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": 
"test", "start_line": 629, "end_line": 640, "span_ids": ["test_none_boundaries"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_none_boundaries():\n x = da.from_array(np.arange(16).reshape(4, 4), chunks=(2, 2))\n exp = boundaries(x, 2, {0: \"none\", 1: 33})\n res = np.array(\n [\n [33, 33, 0, 1, 2, 3, 33, 33],\n [33, 33, 4, 5, 6, 7, 33, 33],\n [33, 33, 8, 9, 10, 11, 33, 33],\n [33, 33, 12, 13, 14, 15, 33, 33],\n ]\n )\n assert_eq(exp, res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_rechunks_array_if_needed_test_map_overlap_rechunks_array_along_multiple_dims_if_needed.assert_all_all_c_2_for": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_rechunks_array_if_needed_test_map_overlap_rechunks_array_along_multiple_dims_if_needed.assert_all_all_c_2_for", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 725, "end_line": 742, "span_ids": ["test_map_overlap_rechunks_array_along_multiple_dims_if_needed", "test_map_overlap_rechunks_array_if_needed"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_rechunks_array_if_needed():\n # https://github.com/dask/dask/issues/6597\n expected = np.arange(11)\n x = da.from_array(expected, chunks=5)\n y = x.map_overlap(lambda x: x, depth=2, boundary=0)\n assert all(c >= 2 for c in y.chunks[0])\n assert_eq(y, expected)\n\n\ndef test_map_overlap_rechunks_array_along_multiple_dims_if_needed():\n # https://github.com/dask/dask/issues/6688\n rand = da.random.random((860, 1024, 1024), chunks=(1, 1024, 1024))\n filtered = rand.map_overlap(\n lambda arr: arr,\n depth=(2, 2, 2),\n boundary=\"reflect\",\n )\n assert all(all(c >= 2 for c in chunks) for chunks in filtered.chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_chunk_read_file.with_fo_as_f_.return.list_reader_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_read_chunk_read_file.with_fo_as_f_.return.list_reader_f_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 140, "end_line": 168, "span_ids": ["read_chunk", "read_file"], 
"tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_chunk(fobj, off, l, head):\n \"\"\"Get rows from raw bytes block\"\"\"\n import fastavro\n\n if hasattr(fastavro, \"iter_avro\"):\n reader = fastavro.iter_avro\n else:\n reader = fastavro.reader\n\n with fobj as f:\n chunk = read_block(f, off, l, head[\"sync\"])\n head_bytes = head[\"head_bytes\"]\n if not chunk.startswith(MAGIC):\n chunk = head_bytes + chunk\n i = io.BytesIO(chunk)\n return list(reader(i))\n\n\ndef read_file(fo):\n \"\"\"Get rows from file-like\"\"\"\n import fastavro\n\n if hasattr(fastavro, \"iter_avro\"):\n reader = fastavro.iter_avro\n else:\n reader = fastavro.reader\n\n with fo as f:\n return list(reader(f))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupyx_register_cupyx.tensordot_lookup_register": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupyx_register_cupyx.tensordot_lookup_register", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 20, "end_line": 46, "span_ids": ["register_cupyx"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@tensordot_lookup.register_lazy(\"cupyx\")\n@concatenate_lookup.register_lazy(\"cupyx\")\ndef register_cupyx():\n\n from cupyx.scipy.sparse import spmatrix\n\n try:\n from cupyx.scipy.sparse import hstack, vstack\n except ImportError as e:\n raise ImportError(\n \"Stacking of sparse arrays requires at least CuPy version 8.0.0\"\n ) from e\n\n def _concat_cupy_sparse(L, axis=0):\n if axis == 0:\n return vstack(L)\n elif axis == 1:\n return hstack(L)\n else:\n msg = (\n \"Can only concatenate cupy sparse matrices for axis in \"\n \"{0, 1}. 
Got %s\" % axis\n )\n raise ValueError(msg)\n\n concatenate_lookup.register(spmatrix, _concat_cupy_sparse)\n tensordot_lookup.register(spmatrix, _tensordot_scipy_sparse)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_sparse_register_scipy_sparse.tensordot_lookup_register": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_sparse_register_scipy_sparse.tensordot_lookup_register", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 50, "end_line": 77, "span_ids": ["register_scipy_sparse", "register_sparse"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@tensordot_lookup.register_lazy(\"sparse\")\n@concatenate_lookup.register_lazy(\"sparse\")\ndef register_sparse():\n import sparse\n\n concatenate_lookup.register(sparse.COO, sparse.concatenate)\n tensordot_lookup.register(sparse.COO, sparse.tensordot)\n\n\n@tensordot_lookup.register_lazy(\"scipy\")\n@concatenate_lookup.register_lazy(\"scipy\")\ndef register_scipy_sparse():\n import scipy.sparse\n\n def _concatenate(L, axis=0):\n if axis == 0:\n return scipy.sparse.vstack(L)\n elif axis == 1:\n return scipy.sparse.hstack(L)\n else:\n msg = (\n \"Can only concatenate scipy sparse matrices for axis in \"\n \"{0, 1}. 
Got %s\" % axis\n )\n raise ValueError(msg)\n\n concatenate_lookup.register(scipy.sparse.spmatrix, _concatenate)\n tensordot_lookup.register(scipy.sparse.spmatrix, _tensordot_scipy_sparse)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__tensordot_scipy_sparse_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__tensordot_scipy_sparse_", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 80, "end_line": 95, "span_ids": ["_tensordot_scipy_sparse"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _tensordot_scipy_sparse(a, b, axes):\n assert a.ndim == b.ndim == 2\n assert len(axes[0]) == len(axes[1]) == 1\n (a_axis,) = axes[0]\n (b_axis,) = axes[1]\n assert a_axis in (0, 1) and b_axis in (0, 1)\n assert a.shape[a_axis] == b.shape[b_axis]\n if a_axis == 0 and b_axis == 0:\n return a.T * b\n elif a_axis == 0 and b_axis == 1:\n return a.T * b.T\n elif a_axis == 1 and b_axis == 0:\n return a * b\n elif a_axis == 1 and b_axis == 1:\n return a * b.T", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__reverse_lstsq.return.x_residuals_rank_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py__reverse_lstsq.return.x_residuals_rank_s", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1358, "end_line": 1428, "span_ids": ["_reverse", "lstsq"], "tokens": 703}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _reverse(x):\n return x[::-1]\n\n\ndef lstsq(a, b):\n \"\"\"\n Return the least-squares solution to a linear matrix equation using\n QR decomposition.\n\n Solves the equation `a x = b` by computing a vector `x` that\n minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may\n be under-, well-, or over- determined (i.e., the number of\n linearly independent rows of `a` can be less than, equal to, or\n greater than its number of linearly independent columns). 
If `a`\n is square and of full rank, then `x` (but for round-off error) is\n the \"exact\" solution of the equation.\n\n Parameters\n ----------\n a : (M, N) array_like\n \"Coefficient\" matrix.\n b : {(M,), (M, K)} array_like\n Ordinate or \"dependent variable\" values. If `b` is two-dimensional,\n the least-squares solution is calculated for each of the `K` columns\n of `b`.\n\n Returns\n -------\n x : {(N,), (N, K)} Array\n Least-squares solution. If `b` is two-dimensional,\n the solutions are in the `K` columns of `x`.\n residuals : {(1,), (K,)} Array\n Sums of residuals; squared Euclidean 2-norm for each column in\n ``b - a*x``.\n If `b` is 1-dimensional, this is a (1,) shape array.\n Otherwise the shape is (K,).\n rank : Array\n Rank of matrix `a`.\n s : (min(M, N),) Array\n Singular values of `a`.\n \"\"\"\n q, r = qr(a)\n x = solve_triangular(r, q.T.conj().dot(b))\n residuals = b - a.dot(x)\n residuals = abs(residuals**2).sum(axis=0, keepdims=b.ndim == 1)\n\n token = tokenize(a, b)\n\n # r must be a triangular with single block\n\n # rank\n rname = \"lstsq-rank-\" + token\n rdsk = {(rname,): (np.linalg.matrix_rank, (r.name, 0, 0))}\n graph = HighLevelGraph.from_collections(rname, rdsk, dependencies=[r])\n # rank must be an integer\n rank = Array(graph, rname, shape=(), chunks=(), dtype=int)\n\n # singular\n sname = \"lstsq-singular-\" + token\n rt = r.T.conj()\n sdsk = {\n (sname, 0): (\n _reverse,\n (np.sqrt, (np.linalg.eigvalsh, (np.dot, (rt.name, 0, 0), (r.name, 0, 0)))),\n )\n }\n graph = HighLevelGraph.from_collections(sname, sdsk, dependencies=[rt, r])\n meta = meta_from_array(residuals, 1)\n s = Array(graph, sname, shape=(r.shape[0],), chunks=r.shape[0], meta=meta)\n\n return x, residuals, rank, s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_tensordot_tensordot.if_concatenate_.else_.return.intermediate_sum_axis_lef": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_tensordot_tensordot.if_concatenate_.else_.return.intermediate_sum_axis_lef", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 222, "end_line": 270, "span_ids": ["tensordot"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef tensordot(lhs, rhs, axes=2):\n if isinstance(axes, Iterable):\n left_axes, right_axes = axes\n else:\n left_axes = tuple(range(lhs.ndim - axes, lhs.ndim))\n right_axes = tuple(range(0, axes))\n\n if isinstance(left_axes, Integral):\n left_axes = (left_axes,)\n if isinstance(right_axes, Integral):\n right_axes = (right_axes,)\n if isinstance(left_axes, list):\n left_axes = tuple(left_axes)\n if isinstance(right_axes, list):\n right_axes = tuple(right_axes)\n if len(left_axes) == 1:\n concatenate = True\n else:\n concatenate = False\n\n dt = np.promote_types(lhs.dtype, rhs.dtype)\n\n left_index = list(range(lhs.ndim))\n 
right_index = list(range(lhs.ndim, lhs.ndim + rhs.ndim))\n out_index = left_index + right_index\n\n for l, r in zip(left_axes, right_axes):\n out_index.remove(right_index[r])\n right_index[r] = left_index[l]\n if concatenate:\n out_index.remove(left_index[l])\n\n intermediate = blockwise(\n _tensordot,\n out_index,\n lhs,\n left_index,\n rhs,\n right_index,\n dtype=dt,\n concatenate=concatenate,\n axes=(left_axes, right_axes),\n )\n\n if concatenate:\n return intermediate\n else:\n return intermediate.sum(axis=left_axes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unravel_index_unravel_index.return.unraveled_indices": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unravel_index_unravel_index.return.unraveled_indices", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1739, "end_line": 1754, "span_ids": ["unravel_index"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef unravel_index(indices, shape, order=\"C\"):\n if shape and indices.size:\n unraveled_indices = tuple(\n indices.map_blocks(\n _unravel_index_kernel,\n dtype=np.intp,\n chunks=(((len(shape),),) + indices.chunks),\n new_axis=0,\n func_kwargs={\"shape\": shape, \"order\": order},\n )\n )\n else:\n unraveled_indices = tuple(empty((0,), dtype=np.intp, chunks=1) for i in shape)\n\n return unraveled_indices", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_optimize_blockwise_annotations_test_optimize_blockwise_annotations.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_optimize_blockwise_annotations_test_optimize_blockwise_annotations.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 233, "span_ids": ["test_optimize_blockwise_annotations"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_blockwise_annotations():\n a = da.ones(10, chunks=(5,))\n b = a + 1\n\n with dask.annotate(qux=\"foo\"):\n c = b + 2\n d = c + 3\n\n with dask.annotate(qux=\"baz\"):\n e = d + 4\n f = e + 5\n\n g = f + 6\n\n dsk = 
da.optimization.optimize_blockwise(g.dask)\n\n annotations = (\n layer.annotations\n for layer in dsk.layers.values()\n if isinstance(layer, Blockwise)\n )\n annotations = collections.Counter(\n tuple(a.items()) if type(a) is dict else a for a in annotations\n )\n\n assert len(annotations) == 3\n assert annotations[None] == 2\n assert annotations[((\"qux\", \"baz\"),)] == 1\n assert annotations[((\"qux\", \"foo\"),)] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_compute_test_svd_compressed_compute.assert_eq_v_vv_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_compute_test_svd_compressed_compute.assert_eq_v_vv_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 456, "end_line": 467, "span_ids": ["test_svd_compressed_compute"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"iterator\", [\"power\", \"QR\"])\ndef test_svd_compressed_compute(iterator):\n x = da.ones((100, 100), chunks=(10, 10))\n u, s, v = da.linalg.svd_compressed(\n x, k=2, iterator=iterator, n_power_iter=1, compute=True, seed=123\n )\n uu, ss, vv = da.linalg.svd_compressed(\n x, k=2, iterator=iterator, n_power_iter=1, seed=123\n )\n\n assert len(v.dask) < len(vv.dask)\n assert_eq(v, vv)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_test_svd_compressed._v_must_be_orthonormal": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_test_svd_compressed._v_must_be_orthonormal", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 466, "end_line": 492, "span_ids": ["test_svd_compressed"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"iterator\", [(\"power\", 2), (\"QR\", 2)])\ndef test_svd_compressed(iterator):\n m, n = 100, 50\n r = 5\n a = da.random.random((m, n), chunks=(m, n))\n\n # calculate approximation and true singular values\n u, s, vt = svd_compressed(\n a, 2 * r, iterator=iterator[0], n_power_iter=iterator[1], seed=4321\n ) # worst case\n 
s_true = scipy.linalg.svd(a.compute(), compute_uv=False)\n\n # compute the difference with original matrix\n norm = scipy.linalg.norm((a - (u[:, :r] * s[:r]) @ vt[:r, :]).compute(), 2)\n\n # ||a-a_hat||_2 <= (1+tol)s_{k+1}: based on eq. 1.10/1.11:\n # Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp.\n # \"Finding structure with randomness: Probabilistic algorithms for constructing\n # approximate matrix decompositions.\" SIAM review 53.2 (2011): 217-288.\n frac = norm / s_true[r + 1] - 1\n # Tolerance determined via simulation to be slightly above max norm of difference matrix in 10k samples.\n # See https://github.com/dask/dask/pull/6799#issuecomment-726631175 for more details.\n tol = 0.4\n assert frac < tol\n\n assert_eq(np.eye(r, r), da.dot(u[:, :r].T, u[:, :r])) # u must be orthonormal\n assert_eq(np.eye(r, r), da.dot(vt[:r, :], vt[:r, :].T)) # v must be orthonormal", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_dtype_preservation_test_svd_dtype_preservation.assert_u_dtype_s_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_dtype_preservation_test_svd_dtype_preservation.assert_u_dtype_s_dtype", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 495, "end_line": 509, "span_ids": ["test_svd_compressed_dtype_preservation", "test_svd_dtype_preservation"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"input_dtype, output_dtype\", [(np.float32, np.float32), (np.float64, np.float64)]\n)\ndef test_svd_compressed_dtype_preservation(input_dtype, output_dtype):\n x = da.random.random((50, 50), chunks=(50, 50)).astype(input_dtype)\n u, s, vt = svd_compressed(x, 1, seed=4321)\n assert u.dtype == s.dtype == vt.dtype == output_dtype\n\n\n@pytest.mark.parametrize(\"chunks\", [(10, 50), (50, 10), (-1, -1)])\n@pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])\ndef test_svd_dtype_preservation(chunks, dtype):\n x = da.random.random((50, 50), chunks=chunks).astype(dtype)\n u, s, v = svd(x)\n assert u.dtype == s.dtype == v.dtype == dtype", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_deterministic_test_svd_compressed_deterministic.assert_all_da_compute_u_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_deterministic_test_svd_compressed_deterministic.assert_all_da_compute_u_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 512, "end_line": 518, "span_ids": ["test_svd_compressed_deterministic"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_svd_compressed_deterministic():\n m, n = 30, 25\n x = da.random.RandomState(1234).random_sample(size=(m, n), chunks=(5, 5))\n u, s, vt = svd_compressed(x, 3, seed=1234)\n u2, s2, vt2 = svd_compressed(x, 3, seed=1234)\n\n assert all(da.compute((u == u2).all(), (s == s2).all(), (vt == vt2).all()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_double_dependencies_test_fuse_roots.assert_eq_zz_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_double_dependencies_test_fuse_roots.assert_eq_zz_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 407, "end_line": 423, "span_ids": ["test_double_dependencies", "test_fuse_roots"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_double_dependencies():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(4, 4))\n X = d + 1\n X = da.dot(X, X.T)\n\n assert_eq(X.compute(optimize_graph=False), X)\n\n\ndef test_fuse_roots():\n x = da.ones(10, chunks=(2,))\n y = da.zeros(10, chunks=(2,))\n z = (x + 1) + (2 * y**2)\n (zz,) = dask.optimize(z)\n # assert len(zz.dask) == 5\n assert sum(map(dask.istask, zz.dask.values())) == 5 # there are some aliases\n assert_eq(zz, z)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_test_overlap.u_depth.np_uint16_2_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap_test_overlap.u_depth.np_uint16_2_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 249, "span_ids": ["test_overlap"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n g = overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: \"reflect\"})\n assert g.chunks == ((8, 8), (6, 6))\n expected = np.array(\n [\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [0, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 7],\n [8, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 15],\n [16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [16, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 23],\n [24, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 31],\n [32, 32, 33, 34, 35, 36, 35, 36, 37, 38, 39, 39],\n [40, 40, 41, 42, 43, 44, 43, 44, 45, 46, 47, 47],\n [48, 48, 49, 50, 51, 52, 51, 52, 53, 54, 55, 55],\n [56, 56, 57, 58, 59, 60, 59, 60, 61, 62, 63, 63],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n ]\n )\n assert_eq(g, expected)\n assert same_keys(g, overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: \"reflect\"}))\n\n u_depth = np.uint16([2, 1])\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap.u_depth_5_test_overlap.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_overlap.u_depth_5_test_overlap.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 250, "end_line": 284, "span_ids": ["test_overlap"], "tokens": 754}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_overlap():\n # ... 
other code\n u_depth = {k: v for k, v in enumerate(u_depth)}\n g = overlap(d, depth=u_depth, boundary={0: 100, 1: \"reflect\"})\n assert g.chunks == ((8, 8), (6, 6))\n assert_eq(g, expected)\n assert same_keys(g, overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: \"reflect\"}))\n\n g = overlap(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: \"none\"})\n expected = np.array(\n [\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 59, 60, 61, 62, 63],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n ]\n )\n assert_eq(g, expected)\n assert g.chunks == ((8, 8), (5, 5))\n\n u_depth = np.uint16([2, 1])\n u_depth = {k: v for k, v in enumerate(u_depth)}\n g = overlap(d, depth=u_depth, boundary={0: 100, 1: \"none\"})\n assert_eq(g, expected)\n assert g.chunks == ((8, 8), (5, 5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_not_chunked_merge_test_reshape_all_not_chunked_merge.assert_eq_result_base_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_all_not_chunked_merge_test_reshape_all_not_chunked_merge.assert_eq_result_base_re", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 130, "end_line": 151, "span_ids": ["test_reshape_all_not_chunked_merge"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"inshape, inchunks, expected_inchunks, outshape, outchunks\",\n [\n # (2, 3, 4) -> (24,). 
This does merge, since the second dim isn't fully chunked!\n ((2, 3, 4), ((1, 1), (1, 2), (2, 2)), ((1, 1), (3,), (4,)), (24,), ((12, 12),)),\n ],\n)\ndef test_reshape_all_not_chunked_merge(\n inshape, inchunks, expected_inchunks, outshape, outchunks\n):\n base = np.arange(np.prod(inshape)).reshape(inshape)\n a = da.from_array(base, chunks=inchunks)\n\n # test directly\n inchunks2, outchunks2 = reshape_rechunk(a.shape, outshape, inchunks)\n assert inchunks2 == expected_inchunks\n assert outchunks2 == outchunks\n\n # and via reshape\n result = a.reshape(outshape)\n assert result.chunks == outchunks\n assert_eq(result, base.reshape(outshape))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_merge_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_test_reshape_merge_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 154, "end_line": 205, "span_ids": ["test_reshape_merge_chunks"], "tokens": 766}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"inshape, inchunks, outshape, outchunks\",\n [\n # (2, 3, 4) -> (6, 4)\n ((2, 3, 4), ((2,), (1, 2), (2, 2)), (6, 4), ((1, 2, 1, 2), (2, 2))),\n # (1, 2, 3, 4) -> (12, 4)\n ((1, 2, 3, 4), ((1,), (2,), (1, 2), (2, 2)), (6, 4), ((1, 2, 1, 2), (2, 2))),\n # (2, 2, 3, 4) -> (12, 4) (3 cases)\n (\n (2, 2, 3, 4),\n ((1, 1), (2,), (1, 2), (2, 2)),\n (12, 4),\n ((1, 2, 1, 2, 1, 2, 1, 2), (2, 2)),\n ),\n (\n (2, 2, 3, 4),\n ((2,), (1, 1), (1, 2), (2, 2)),\n (12, 4),\n ((1, 2, 1, 2, 1, 2, 1, 2), (2, 2)),\n ),\n (\n (2, 2, 3, 4),\n ((2,), (2,), (1, 2), (2, 2)),\n (12, 4),\n ((1, 2, 1, 2, 1, 2, 1, 2), (2, 2)),\n ),\n # (2, 2, 3, 4) -> (4, 3, 4)\n # TODO: I'm confused about the behavior in this case.\n # (\n # (2, 2, 3, 4),\n # ((2,), (2,), (1, 2), (2, 2)),\n # (4, 3, 4),\n # ((1, 1, 1, 1), (1, 2), (2, 2)),\n # ),\n # (2, 2, 3, 4) -> (4, 3, 4)\n ((2, 2, 3, 4), ((2,), (2,), (1, 2), (4,)), (4, 3, 4), ((2, 2), (1, 2), (4,))),\n ],\n)\ndef test_reshape_merge_chunks(inshape, inchunks, outshape, outchunks):\n # https://github.com/dask/dask/issues/5544#issuecomment-712280433\n # When the early axes are completely chunked then we are just moving blocks\n # and can avoid any rechunking. 
The outchunks will always be ...\n base = np.arange(np.prod(inshape)).reshape(inshape)\n a = da.from_array(base, chunks=inchunks)\n\n # and via reshape\n result = a.reshape(outshape, merge_chunks=False)\n assert result.chunks == outchunks\n assert_eq(result, base.reshape(outshape))\n\n assert result.chunks != a.reshape(outshape).chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_method_test_unravel_index_empty.assert_len_d_indices_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_nonzero_method_test_unravel_index_empty.assert_len_d_indices_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1451, "end_line": 1475, "span_ids": ["test_nonzero_method", "test_unravel_index_empty"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonzero_method():\n for shape, chunks in [(0, ()), ((0, 0), (0, 0)), ((15, 16), (4, 5))]:\n x = np.random.randint(10, size=shape)\n d = da.from_array(x, chunks=chunks)\n\n x_nz = x.nonzero()\n d_nz = d.nonzero()\n\n assert isinstance(d_nz, type(x_nz))\n assert len(d_nz) == len(x_nz)\n\n for i in range(len(x_nz)):\n assert_eq(d_nz[i], x_nz[i])\n\n\ndef test_unravel_index_empty():\n shape = tuple()\n findices = np.array(0, dtype=int)\n d_findices = da.from_array(findices, chunks=1)\n\n indices = np.unravel_index(findices, shape)\n d_indices = da.unravel_index(d_findices, shape)\n\n assert isinstance(d_indices, type(indices))\n assert len(d_indices) == len(indices) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_test_ravel_multi_index.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_test_ravel_multi_index.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1850, "end_line": 1895, "span_ids": ["test_ravel_multi_index"], "tokens": 559}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"asarray\",\n [\n lambda x: x,\n lambda x: [np.asarray(a) for a in x],\n lambda x: [da.asarray(a) for a in x],\n 
np.asarray,\n da.from_array,\n ],\n)\n@pytest.mark.parametrize(\n \"arr, chunks, kwargs\",\n [\n # Numpy doctests:\n ([[3, 6, 6], [4, 5, 1]], (2, 3), dict(dims=(7, 6), order=\"C\")),\n ([[3, 6, 6], [4, 5, 1]], (2, 1), dict(dims=(7, 6), order=\"F\")),\n ([[3, 6, 6], [4, 5, 1]], 1, dict(dims=(4, 6), mode=\"clip\")),\n ([[3, 6, 6], [4, 5, 1]], (2, 3), dict(dims=(4, 4), mode=(\"clip\", \"wrap\"))),\n # Shape tests:\n ([[3, 6, 6]], (1, 1), dict(dims=(7), order=\"C\")),\n ([[3, 6, 6], [4, 5, 1], [8, 6, 2]], (3, 1), dict(dims=(7, 6, 9), order=\"C\")),\n # Multi-dimensional index arrays\n (\n np.arange(6).reshape(3, 2, 1).tolist(),\n (1, 2, 1),\n dict(dims=(7, 6, 9), order=\"C\"),\n ),\n # Broadcasting index arrays\n ([1, [2, 3]], None, dict(dims=(8, 9))),\n ([1, [2, 3], [[1, 2], [3, 4], [5, 6], [7, 8]]], None, dict(dims=(8, 9, 10))),\n ],\n)\ndef test_ravel_multi_index(asarray, arr, chunks, kwargs):\n if any(np.isscalar(x) for x in arr) and asarray in (np.asarray, da.from_array):\n pytest.skip()\n\n if asarray is da.from_array:\n arr = np.asarray(arr)\n input = da.from_array(arr, chunks=chunks)\n else:\n arr = input = asarray(arr)\n\n assert_eq(\n np.ravel_multi_index(arr, **kwargs),\n da.ravel_multi_index(input, **kwargs),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_nan_raises_test_nan_raises.with_pytest_raises_NotImp.func_None_nargs_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_nan_raises_test_nan_raises.with_pytest_raises_NotImp.func_None_nargs_na", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 131, "span_ids": ["test_nan_raises"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func, nargs\",\n [\n (dask.array.stats.ttest_1samp, 2),\n (dask.array.stats.ttest_rel, 2),\n (dask.array.stats.skewtest, 1),\n (dask.array.stats.kurtosis, 1),\n (dask.array.stats.kurtosistest, 1),\n (dask.array.stats.normaltest, 1),\n (dask.array.stats.moment, 1),\n ],\n)\n@pytest.mark.parametrize(\"nan_policy\", [\"omit\", \"raise\"])\ndef test_nan_raises(func, nargs, nan_policy):\n with pytest.raises(NotImplementedError):\n func(*(None,) * nargs, nan_policy=nan_policy)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_sample_k_equal_bag_size_with_unbalanced_partitions_test_sample_return_bag.assert_isinstance_random_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_sample_k_equal_bag_size_with_unbalanced_partitions_test_sample_return_bag.assert_isinstance_random_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 115, "span_ids": ["test_sample_k_equal_bag_size_with_unbalanced_partitions", "test_sample_return_bag", "test_weighted_sampling_without_replacement"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sample_k_equal_bag_size_with_unbalanced_partitions():\n seq = range(10)\n sut = db.from_sequence(seq, partition_size=9)\n li = list(random.sample(sut, k=10).compute())\n assert sut.map_partitions(len).compute() == (9, 1)\n assert len(li) == 10\n assert all(i in seq for i in li)\n assert len(set(li)) == len(li)\n\n\ndef test_weighted_sampling_without_replacement():\n population = range(4)\n p = [0.01, 0.33, 0.33, 0.33]\n k = 3\n sampled = random._weighted_sampling_without_replacement(\n population=population, weights=p, k=k\n )\n assert len(set(sampled)) == k\n\n\ndef test_sample_return_bag():\n seq = range(20)\n sut = db.from_sequence(seq, npartitions=3)\n assert isinstance(random.sample(sut, k=2), db.Bag)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_partitions_are_coerced_to_lists_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_random.py_test_partitions_are_coerced_to_lists_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_random.py", "file_name": "test_random.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 127, "span_ids": ["test_partitions_are_coerced_to_lists"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partitions_are_coerced_to_lists():\n # https://github.com/dask/dask/issues/6906\n A = db.from_sequence([[1, 2], [3, 4, 5], [6], [7]])\n B = db.from_sequence([\"a\", \"b\", \"c\", \"d\"])\n\n a = random.choices(A.flatten(), k=B.count().compute()).repartition(4)\n\n C = db.zip(B, a).compute()\n assert len(C) == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_annotate_annotate.with_config_set_new_annot.yield": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_annotate_annotate.with_config_set_new_annot.yield", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 145, "span_ids": ["annotate"], "tokens": 788}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef annotate(**annotations):\n \"\"\"Context Manager for setting HighLevelGraph Layer annotations.\n\n Annotations are metadata or soft constraints associated with\n tasks that dask schedulers may choose to respect: They signal intent\n without enforcing hard constraints. As such, they are\n primarily designed for use with the distributed scheduler.\n\n Almost any object can serve as an annotation, but small Python objects\n are preferred, while large objects such as NumPy arrays are discouraged.\n\n Callables supplied as an annotation should take a single *key* argument and\n produce the appropriate annotation. Individual task keys in the annotated collection\n are supplied to the callable.\n\n Parameters\n ----------\n **annotations : key-value pairs\n\n Examples\n --------\n\n All tasks within array A should have priority 100 and be retried 3 times\n on failure.\n\n >>> import dask\n >>> import dask.array as da\n >>> with dask.annotate(priority=100, retries=3):\n ... A = da.ones((10000, 10000))\n\n Prioritise tasks within Array A on flattened block ID.\n\n >>> nblocks = (10, 10)\n >>> with dask.annotate(priority=lambda k: k[1]*nblocks[1] + k[2]):\n ... A = da.ones((1000, 1000), chunks=(100, 100))\n\n Annotations may be nested.\n\n >>> with dask.annotate(priority=1):\n ... with dask.annotate(retries=3):\n ... A = da.ones((1000, 1000))\n ... 
B = A + 1\n \"\"\"\n\n # Sanity check annotations used in place of\n # legacy distributed Client.{submit, persist, compute} keywords\n if \"workers\" in annotations:\n if isinstance(annotations[\"workers\"], (list, set, tuple)):\n annotations[\"workers\"] = list(annotations[\"workers\"])\n elif isinstance(annotations[\"workers\"], str):\n annotations[\"workers\"] = [annotations[\"workers\"]]\n elif callable(annotations[\"workers\"]):\n pass\n else:\n raise TypeError(\n \"'workers' annotation must be a sequence of str, a str or a callable, but got %s.\"\n % annotations[\"workers\"]\n )\n\n if (\n \"priority\" in annotations\n and not isinstance(annotations[\"priority\"], Number)\n and not callable(annotations[\"priority\"])\n ):\n raise TypeError(\n \"'priority' annotation must be a Number or a callable, but got %s\"\n % annotations[\"priority\"]\n )\n\n if (\n \"retries\" in annotations\n and not isinstance(annotations[\"retries\"], Number)\n and not callable(annotations[\"retries\"])\n ):\n raise TypeError(\n \"'retries' annotation must be a Number or a callable, but got %s\"\n % annotations[\"retries\"]\n )\n\n if (\n \"resources\" in annotations\n and not isinstance(annotations[\"resources\"], dict)\n and not callable(annotations[\"resources\"])\n ):\n raise TypeError(\n \"'resources' annotation must be a dict, but got %s\"\n % annotations[\"resources\"]\n )\n\n if (\n \"allow_other_workers\" in annotations\n and not isinstance(annotations[\"allow_other_workers\"], bool)\n and not callable(annotations[\"allow_other_workers\"])\n ):\n raise TypeError(\n \"'allow_other_workers' annotations must be a bool or a callable, but got %s\"\n % annotations[\"allow_other_workers\"]\n )\n\n prev_annotations = config.get(\"annotations\", {})\n new_annotations = {\n **prev_annotations,\n **{f\"annotations.{k}\": v for k, v in annotations.items()},\n }\n\n with config.set(new_annotations):\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_is_dask_collection_DaskMethodsMixin.visualize.return.visualize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_is_dask_collection_DaskMethodsMixin.visualize.return.visualize_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 215, "span_ids": ["is_dask_collection", "DaskMethodsMixin", "DaskMethodsMixin.visualize"], "tokens": 470}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_dask_collection(x):\n \"\"\"Returns ``True`` if ``x`` is a dask collection\"\"\"\n try:\n return x.__dask_graph__() is not None\n except (AttributeError, TypeError):\n return False\n\n\nclass DaskMethodsMixin:\n \"\"\"A mixin adding standard dask collection methods\"\"\"\n\n __slots__ = ()\n\n def visualize(self, filename=\"mydask\", format=None, optimize_graph=False, **kwargs):\n \"\"\"Render the computation of this object's task graph using graphviz.\n\n 
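A minimal sketch of how `annotate` interacts with dask config, per the implementation above: each keyword is stored under an ``annotations.`` prefix for the duration of the context, and the sanity checks fail fast with ``TypeError`` on entry:

import dask

with dask.annotate(priority=100, retries=3):
    # keywords are visible in config while the context is active
    assert dask.config.get("annotations.priority") == 100
    assert dask.config.get("annotations.retries") == 3

try:
    with dask.annotate(workers=123):  # not a str, sequence of str, or callable
        pass
except TypeError:
    pass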
Requires ``graphviz`` to be installed.\n\n Parameters\n ----------\n filename : str or None, optional\n The name of the file to write to disk. If the provided `filename`\n doesn't include an extension, '.png' will be used by default.\n If `filename` is None, no file will be written, and we communicate\n with dot using only pipes.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n optimize_graph : bool, optional\n If True, the graph is optimized before rendering. Otherwise,\n the graph is displayed as is. Default is False.\n color: {None, 'order'}, optional\n Options to color nodes. Provide ``cmap=`` keyword for additional\n colormap\n **kwargs\n Additional keyword arguments to forward to ``to_graphviz``.\n\n Examples\n --------\n >>> x.visualize(filename='dask.pdf') # doctest: +SKIP\n >>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP\n\n Returns\n -------\n result : IPython.display.Image, IPython.display.SVG, or None\n See dask.dot.dot_graph for more information.\n\n See Also\n --------\n dask.base.visualize\n dask.dot.dot_graph\n\n Notes\n -----\n For more information on optimization see here:\n\n https://docs.dask.org/en/latest/optimize.html\n \"\"\"\n return visualize(\n self,\n filename=filename,\n format=format,\n optimize_graph=optimize_graph,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.get_output_keys_Blockwise.is_materialized.return.hasattr_self__cached_di": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.get_output_keys_Blockwise.is_materialized.return.hasattr_self__cached_di", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 457, "end_line": 485, "span_ids": ["Blockwise.get_output_keys", "Blockwise.__getitem__", "Blockwise.__iter__", "Blockwise.is_materialized", "Blockwise.__len__"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def get_output_keys(self):\n if self.output_blocks:\n # Culling has already generated a list of output blocks\n return {(self.output, *p) for p in self.output_blocks}\n\n # Return all possible output keys (no culling)\n return {\n (self.output, *p)\n for p in itertools.product(\n *[range(self.dims[i]) for i in self.output_indices]\n )\n }\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self) -> int:\n # same method as `get_output_keys`, without manifesting the keys themselves\n return (\n len(self.output_blocks)\n if self.output_blocks\n else prod(self.dims[i] for i in self.output_indices)\n )\n\n def is_materialized(self):\n return hasattr(self, \"_cached_dict\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
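A hedged usage sketch of `visualize` (assumes the optional ``graphviz`` dependency is installed; the filename is arbitrary):

import dask.array as da

x = da.ones((100, 100), chunks=(50, 50)).sum()
# writes graph.png and, under IPython, returns a displayable Image
x.visualize(filename="graph.png", optimize_graph=True)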
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._cull_dependencies_Blockwise._cull_dependencies.return.key_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._cull_dependencies_Blockwise._cull_dependencies.return.key_deps", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 638, "end_line": 687, "span_ids": ["Blockwise._cull_dependencies"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def _cull_dependencies(self, all_hlg_keys, output_blocks):\n \"\"\"Determine the necessary dependencies to produce `output_blocks`.\n\n This method does not require graph materialization.\n \"\"\"\n\n # Check `concatenate` option\n concatenate = None\n if self.concatenate is True:\n from dask.array.core import concatenate_axes as concatenate\n\n # Generate coordinate map\n (coord_maps, concat_axes, dummies) = _get_coord_mapping(\n self.dims,\n self.output,\n self.output_indices,\n self.numblocks,\n self.indices,\n concatenate,\n )\n\n # Gather constant dependencies (for all output keys)\n const_deps = set()\n for (arg, ind) in self.indices:\n if ind is None:\n try:\n if arg in all_hlg_keys:\n const_deps.add(arg)\n except TypeError:\n pass # unhashable\n\n # Get dependencies for each output block\n key_deps = {}\n for out_coords in output_blocks:\n deps = set()\n coords = out_coords + dummies\n for cmap, axes, (arg, ind) in zip(coord_maps, concat_axes, self.indices):\n if ind is not None and arg not in self.io_deps:\n arg_coords = tuple(coords[c] for c in cmap)\n if axes:\n tups = lol_product((arg,), arg_coords)\n deps.update(flatten(tups))\n if concatenate:\n tups = (concatenate, tups, axes)\n else:\n tups = (arg,) + arg_coords\n deps.add(tups)\n key_deps[(self.output,) + out_coords] = deps | const_deps\n\n return key_deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__get_coord_mapping_make_blockwise_graph": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__get_coord_mapping_make_blockwise_graph", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 770, "end_line": 1101, "span_ids": ["_get_coord_mapping", "make_blockwise_graph"], "tokens": 821}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_coord_mapping(\n 
dims,\n output,\n out_indices,\n numblocks,\n argpairs,\n concatenate,\n):\n \"\"\"Calculate coordinate mapping for graph construction.\n\n This function handles the high-level logic behind Blockwise graph\n construction. The output is a tuple containing: The mapping between\n input and output block coordinates (`coord_maps`), the axes along\n which to concatenate for each input (`concat_axes`), and the dummy\n indices needed for broadcasting (`dummies`).\n\n Used by `make_blockwise_graph` and `Blockwise._cull_dependencies`.\n\n Parameters\n ----------\n dims : dict\n Mapping between each index specified in `argpairs` and\n the number of output blocks for that index. Corresponds\n to the Blockwise `dims` attribute.\n output : str\n Corresponds to the Blockwise `output` attribute.\n out_indices : tuple\n Corresponds to the Blockwise `output_indices` attribute.\n numblocks : dict\n Corresponds to the Blockwise `numblocks` attribute.\n argpairs : tuple\n Corresponds to the Blockwise `indices` attribute.\n concatenate : bool\n Corresponds to the Blockwise `concatenate` attribute.\n \"\"\"\n\n block_names = set()\n all_indices = set()\n for name, ind in argpairs:\n if ind is not None:\n block_names.add(name)\n for x in ind:\n all_indices.add(x)\n assert set(numblocks) == block_names\n\n dummy_indices = all_indices - set(out_indices)\n\n # For each position in the output space, we'll construct a\n # \"coordinate set\" that consists of\n # - the output indices\n # - the dummy indices\n # - the dummy indices, with indices replaced by zeros (for broadcasting), we\n # are careful to only emit a single dummy zero when concatenate=True to not\n # concatenate the same array with itself several times.\n # - a 0 to assist with broadcasting.\n\n index_pos, zero_pos = {}, {}\n for i, ind in enumerate(out_indices):\n index_pos[ind] = i\n zero_pos[ind] = -1\n\n _dummies_list = []\n for i, ind in enumerate(dummy_indices):\n index_pos[ind] = 2 * i + len(out_indices)\n zero_pos[ind] = 2 * i + 1 + len(out_indices)\n reps = 1 if concatenate else dims[ind]\n _dummies_list.append([list(range(dims[ind])), [0] * reps])\n\n # ([0, 1, 2], [0, 0, 0], ...) For a dummy index of dimension 3\n dummies = tuple(itertools.chain.from_iterable(_dummies_list))\n dummies += (0,)\n\n # For each coordinate position in each input, gives the position in\n # the coordinate set.\n coord_maps = []\n\n # Axes along which to concatenate, for each input\n concat_axes = []\n for arg, ind in argpairs:\n if ind is not None:\n coord_maps.append(\n [\n zero_pos[i] if nb == 1 else index_pos[i]\n for i, nb in zip(ind, numblocks[arg])\n ]\n )\n concat_axes.append([n for n, i in enumerate(ind) if i in dummy_indices])\n else:\n coord_maps.append(None)\n concat_axes.append(None)\n\n return coord_maps, concat_axes, dummies\n\n\ndef make_blockwise_graph(\n func,\n output,\n out_indices,\n *arrind_pairs,\n numblocks=None,\n concatenate=None,\n new_axes=None,\n output_blocks=None,\n dims=None,\n deserializing=False,\n func_future_args=None,\n return_key_deps=False,\n io_deps=None,\n **kwargs,\n):\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__make_dims_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__make_dims_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1513, "end_line": 1579, "span_ids": ["_make_dims", "fuse_roots"], "tokens": 503}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _make_dims(indices, numblocks, new_axes):\n \"\"\"Returns a dictionary mapping between each index specified in\n `indices` and the number of output blocks for that index.\n \"\"\"\n dims = broadcast_dimensions(indices, numblocks)\n for k, v in new_axes.items():\n dims[k] = len(v) if isinstance(v, tuple) else 1\n return dims\n\n\ndef fuse_roots(graph: HighLevelGraph, keys: list):\n \"\"\"\n Fuse nearby layers if they don't have dependencies\n\n Often Blockwise sections of the graph fill out all of the computation\n except for the initial data access or data loading layers::\n\n Large Blockwise Layer\n | | |\n X Y Z\n\n This can be troublesome because X, Y, and Z tasks may be executed on\n different machines, and then require communication to move around.\n\n This optimization identifies this situation, lowers all of the graphs to\n concrete dicts, and then calls ``fuse`` on them, with a width equal to the\n number of layers like X, Y, and Z.\n\n This is currently used within array and dataframe optimizations.\n\n Parameters\n ----------\n graph : HighLevelGraph\n The full graph of the computation\n keys : list\n The output keys of the computation, to be passed on to fuse\n\n See Also\n --------\n Blockwise\n fuse\n \"\"\"\n layers = ensure_dict(graph.layers, copy=True)\n dependencies = ensure_dict(graph.dependencies, copy=True)\n dependents = reverse_dict(dependencies)\n\n for name, layer in graph.layers.items():\n deps = graph.dependencies[name]\n if (\n isinstance(layer, Blockwise)\n and len(deps) > 1\n and not any(dependencies[dep] for dep in deps) # no need to fuse if 0 or 1\n and all(len(dependents[dep]) == 1 for dep in deps)\n and all(layer.annotations == graph.layers[dep].annotations for dep in deps)\n ):\n new = toolz.merge(layer, *[layers[dep] for dep in deps])\n new, _ = fuse(new, keys, ave_width=len(deps))\n\n for dep in deps:\n del layers[dep]\n del dependencies[dep]\n\n layers[name] = new\n dependencies[name] = set()\n\n return HighLevelGraph(layers, dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_check_deprecations_check_deprecations.if_key_in_deprecations_.else_.return.key": {"__data__": {"id_": 
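An illustrative set of inputs for `_make_dims`; the values here are invented for the sketch and only trace the rule stated in its docstring:

# two blockwise inputs sharing index "j", plus one new axis "l"
indices = [("x", "ij"), ("y", "jk")]
numblocks = {"x": (2, 3), "y": (3, 4)}
new_axes = {"l": (10, 10)}  # a new axis backed by a 2-tuple of chunks

# _make_dims(indices, numblocks, new_axes) would yield
# {"i": 2, "j": 3, "k": 4, "l": 2}: broadcast block counts for existing
# indices, and len(v) blocks for tuple-valued new axes (else 1)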
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_check_deprecations_check_deprecations.if_key_in_deprecations_.else_.return.key", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 610, "end_line": 651, "span_ids": ["check_deprecations"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_deprecations(key: str, deprecations: dict = deprecations) -> str:\n \"\"\"Check if the provided value has been renamed or removed\n\n Parameters\n ----------\n key : str\n The configuration key to check\n deprecations : Dict[str, str]\n The mapping of aliases\n\n Examples\n --------\n >>> deprecations = {\"old_key\": \"new_key\", \"invalid\": None}\n >>> check_deprecations(\"old_key\", deprecations=deprecations) # doctest: +SKIP\n UserWarning: Configuration key \"old_key\" has been deprecated. Please use \"new_key\" instead.\n\n >>> check_deprecations(\"invalid\", deprecations=deprecations)\n Traceback (most recent call last):\n ...\n ValueError: Configuration value \"invalid\" has been removed\n\n >>> check_deprecations(\"another_key\", deprecations=deprecations)\n 'another_key'\n\n Returns\n -------\n new: str\n The proper key, whether the original (if no deprecation) or the aliased\n value\n \"\"\"\n if key in deprecations:\n new = deprecations[key]\n if new:\n warnings.warn(\n 'Configuration key \"{}\" has been deprecated. 
'\n 'Please use \"{}\" instead'.format(key, new)\n )\n return new\n else:\n raise ValueError(f'Configuration value \"{key}\" has been removed')\n else:\n return key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_serialize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_serialize_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 654, "end_line": 703, "span_ids": ["impl:22", "serialize", "deserialize", "_initialize"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def serialize(data: Any) -> str:\n \"\"\"Serialize config data into a string.\n\n Typically used to pass config via the ``DASK_INTERNAL_INHERIT_CONFIG`` environment variable.\n\n Parameters\n ----------\n data: json-serializable object\n The data to serialize\n\n Returns\n -------\n serialized_data: str\n The serialized data as a string\n\n \"\"\"\n return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()\n\n\ndef deserialize(data: str) -> Any:\n \"\"\"De-serialize config data into the original object.\n\n Typically when receiving config via the ``DASK_INTERNAL_INHERIT_CONFIG`` environment variable.\n\n Parameters\n ----------\n data: str\n String serialized by :func:`dask.config.serialize`\n\n Returns\n -------\n deserialized_data: obj\n The de-serialized data\n\n \"\"\"\n return json.loads(base64.urlsafe_b64decode(data.encode()).decode())\n\n\ndef _initialize() -> None:\n fn = os.path.join(os.path.dirname(__file__), \"dask.yaml\")\n\n with open(fn) as f:\n _defaults = yaml.safe_load(f)\n\n update_defaults(_defaults)\n\n\nrefresh()\n_initialize()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.skew__Frame.skew.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.skew__Frame.skew.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2203, "end_line": 2244, "span_ids": ["_Frame.skew"], "tokens": 364}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def 
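A round-trip sketch of the two helpers documented above, as used for ``DASK_INTERNAL_INHERIT_CONFIG``:

from dask.config import deserialize, serialize

blob = serialize({"scheduler": "threads"})  # base64-encoded JSON string
assert deserialize(blob) == {"scheduler": "threads"}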
skew(\n self, axis=None, bias=True, nan_policy=\"propagate\", out=None, numeric_only=None\n ):\n \"\"\"\n .. note::\n\n This implementation follows the dask.array.stats implementation\n of skewness and calculates skewness without taking into account\n a bias term for finite sample size, which corresponds to the\n default settings of the scipy.stats skewness calculation. However,\n Pandas corrects for this, so the values differ by a factor of\n (n * (n - 1)) ** 0.5 / (n - 2), where n is the number of samples.\n\n Further, this method currently does not support filtering out NaN\n values, which is again a difference to Pandas.\n \"\"\"\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"skew\")\n meta = self._meta_nonempty.skew()\n if axis == 1:\n result = map_partitions(\n M.skew,\n self,\n meta=meta,\n token=self._token_prefix + \"skew\",\n axis=axis,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n if self.ndim == 1:\n result = self._skew_1d(self, bias=bias, nan_policy=nan_policy)\n return handle_out(out, result)\n else:\n result = self._skew_numeric(bias=bias, nan_policy=nan_policy)\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._skew_1d__Frame._skew_1d.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._skew_1d__Frame._skew_1d.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2090, "end_line": 2115, "span_ids": ["_Frame._skew_1d"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _skew_1d(self, column, bias=True, nan_policy=\"propagate\"):\n \"\"\"1D version of the skew calculation.\n\n Uses the array version from da.stats in case we are passing in a single series\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n if pd.Int64Dtype.is_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"skew-1d-\" + tokenize(column)\n\n array_skew = da_stats.skew(\n column.values, axis=0, bias=bias, nan_policy=nan_policy\n )\n\n layer = {(name, 0): (methods.wrap_skew_reduction, (array_skew._name,), None)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew])\n\n return new_dd_object(\n graph, name, column._meta_nonempty.skew(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
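A sketch checking the bias factor quoted in the docstring above (assumes scipy is installed, since ``dask.array.stats`` depends on it):

import numpy as np
import pandas as pd
import dask.dataframe as dd

s = pd.Series(np.random.RandomState(0).randn(100))
ds = dd.from_pandas(s, npartitions=4)
n = len(s)
factor = (n * (n - 1)) ** 0.5 / (n - 2)
# pandas corrects for finite-sample bias; dask follows scipy's default
assert np.isclose(ds.skew().compute() * factor, s.skew())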
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._skew_numeric__Frame._skew_numeric.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._skew_numeric__Frame._skew_numeric.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2117, "end_line": 2148, "span_ids": ["_Frame._skew_numeric"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _skew_numeric(self, bias=True, nan_policy=\"propagate\"):\n \"\"\"Method for dataframes with numeric columns.\n\n Maps the array version from da.stats onto the numeric array of columns.\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n num = self.select_dtypes(include=[\"number\", \"bool\"], exclude=[np.timedelta64])\n\n values_dtype = num.values.dtype\n array_values = num.values\n\n if not np.issubdtype(values_dtype, np.number):\n array_values = num.values.astype(\"f8\")\n\n array_skew = da_stats.skew(\n array_values, axis=0, bias=bias, nan_policy=nan_policy\n )\n\n name = self._token_prefix + \"var-numeric\" + tokenize(num)\n cols = num._meta.columns if is_dataframe_like(num) else None\n\n skew_shape = num._meta_nonempty.values.var(axis=0).shape\n array_skew_name = (array_skew._name,) + (0,) * len(skew_shape)\n\n layer = {(name, 0): (methods.wrap_skew_reduction, array_skew_name, cols)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew])\n\n return new_dd_object(\n graph, name, num._meta_nonempty.skew(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.drop_DataFrame.drop.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.drop_DataFrame.drop.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4140, "end_line": 4150, "span_ids": ["DataFrame.drop"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def drop(self, labels=None, axis=0, columns=None, errors=\"raise\"):\n axis = self._validate_axis(axis)\n if axis == 0 and columns is not None:\n # Columns must be specified if axis==0\n return 
self.map_partitions(drop_by_shallow_copy, columns, errors=errors)\n elif axis == 1:\n return self.map_partitions(drop_by_shallow_copy, labels, errors=errors)\n raise NotImplementedError(\n \"Drop currently only works for axis=1 or when columns is not None\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.aggregate_SeriesGroupBy.unique.return.self__aca_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.aggregate_SeriesGroupBy.unique.return.self__aca_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1915, "end_line": 1950, "span_ids": ["SeriesGroupBy.aggregate", "SeriesGroupBy.unique", "SeriesGroupBy.value_counts", "SeriesGroupBy.agg"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def aggregate(self, arg, split_every=None, split_out=1):\n result = super().aggregate(arg, split_every=split_every, split_out=split_out)\n if self._slice:\n result = result[self._slice]\n\n if not isinstance(arg, (list, dict)) and isinstance(result, DataFrame):\n result = result[result.columns[0]]\n\n return result\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def agg(self, arg, split_every=None, split_out=1):\n return self.aggregate(arg, split_every=split_every, split_out=split_out)\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def value_counts(self, split_every=None, split_out=1):\n return self._aca_agg(\n token=\"value_counts\",\n func=_value_counts,\n aggfunc=_value_counts_aggregate,\n split_every=split_every,\n split_out=split_out,\n )\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def unique(self, split_every=None, split_out=1):\n name = self._meta.obj.name\n return self._aca_agg(\n token=\"unique\",\n func=M.unique,\n aggfunc=_unique_aggregate,\n aggregate_kwargs={\"name\": name},\n split_every=split_every,\n split_out=split_out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__unique_aggregate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__unique_aggregate_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2140, "end_line": 2179, "span_ids": ["_value_counts", "_head_chunk", "_head_aggregate", "_value_counts_aggregate", "_tail_aggregate", "_tail_chunk", "_unique_aggregate"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", 
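A short sketch of the two supported `drop` code paths above (``axis=1`` with labels, or ``columns=`` with the default axis):

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"a": [1, 2], "b": [3, 4]}), npartitions=1)
ddf.drop("b", axis=1).compute()    # label-based drop along columns
ddf.drop(columns=["b"]).compute()  # columns=..., axis left at 0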
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _unique_aggregate(series_gb, name=None):\n ret = type(series_gb.obj)(\n {k: v.explode().unique() for k, v in series_gb}, name=name\n )\n ret.index.names = series_gb.obj.index.names\n return ret\n\n\ndef _value_counts(x, **kwargs):\n if len(x):\n return M.value_counts(x, **kwargs)\n else:\n return pd.Series(dtype=int)\n\n\ndef _value_counts_aggregate(series_gb):\n to_concat = {k: v.groupby(level=1).sum() for k, v in series_gb}\n names = list(series_gb.obj.index.names)\n return pd.Series(pd.concat(to_concat, names=names))\n\n\ndef _tail_chunk(series_gb, **kwargs):\n keys, groups = zip(*series_gb) if len(series_gb) else ((True,), (series_gb,))\n return pd.concat([group.tail(**kwargs) for group in groups], keys=keys)\n\n\ndef _tail_aggregate(series_gb, **kwargs):\n levels = kwargs.pop(\"index_levels\")\n return series_gb.tail(**kwargs).droplevel(list(range(levels)))\n\n\ndef _head_chunk(series_gb, **kwargs):\n keys, groups = zip(*series_gb) if len(series_gb) else ((True,), (series_gb,))\n return pd.concat([group.head(**kwargs) for group in groups], keys=keys)\n\n\ndef _head_aggregate(series_gb, **kwargs):\n levels = kwargs.pop(\"index_levels\")\n return series_gb.head(**kwargs).droplevel(list(range(levels)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas_read_pandas.if_compression_not_in_com.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas_read_pandas.if_compression_not_in_com.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 457, "end_line": 537, "span_ids": ["read_pandas"], "tokens": 771}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_pandas(\n reader,\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=\"infer\",\n sample=256000,\n sample_rows=10,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n):\n reader_name = reader.__name__\n if lineterminator is not None and len(lineterminator) == 1:\n kwargs[\"lineterminator\"] = lineterminator\n else:\n lineterminator = \"\\n\"\n if include_path_column and isinstance(include_path_column, bool):\n include_path_column = \"path\"\n if \"index\" in kwargs or \"index_col\" in kwargs:\n raise ValueError(\n \"Keywords 'index' and 'index_col' not supported. 
\"\n f\"Use dd.{reader_name}(...).set_index('my-index') instead\"\n )\n for kw in [\"iterator\", \"chunksize\"]:\n if kw in kwargs:\n raise ValueError(f\"{kw} not supported for dd.{reader_name}\")\n if kwargs.get(\"nrows\", None):\n raise ValueError(\n \"The 'nrows' keyword is not supported by \"\n \"`dd.{0}`. To achieve the same behavior, it's \"\n \"recommended to use `dd.{0}(...).\"\n \"head(n=nrows)`\".format(reader_name)\n )\n if isinstance(kwargs.get(\"skiprows\"), int):\n skiprows = lastskiprow = firstrow = kwargs.get(\"skiprows\")\n elif kwargs.get(\"skiprows\") is None:\n skiprows = lastskiprow = firstrow = 0\n else:\n # When skiprows is a list, we expect more than max(skiprows) to\n # be included in the sample. This means that [0,2] will work well,\n # but [0, 440] might not work.\n skiprows = set(kwargs.get(\"skiprows\"))\n lastskiprow = max(skiprows)\n # find the firstrow that is not skipped, for use as header\n firstrow = min(set(range(len(skiprows) + 1)) - set(skiprows))\n if isinstance(kwargs.get(\"header\"), list):\n raise TypeError(f\"List of header rows not supported for dd.{reader_name}\")\n if isinstance(kwargs.get(\"converters\"), dict) and include_path_column:\n path_converter = kwargs.get(\"converters\").get(include_path_column, None)\n else:\n path_converter = None\n\n # If compression is \"infer\", inspect the (first) path suffix and\n # set the proper compression option if the suffix is recongnized.\n if compression == \"infer\":\n # Translate the input urlpath to a simple path list\n paths = get_fs_token_paths(urlpath, mode=\"rb\", storage_options=storage_options)[\n 2\n ]\n\n # Infer compression from first path\n compression = infer_compression(paths[0])\n\n if blocksize == \"default\":\n blocksize = AUTO_BLOCKSIZE\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n if blocksize and compression:\n # NONE of the compressions should use chunking\n warn(\n \"Warning %s compression does not support breaking apart files\\n\"\n \"Please ensure that each individual file can fit in memory and\\n\"\n \"use the keyword ``blocksize=None to remove this message``\\n\"\n \"Setting ``blocksize=None``\" % compression\n )\n blocksize = None\n if compression not in compr:\n raise NotImplementedError(\"Compression format %s not installed\" % compression)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas_from_pandas._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas_from_pandas._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 134, "end_line": 200, "span_ids": ["from_pandas"], "tokens": 752}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):\n \"\"\"\n Construct a Dask DataFrame from a Pandas DataFrame\n\n This splits an in-memory Pandas dataframe into several parts and constructs\n a dask.dataframe from those parts on which Dask.dataframe can operate in\n parallel. By default, the input dataframe will be sorted by the index to\n produce cleanly-divided partitions (with known divisions). To preserve the\n input ordering, make sure the input index is monotonically-increasing. The\n ``sort=False`` option will also avoid reordering, but will not result in\n known divisions.\n\n Note that, despite parallelism, Dask.dataframe may not always be faster\n than Pandas. We recommend that you stay with Pandas for as long as\n possible before switching to Dask.dataframe.\n\n Parameters\n ----------\n data : pandas.DataFrame or pandas.Series\n The DataFrame/Series with which to construct a Dask DataFrame/Series\n npartitions : int, optional\n The number of partitions of the index to create. Note that depending on\n the size and index of the dataframe, the output may have fewer\n partitions than requested.\n chunksize : int, optional\n The number of rows per index partition to use.\n sort: bool\n Sort the input by index first to obtain cleanly divided partitions\n (with known divisions). If False, the input will not be sorted, and\n all divisions will be set to None. Default is True.\n name: string, optional\n An optional keyname for the dataframe. Defaults to hashing the input\n\n Returns\n -------\n dask.DataFrame or dask.Series\n A dask DataFrame/Series partitioned along the index\n\n Examples\n --------\n >>> from dask.dataframe import from_pandas\n >>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),\n ... 
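Following the blocksize/compression logic above, a sketch of the recommended call for non-splittable files (the glob path is hypothetical):

import dask.dataframe as dd

# gzip cannot be split, so pass blocksize=None explicitly to avoid the
# warning and get one partition per file
ddf = dd.read_csv("data-*.csv.gz", blocksize=None)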
index=pd.date_range(start='20100101', periods=6))\n >>> ddf = from_pandas(df, npartitions=3)\n >>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2010-01-01 00:00:00', freq='D'),\n Timestamp('2010-01-03 00:00:00', freq='D'),\n Timestamp('2010-01-05 00:00:00', freq='D'),\n Timestamp('2010-01-06 00:00:00', freq='D'))\n >>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!\n >>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2010-01-01 00:00:00', freq='D'),\n Timestamp('2010-01-03 00:00:00', freq='D'),\n Timestamp('2010-01-05 00:00:00', freq='D'),\n Timestamp('2010-01-06 00:00:00', freq='D'))\n\n Raises\n ------\n TypeError\n If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is\n passed in.\n\n See Also\n --------\n from_array : Construct a dask.DataFrame from an array that has record dtype\n read_csv : Construct a dask.DataFrame from a CSV file\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas.if_isinstance_getattr_dat_from_pandas.return.new_dd_object_dsk_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_pandas.if_isinstance_getattr_dat_from_pandas.return.new_dd_object_dsk_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 201, "end_line": 234, "span_ids": ["from_pandas"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_pandas(data, npartitions=None, chunksize=None, sort=True, name=None):\n if isinstance(getattr(data, \"index\", None), pd.MultiIndex):\n raise NotImplementedError(\"Dask does not support MultiIndex Dataframes.\")\n\n if not has_parallel_type(data):\n raise TypeError(\"Input must be a pandas DataFrame or Series\")\n\n if (npartitions is None) == (chunksize is None):\n raise ValueError(\"Exactly one of npartitions and chunksize must be specified.\")\n\n nrows = len(data)\n\n if chunksize is None:\n chunksize = int(ceil(nrows / npartitions))\n\n name = name or (\"from_pandas-\" + tokenize(data, chunksize))\n\n if not nrows:\n return new_dd_object({(name, 0): data}, name, data, [None, None])\n\n if sort and not data.index.is_monotonic_increasing:\n data = data.sort_index(ascending=True)\n if sort:\n divisions, locations = sorted_division_locations(\n data.index, chunksize=chunksize\n )\n else:\n locations = list(range(0, nrows, chunksize)) + [len(data)]\n divisions = [None] * len(locations)\n\n dsk = {\n (name, i): data.iloc[start:stop]\n for i, (start, stop) in enumerate(zip(locations[:-1], locations[1:]))\n }\n return new_dd_object(dsk, name, data, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
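A compact sketch of the `from_pandas` contract described above (exactly one of ``npartitions``/``chunksize``; a sorted index yields known divisions):

import pandas as pd
from dask.dataframe import from_pandas

df = pd.DataFrame({"a": range(6)})
ddf = from_pandas(df, npartitions=3)
assert ddf.npartitions == 3
assert ddf.known_divisions  # monotonic RangeIndex -> known divisions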
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py____append_row_groups.try_.except_RuntimeError_as_er.if_requires_equal_schema.else_.raise_err": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py____append_row_groups.try_.except_RuntimeError_as_er.if_requires_equal_schema.else_.raise_err", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 37, "end_line": 59, "span_ids": ["imports", "_append_row_groups"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n# Helper Utilities\n#\n\n\ndef _append_row_groups(metadata, md):\n \"\"\"Append row-group metadata and include a helpful\n error message if an inconsistent schema is detected.\n\n Used by `ArrowDatasetEngine` and `ArrowLegacyEngine`.\n \"\"\"\n try:\n metadata.append_row_groups(md)\n except RuntimeError as err:\n if \"requires equal schemas\" in str(err):\n raise RuntimeError(\n \"Schemas are inconsistent, try using \"\n '`to_parquet(..., schema=\"infer\")`, or pass an explicit '\n \"pyarrow schema. Such as \"\n '`to_parquet(..., schema={\"column1\": pa.string()})`'\n ) from err\n else:\n raise err", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__write_partitioned__write_partitioned.return.md_list": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__write_partitioned__write_partitioned.return.md_list", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 71, "end_line": 134, "span_ids": ["_write_partitioned"], "tokens": 481}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _write_partitioned(\n table,\n df,\n root_path,\n filename,\n partition_cols,\n fs,\n pandas_to_arrow_table,\n preserve_index,\n index_cols=(),\n **kwargs,\n):\n \"\"\"Write table to a partitioned dataset with pyarrow.\n\n Logic copied from pyarrow.parquet.\n (arrow/python/pyarrow/parquet.py::write_to_dataset)\n\n Used by `ArrowDatasetEngine` (and by `ArrowLegacyEngine`,\n through inherited `write_partition` method).\n\n TODO: Remove this in favor of pyarrow's `write_to_dataset`\n once ARROW-8244 is addressed.\n \"\"\"\n fs.mkdirs(root_path, exist_ok=True)\n\n if preserve_index:\n df.reset_index(inplace=True)\n df = df[table.schema.names]\n\n index_cols = list(index_cols) if index_cols else []\n preserve_index = False\n if 
index_cols:\n df.set_index(index_cols, inplace=True)\n preserve_index = True\n\n partition_keys = [df[col] for col in partition_cols]\n data_df = df.drop(partition_cols, axis=\"columns\")\n data_cols = df.columns.drop(partition_cols)\n if len(data_cols) == 0 and not index_cols:\n raise ValueError(\"No data left to save outside partition columns\")\n\n subschema = table.schema\n for col in table.schema.names:\n if col in partition_cols:\n subschema = subschema.remove(subschema.get_field_index(col))\n\n md_list = []\n for keys, subgroup in data_df.groupby(partition_keys):\n if not isinstance(keys, tuple):\n keys = (keys,)\n subdir = fs.sep.join(\n [f\"{name}={val}\" for name, val in zip(partition_cols, keys)]\n )\n subtable = pandas_to_arrow_table(\n subgroup, preserve_index=preserve_index, schema=subschema\n )\n prefix = fs.sep.join([root_path, subdir])\n fs.mkdirs(prefix, exist_ok=True)\n full_path = fs.sep.join([prefix, filename])\n with fs.open(full_path, \"wb\") as f:\n pq.write_table(subtable, f, metadata_collector=md_list, **kwargs)\n md_list[-1].set_file_path(fs.sep.join([subdir, filename]))\n\n return md_list", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__index_in_schema__index_in_schema.if_index_and_schema_is_no.else_._No_index_to_check": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__index_in_schema__index_in_schema.if_index_and_schema_is_no.else_._No_index_to_check", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 119, "end_line": 132, "span_ids": ["_index_in_schema"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _index_in_schema(index, schema):\n \"\"\"Simple utility to check if all `index` columns are included\n in the known `schema`.\n\n Used by `ArrowDatasetEngine` (and by `ArrowLegacyEngine`,\n through inherited `write_partition` method).\n \"\"\"\n if index and schema is not None:\n # Make sure all index columns are in user-defined schema\n return len(set(index).intersection(schema.names)) == len(index)\n elif index:\n return True # Schema is not user-specified, all good\n else:\n return False # No index to check", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_PartitionObj_PartitionObj.__init__.self.keys.sorted_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_PartitionObj_PartitionObj.__init__.self.keys.sorted_keys_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": 
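A hedged end-to-end sketch of the partitioned-write path `_write_partitioned` implements (the output directory name is arbitrary):

import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({"year": [2020, 2021], "x": [1.0, 2.0]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet("out_dir", engine="pyarrow", partition_on=["year"])
# lays out hive-style subdirectories: out_dir/year=2020/..., out_dir/year=2021/...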
"arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 135, "end_line": 148, "span_ids": ["PartitionObj", "PartitionObj.__init__"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PartitionObj:\n \"\"\"Simple object to provide a `name` and `keys` attribute\n for a single partition column. `ArrowDatasetEngine` will use\n a list of these objects to \"duck type\" a `ParquetPartitions`\n object (used in `ArrowLegacyEngine`). The larger purpose of this\n class is to allow the same `read_partition` definition to handle\n both Engine instances.\n\n Used by `ArrowDatasetEngine` only.\n \"\"\"\n\n def __init__(self, name, keys):\n self.name = name\n self.keys = sorted(keys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__get_rg_statistics__get_rg_statistics.if_subset_stats_supported.else_.return.row_group_statistics": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__get_rg_statistics__get_rg_statistics.if_subset_stats_supported.else_.return.row_group_statistics", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 417, "end_line": 453, "span_ids": ["_get_rg_statistics"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_rg_statistics(row_group, col_indices):\n \"\"\"Custom version of pyarrow's RowGroupInfo.statistics method\n (https://github.com/apache/arrow/blob/master/python/pyarrow/_dataset.pyx)\n\n We use col_indices to specify the specific subset of columns\n that we need statistics for. 
This is more efficient than the\n upstream `RowGroupInfo.statistics` method, which will return\n statistics for all columns.\n \"\"\"\n\n if subset_stats_supported:\n\n def name_stats(i):\n col = row_group.metadata.column(i)\n\n stats = col.statistics\n if stats is None or not stats.has_min_max:\n return None, None\n\n name = col.path_in_schema\n field_index = row_group.schema.get_field_index(name)\n if field_index < 0:\n return None, None\n\n return col.path_in_schema, {\n \"min\": stats.min,\n \"max\": stats.max,\n }\n\n return {\n name: stats\n for name, stats in map(name_stats, col_indices.values())\n if stats is not None\n }\n\n else:\n return row_group.statistics", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__need_fragments__need_fragments.return.bool_filtered_cols_part": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__need_fragments__need_fragments.return.bool_filtered_cols_part", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 298, "span_ids": ["_need_fragments"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _need_fragments(filters, partition_keys):\n # Check if we need to generate a fragment for filtering.\n # We only need to do this if we are applying filters to\n # columns that were not already filtered by \"partition\".\n\n partition_cols = (\n {v[0] for v in flatten(partition_keys, container=list) if len(v)}\n if partition_keys\n else set()\n )\n filtered_cols = (\n {v[0] for v in flatten(filters, container=list) if len(v)} if filters else set()\n )\n\n return bool(filtered_cols - partition_cols)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.read_partition_ArrowDatasetEngine.read_partition.index_in_columns_and_parts.set_df_index_names_issub": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.read_partition_ArrowDatasetEngine.read_partition.index_in_columns_and_parts.set_df_index_names_issub", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 364, "end_line": 465, "span_ids": ["ArrowDatasetEngine.read_partition"], "tokens": 698}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def read_partition(\n cls,\n fs,\n pieces,\n columns,\n index,\n categories=(),\n partitions=(),\n filters=None,\n schema=None,\n **kwargs,\n ):\n \"\"\"Read in a single output partition.\n\n This method is also used by `ArrowLegacyEngine`.\n \"\"\"\n if isinstance(index, list):\n for level in index:\n # unclear if we can use set ops here. I think the order matters.\n # Need the membership test to avoid duplicating index when\n # we slice with `columns` later on.\n if level not in columns:\n columns.append(level)\n\n # Ensure `columns` and `partitions` do not overlap\n columns_and_parts = columns.copy()\n if not isinstance(partitions, (list, tuple)):\n if columns_and_parts and partitions:\n for part_name in partitions.partition_names:\n if part_name in columns:\n columns.remove(part_name)\n else:\n columns_and_parts.append(part_name)\n columns = columns or None\n\n # Always convert pieces to list\n if not isinstance(pieces, list):\n pieces = [pieces]\n\n tables = []\n multi_read = len(pieces) > 1\n for piece in pieces:\n\n if isinstance(piece, str):\n # `piece` is a file-path string\n path_or_frag = piece\n row_group = None\n partition_keys = None\n else:\n # `piece` contains (path, row_group, partition_keys)\n (path_or_frag, row_group, partition_keys) = piece\n\n # Convert row_group to a list and be sure to\n # check if msgpack converted it to a tuple\n if isinstance(row_group, tuple):\n row_group = list(row_group)\n if not isinstance(row_group, list):\n row_group = [row_group]\n\n # Read in arrow table and convert to pandas\n arrow_table = cls._read_table(\n path_or_frag,\n fs,\n row_group,\n columns,\n schema,\n filters,\n partitions,\n partition_keys,\n **kwargs,\n )\n if multi_read:\n tables.append(arrow_table)\n\n if multi_read:\n arrow_table = pa.concat_tables(tables)\n\n # Convert to pandas\n df = cls._arrow_table_to_pandas(arrow_table, categories, **kwargs)\n\n # For pyarrow.dataset api, need to convert partition columns\n # to categorigal manually for integer types.\n if partitions and isinstance(partitions, list):\n for partition in partitions:\n if df[partition.name].dtype.name != \"category\":\n # We read directly from fragments, so the partition\n # columns are already in our dataframe. We just\n # need to convert non-categorical types.\n df[partition.name] = pd.Series(\n pd.Categorical(\n categories=partition.keys,\n values=df[partition.name].values,\n ),\n index=df.index,\n )\n\n # Note that `to_pandas(ignore_metadata=False)` means\n # pyarrow will use the pandas metadata to set the index.\n index_in_columns_and_parts = set(df.index.names).issubset(\n set(columns_and_parts)\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.read_partition.if_not_index__ArrowDatasetEngine.read_partition.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.read_partition.if_not_index__ArrowDatasetEngine.read_partition.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 466, "end_line": 488, "span_ids": ["ArrowDatasetEngine.read_partition"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def read_partition(\n cls,\n fs,\n pieces,\n columns,\n index,\n categories=(),\n partitions=(),\n filters=None,\n schema=None,\n **kwargs,\n ):\n # ... other code\n if not index:\n if index_in_columns_and_parts:\n # User does not want to set index and a desired\n # column/partition has been set to the index\n df.reset_index(drop=False, inplace=True)\n else:\n # User does not want to set index and an\n # \"unwanted\" column has been set to the index\n df.reset_index(drop=True, inplace=True)\n else:\n if set(df.index.names) != set(index) and index_in_columns_and_parts:\n # The wrong index has been set and it contains\n # one or more desired columns/partitions\n df.reset_index(drop=False, inplace=True)\n elif index_in_columns_and_parts:\n # The correct index has already been set\n index = False\n columns_and_parts = list(set(columns_and_parts) - set(df.index.names))\n df = df[list(columns_and_parts)]\n\n if index:\n df = df.set_index(index)\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._pandas_to_arrow_table_ArrowDatasetEngine.write_partition.if_return_metadata_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._pandas_to_arrow_table_ArrowDatasetEngine.write_partition.if_return_metadata_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 682, "end_line": 761, "span_ids": ["ArrowDatasetEngine.write_partition", "ArrowDatasetEngine._pandas_to_arrow_table"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _pandas_to_arrow_table(\n cls, df: pd.DataFrame, preserve_index=False, schema=None\n ) -> pa.Table:\n table = pa.Table.from_pandas(\n df, nthreads=1, preserve_index=preserve_index, schema=schema\n )\n return table\n\n @classmethod\n def write_partition(\n cls,\n df,\n path,\n fs,\n filename,\n partition_on,\n return_metadata,\n fmd=None,\n compression=None,\n index_cols=None,\n schema=None,\n head=False,\n custom_metadata=None,\n **kwargs,\n ):\n _meta = None\n preserve_index = False\n if _index_in_schema(index_cols, schema):\n df.set_index(index_cols, inplace=True)\n preserve_index = True\n else:\n index_cols = []\n\n t = cls._pandas_to_arrow_table(df, preserve_index=preserve_index, schema=schema)\n if custom_metadata:\n _md = t.schema.metadata\n _md.update(custom_metadata)\n t = t.replace_schema_metadata(metadata=_md)\n\n if partition_on:\n md_list = _write_partitioned(\n t,\n df,\n path,\n filename,\n partition_on,\n fs,\n cls._pandas_to_arrow_table,\n preserve_index,\n index_cols=index_cols,\n compression=compression,\n **kwargs,\n )\n if md_list:\n _meta = md_list[0]\n for i in range(1, len(md_list)):\n _append_row_groups(_meta, md_list[i])\n else:\n md_list = []\n with fs.open(fs.sep.join([path, filename]), \"wb\") as fil:\n pq.write_table(\n t,\n fil,\n compression=compression,\n metadata_collector=md_list,\n **kwargs,\n )\n if md_list:\n _meta = md_list[0]\n _meta.set_file_path(filename)\n # Return the schema needed to write the metadata\n if return_metadata:\n d = {\"meta\": _meta}\n if head:\n # Only return schema if this is the \"head\" partition\n d[\"schema\"] = t.schema\n return [d]\n else:\n return []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.write_metadata_ArrowDatasetEngine.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.write_metadata_ArrowDatasetEngine.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 763, "end_line": 791, "span_ids": ["ArrowDatasetEngine.write_metadata"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n schema = parts[0][0].get(\"schema\", None)\n parts = [p for p in parts if p[0][\"meta\"] is not None]\n if parts:\n if not append:\n # Get only arguments specified in the function\n common_metadata_path = fs.sep.join([path, \"_common_metadata\"])\n keywords = getargspec(pq.write_metadata).args\n kwargs_meta = {k: v for k, v in kwargs.items() if k in keywords}\n with 
fs.open(common_metadata_path, \"wb\") as fil:\n pq.write_metadata(schema, fil, **kwargs_meta)\n\n # Aggregate metadata and write to _metadata file\n metadata_path = fs.sep.join([path, \"_metadata\"])\n if append and meta is not None:\n _meta = meta\n i_start = 0\n else:\n _meta = parts[0][0][\"meta\"]\n i_start = 1\n for i in range(i_start, len(parts)):\n _append_row_groups(_meta, parts[i][0][\"meta\"])\n with fs.open(metadata_path, \"wb\") as fil:\n _meta.write_metadata_file(fil)\n\n #\n # Private Class Methods\n #", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_None_7__get_dataset_object.return.dataset_base_fns": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_None_7__get_dataset_object.return.dataset_base_fns", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1629, "end_line": 1679, "span_ids": ["ArrowDatasetEngine.aggregate_metadata", "_get_dataset_object"], "tokens": 560}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n# PyArrow Legacy API [PyArrow<1.0.0]\n#\n\n\ndef _get_dataset_object(paths, fs, filters, dataset_kwargs):\n \"\"\"Generate a ParquetDataset object\"\"\"\n kwargs = dataset_kwargs.copy()\n ignore_metadata_file = kwargs.pop(\"ignore_metadata_file\", False)\n if ignore_metadata_file:\n raise ValueError(\"ignore_metadata_file not supported for ArrowLegacyEngine.\")\n\n if \"validate_schema\" not in kwargs:\n kwargs[\"validate_schema\"] = False\n if len(paths) > 1:\n # This is a list of files\n paths, base, fns = _sort_and_analyze_paths(paths, fs)\n proxy_metadata = None\n if \"_metadata\" in fns:\n # We have a _metadata file. PyArrow cannot handle\n # \"_metadata\" when `paths` is a list. So, we should\n # open \"_metadata\" separately.\n paths.remove(fs.sep.join([base, \"_metadata\"]))\n fns.remove(\"_metadata\")\n with fs.open(fs.sep.join([base, \"_metadata\"]), mode=\"rb\") as fil:\n proxy_metadata = pq.ParquetFile(fil).metadata\n # Create our dataset from the list of data files.\n # Note #1: this will not parse all the files (yet)\n # Note #2: Cannot pass filters for legacy pyarrow API (see issue#6512).\n # We can handle partitions + filtering for list input after\n # adopting new pyarrow.dataset API.\n dataset = pq.ParquetDataset(paths, filesystem=fs, **kwargs)\n if proxy_metadata:\n dataset.metadata = proxy_metadata\n elif fs.isdir(paths[0]):\n # This is a directory. We can let pyarrow do its thing.\n # Note: In the future, it may be best to avoid listing the\n # directory if we can get away with checking for the\n # existence of _metadata. 
Listing may be much more\n # expensive in storage systems like S3.\n allpaths = fs.glob(paths[0] + fs.sep + \"*\")\n allpaths, base, fns = _sort_and_analyze_paths(allpaths, fs)\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, filters=filters, **kwargs)\n else:\n # This is a single file. No danger in gathering statistics\n # and/or splitting row-groups without a \"_metadata\" file\n base = paths[0]\n fns = [None]\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, **kwargs)\n\n return dataset, base, fns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._gather_metadata.if_dataset_metadata__ArrowLegacyEngine._gather_metadata.if_dataset_metadata_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._gather_metadata.if_dataset_metadata__ArrowLegacyEngine._gather_metadata.if_dataset_metadata_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1750, "end_line": 1829, "span_ids": ["ArrowLegacyEngine._gather_metadata"], "tokens": 579}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _gather_metadata(\n cls,\n paths,\n fs,\n split_row_groups,\n gather_statistics,\n filters,\n index,\n dataset_kwargs,\n ):\n # ... 
other code\n if dataset.metadata:\n # We have a _metadata file.\n # PyArrow already did the work for us\n schema = dataset.metadata.schema.to_arrow_schema()\n if gather_statistics is None:\n gather_statistics = True\n if split_row_groups is None:\n split_row_groups = True\n return (\n schema,\n dataset.metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )\n else:\n # No _metadata file.\n # May need to collect footer metadata manually\n if dataset.schema is not None:\n schema = dataset.schema.to_arrow_schema()\n else:\n schema = None\n if gather_statistics is None:\n gather_statistics = False\n if split_row_groups is None:\n split_row_groups = False\n metadata = None\n if not (split_row_groups or gather_statistics):\n # Don't need to construct real metadata if\n # we are not gathering statistics or splitting\n # by row-group\n metadata = [p.path for p in dataset.pieces]\n if schema is None:\n schema = dataset.pieces[0].get_metadata().schema.to_arrow_schema()\n return (\n schema,\n metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )\n # We have not detected a _metadata file, and the user has specified\n # that they want to split by row-group and/or gather statistics.\n # This is the only case where we MUST scan all files to collect\n # metadata.\n if len(dataset.pieces) > 1:\n # Perform metadata collection in parallel.\n metadata = create_metadata_file(\n [p.path for p in dataset.pieces],\n root_dir=base,\n engine=cls,\n out_dir=False,\n fs=fs,\n )\n if schema is None:\n schema = metadata.schema.to_arrow_schema()\n else:\n for piece, fn in zip(dataset.pieces, fns):\n md = piece.get_metadata()\n if schema is None:\n schema = md.schema.to_arrow_schema()\n if fn_partitioned:\n md.set_file_path(piece.path.replace(base + fs.sep, \"\"))\n elif fn:\n md.set_file_path(fn)\n if metadata:\n _append_row_groups(metadata, md)\n else:\n metadata = md\n\n return (\n schema,\n metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.collect_file_metadata_Engine.collect_file_metadata.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.collect_file_metadata_Engine.collect_file_metadata.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 198, "end_line": 216, "span_ids": ["Engine.collect_file_metadata"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def collect_file_metadata(cls, path, fs, file_path):\n \"\"\"\n Collect parquet metadata from a file and set the file_path.\n\n Parameters\n ----------\n path: str\n Parquet-file path to extract metadata from.\n fs: FileSystem\n file_path: str\n Relative path 
to set as `file_path` in the metadata.\n\n Returns\n -------\n A metadata object. The specific type should be recognized\n by the aggregate_metadata method.\n \"\"\"\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.aggregate_metadata_Engine.aggregate_metadata.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py_Engine.aggregate_metadata_Engine.aggregate_metadata.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 218, "end_line": 241, "span_ids": ["Engine.aggregate_metadata"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Engine:\n\n @classmethod\n def aggregate_metadata(cls, meta_list, fs, out_path):\n \"\"\"\n Aggregate a list of metadata objects and optionally\n write out the final result as a _metadata file.\n\n Parameters\n ----------\n meta_list: list\n List of metadata objects to be aggregated into a single\n metadata object, and optionally written to disk. The\n specific element type can be engine specific.\n fs: FileSystem\n out_path: str or None\n Directory to write the final _metadata file. 
If None\n is specified, the aggregated metadata will be returned,\n and nothing will be written to disk.\n\n Returns\n -------\n If out_path is None, an aggregate metadata object is returned.\n Otherwise, None is returned.\n \"\"\"\n raise NotImplementedError()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_multiple_partitions_per_file_test_read_csv_include_path_column_with_multiple_partitions_per_file.with_filetexts_files_mod.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_include_path_column_with_multiple_partitions_per_file_test_read_csv_include_path_column_with_multiple_partitions_per_file.with_filetexts_files_mod.None_4", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 480, "end_line": 493, "span_ids": ["test_read_csv_include_path_column_with_multiple_partitions_per_file"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,files\", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]\n)\ndef test_read_csv_include_path_column_with_multiple_partitions_per_file(dd_read, files):\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", blocksize=\"10B\", include_path_column=True)\n assert df.npartitions > 3\n assert df.path.dtype == \"category\"\n assert has_known_categories(df.path)\n\n dfs = dd_read(\"2014-01-*.csv\", blocksize=\"10B\", include_path_column=True)\n result = dfs.compute()\n assert result.path.dtype == \"category\"\n assert has_known_categories(result.path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_compression_test_read_csv_compression.with_filetexts_renamed_fi.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_compression_test_read_csv_compression.with_filetexts_renamed_fi.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 720, "end_line": 740, "span_ids": ["test_read_csv_compression"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"blocksize\", [None, 10])\n@pytest.mark.parametrize(\"fmt\", compression_fmts)\ndef test_read_csv_compression(fmt, blocksize):\n if fmt and fmt not in compress:\n pytest.skip(\"compress function not provided for %s\" % fmt)\n suffix = {\"gzip\": \".gz\", \"bz2\": \".bz2\", \"zip\": \".zip\", \"xz\": \".xz\"}.get(fmt, \"\")\n files2 = valmap(compress[fmt], csv_files) if fmt else csv_files\n renamed_files = {k + suffix: v for k, v in files2.items()}\n with filetexts(renamed_files, mode=\"b\"):\n # This test is using `compression=\"infer\"` (the default) for\n # read_csv. The paths must have the appropriate extension.\n if fmt and blocksize:\n with pytest.warns(UserWarning):\n df = dd.read_csv(\"2014-01-*.csv\" + suffix, blocksize=blocksize)\n else:\n df = dd.read_csv(\"2014-01-*.csv\" + suffix, blocksize=blocksize)\n assert_eq(\n df.compute(scheduler=\"sync\").reset_index(drop=True),\n expected.reset_index(drop=True),\n check_dtype=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_engine_write_read_engines.return.pytest_mark_parametrize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_engine_write_read_engines.return.pytest_mark_parametrize_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 98, "end_line": 141, "span_ids": ["write_read_engines", "engine"], "tokens": 389}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(\n params=[\n pytest.param(\"fastparquet\", marks=FASTPARQUET_MARK),\n pytest.param(\"pyarrow-legacy\", marks=PYARROW_LE_MARK),\n pytest.param(\"pyarrow-dataset\", marks=PYARROW_DS_MARK),\n ]\n)\ndef engine(request):\n return request.param\n\n\ndef write_read_engines(**kwargs):\n \"\"\"Product of both engines for write/read:\n\n To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,\n or `mark_engine=reason` to apply to all parameters with that engine.\"\"\"\n backends = {\"pyarrow-dataset\", \"pyarrow-legacy\", \"fastparquet\"}\n\n # Skip if uninstalled\n skip_marks = {\n \"fastparquet\": FASTPARQUET_MARK,\n \"pyarrow-legacy\": PYARROW_LE_MARK,\n \"pyarrow-dataset\": PYARROW_DS_MARK,\n }\n marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}\n\n # Custom marks\n for kw, val in kwargs.items():\n kind, rest = kw.split(\"_\", 1)\n key = tuple(rest.split(\"_\"))\n if kind not in (\"xfail\", \"skip\") or len(key) > 2 or set(key) - backends:\n raise ValueError(\"unknown keyword %r\" % kw)\n val = getattr(pytest.mark, kind)(reason=val)\n if len(key) == 2:\n marks[key].append(val)\n else:\n for k in marks:\n if key in k:\n marks[k].append(val)\n\n return pytest.mark.parametrize(\n (\"write_engine\", \"read_engine\"),\n [pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],\n )", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pyarrow_fastparquet_msg_if_.else_.fp_pandas_xfail.write_read_engines_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_pyarrow_fastparquet_msg_if_.else_.fp_pandas_xfail.write_read_engines_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 144, "end_line": 173, "span_ids": ["impl:65"], "tokens": 335}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "pyarrow_fastparquet_msg = \"pyarrow schema and pandas metadata may disagree\"\nwrite_read_engines_xfail = write_read_engines(\n **{\n \"xfail_pyarrow-dataset_fastparquet\": pyarrow_fastparquet_msg,\n \"xfail_pyarrow-legacy_fastparquet\": pyarrow_fastparquet_msg,\n }\n)\n\nif (\n fastparquet\n and fastparquet_version < parse_version(\"0.5\")\n and PANDAS_GT_110\n and not PANDAS_GT_121\n):\n # a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned\n # categorical columns when using fastparquet 0.4.x, but this was (accidentally)\n # fixed in fastparquet 0.5.0\n fp_pandas_msg = \"pandas with fastparquet engine does not preserve index\"\n fp_pandas_xfail = write_read_engines(\n **{\n \"xfail_pyarrow-dataset_fastparquet\": pyarrow_fastparquet_msg,\n \"xfail_pyarrow-legacy_fastparquet\": pyarrow_fastparquet_msg,\n \"xfail_fastparquet_fastparquet\": fp_pandas_msg,\n \"xfail_fastparquet_pyarrow-dataset\": fp_pandas_msg,\n \"xfail_fastparquet_pyarrow-legacy\": fp_pandas_msg,\n }\n )\nelse:\n fp_pandas_msg = \"pandas with fastparquet engine does not preserve index\"\n fp_pandas_xfail = write_read_engines()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_local_test_local.for_column_in_df_columns_.assert_data_column_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_local_test_local.for_column_in_df_columns_.assert_data_column_o", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 226, "span_ids": ["test_local"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef 
test_local(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df = dd.from_pandas(data, chunksize=500)\n\n df.to_parquet(tmp, write_index=False, engine=write_engine)\n\n files = os.listdir(tmp)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(tmp, index=False, engine=read_engine)\n\n assert len(df2.divisions) > 1\n\n out = df2.compute(scheduler=\"sync\").reset_index()\n\n for column in df.columns:\n assert (data[column] == out[column]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categorical_test_categorical.assert_df_x_ddf2_x_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categorical_test_categorical.assert_df_x_ddf2_x_co", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 586, "end_line": 611, "span_ids": ["test_categorical"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_categorical(tmpdir, write_engine, read_engine):\n tmp = str(tmpdir)\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\"] * 100}, dtype=\"category\")\n ddf = dd.from_pandas(df, npartitions=3)\n dd.to_parquet(ddf, tmp, engine=write_engine)\n\n ddf2 = dd.read_parquet(tmp, categories=\"x\", engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2 = dd.read_parquet(tmp, categories=[\"x\"], engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n # autocat\n if read_engine == \"fastparquet\":\n ddf2 = dd.read_parquet(tmp, engine=read_engine)\n assert ddf2.compute().x.cat.categories.tolist() == [\"a\", \"b\", \"c\"]\n\n ddf2.loc[:1000].compute()\n assert assert_eq(df, ddf2)\n\n # dereference cats\n ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)\n\n ddf2.loc[:1000].compute()\n assert (df.x == ddf2.x.compute()).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_unnamed_index_test_categories_unnamed_index.assert_eq_ddf_index_ddf2": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_categories_unnamed_index_test_categories_unnamed_index.assert_eq_ddf_index_ddf2", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1027, "end_line": 1042, "span_ids": ["test_categories_unnamed_index"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categories_unnamed_index(tmpdir, engine):\n # Check that we can handle an unnamed categorical index\n # https://github.com/dask/dask/issues/6885\n\n tmpdir = str(tmpdir)\n\n df = pd.DataFrame(\n data={\"A\": [1, 2, 3], \"B\": [\"a\", \"a\", \"b\"]}, index=[\"x\", \"y\", \"y\"]\n )\n ddf = dd.from_pandas(df, npartitions=1)\n ddf = ddf.categorize(columns=[\"B\"])\n\n ddf.to_parquet(tmpdir, engine=engine)\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n\n assert_eq(ddf.index, ddf2.index, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filtering_pyarrow_dataset_test_filtering_pyarrow_dataset.assert_eq_df_ddf2_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filtering_pyarrow_dataset_test_filtering_pyarrow_dataset.assert_eq_df_ddf2_comput", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1391, "end_line": 1414, "span_ids": ["test_filtering_pyarrow_dataset"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_filtering_pyarrow_dataset(tmpdir, engine):\n pytest.importorskip(\"pyarrow\", minversion=\"1.0.0\")\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"aa\": range(100), \"bb\": [\"cat\", \"dog\"] * 50})\n ddf = dd.from_pandas(df, npartitions=10)\n ddf.to_parquet(fn, write_index=False, engine=engine)\n\n # Filtered read\n aa_lim = 40\n bb_val = \"dog\"\n filters = [[(\"aa\", \"<\", aa_lim), (\"bb\", \"==\", bb_val)]]\n ddf2 = dd.read_parquet(fn, index=False, engine=\"pyarrow-dataset\", filters=filters)\n\n # Check that partitions are filetered for \"aa\" filter\n nonempty = 0\n for part in ddf[ddf[\"aa\"] < aa_lim].partitions:\n nonempty += int(len(part.compute()) > 0)\n assert ddf2.npartitions == nonempty\n\n # Check that rows are filtered for \"aa\" and \"bb\" filters\n df = df[df[\"aa\"] < aa_lim]\n df = df[df[\"bb\"] == bb_val]\n assert_eq(df, ddf2.compute(), check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_no_meta_test_read_glob_yes_meta.assert_eq_ddf_ddf2_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_no_meta_test_read_glob_yes_meta.assert_eq_ddf_ddf2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2234, "end_line": 2252, "span_ids": ["test_read_glob_yes_meta", "test_read_glob_no_meta"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_read_glob_no_meta(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n ddf2 = dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"), engine=read_engine, gather_statistics=False\n )\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\n@write_read_engines()\ndef test_read_glob_yes_meta(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n paths = glob.glob(os.path.join(tmp_path, \"*.parquet\"))\n paths.append(os.path.join(tmp_path, \"_metadata\"))\n ddf2 = dd.read_parquet(paths, engine=read_engine, gather_statistics=False)\n assert_eq(ddf, ddf2, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_multi_test_getitem_optimization_multi.assert_eq_a3_b3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_multi_test_getitem_optimization_multi.assert_eq_a3_b3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2454, "end_line": 2469, "span_ids": ["test_getitem_optimization_multi"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_optimization_multi(tmpdir, engine):\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n fn = os.path.join(str(tmpdir))\n ddf.to_parquet(fn, engine=engine)\n\n a = dd.read_parquet(fn, engine=engine)[\"B\"]\n b = dd.read_parquet(fn, engine=engine)[[\"C\"]]\n c = dd.read_parquet(fn, engine=engine)[[\"C\", \"A\"]]\n\n a1, a2, 
a3 = dask.compute(a, b, c)\n b1, b2, b3 = dask.compute(a, b, c, optimize_graph=False)\n\n assert_eq(a1, b1)\n assert_eq(a2, b2)\n assert_eq(a3, b3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_getitem_skip_when_getting_read_parquet_test_read_parquet_getitem_skip_when_getting_read_parquet.assert_subgraph_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_parquet_getitem_skip_when_getting_read_parquet_test_read_parquet_getitem_skip_when_getting_read_parquet.assert_subgraph_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2796, "end_line": 2812, "span_ids": ["test_read_parquet_getitem_skip_when_getting_read_parquet"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_parquet_getitem_skip_when_getting_read_parquet(tmpdir, engine):\n # https://github.com/dask/dask/issues/5893\n pdf = pd.DataFrame({\"A\": [1, 2, 3, 4, 5, 6], \"B\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]})\n path = os.path.join(str(tmpdir), \"data.parquet\")\n pd_engine = \"pyarrow\" if engine.startswith(\"pyarrow\") else \"fastparquet\"\n pdf.to_parquet(path, engine=pd_engine)\n\n ddf = dd.read_parquet(path, engine=engine)\n a, b = dask.optimize(ddf[\"A\"], ddf)\n\n # Make sure we are still allowing the getitem optimization\n ddf = ddf[\"A\"]\n dsk = optimize_dataframe_getitem(ddf.dask, keys=[(ddf._name, 0)])\n read = [key for key in dsk.layers if key.startswith(\"read-parquet\")][0]\n subgraph = dsk.layers[read]\n assert isinstance(subgraph, DataFrameIOLayer)\n assert subgraph.columns == [\"A\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_multi_partition_none_index_false_test_multi_partition_none_index_false.assert_eq_ddf1_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_multi_partition_none_index_false_test_multi_partition_none_index_false.assert_eq_ddf1_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2998, "end_line": 3014, "span_ids": ["test_multi_partition_none_index_false"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multi_partition_none_index_false(tmpdir, engine):\n if engine.startswith(\"pyarrow\"):\n pytest.importorskip(\"pyarrow\", minversion=\"0.15.0\")\n write_engine = \"pyarrow\"\n else:\n assert engine == \"fastparquet\"\n write_engine = \"fastparquet\"\n\n # Write dataset without dask.to_parquet\n ddf1 = ddf.reset_index(drop=True)\n for i, part in enumerate(ddf1.partitions):\n path = tmpdir.join(f\"test.{i}.parquet\")\n part.compute().to_parquet(str(path), engine=write_engine)\n\n # Read back with index=False\n ddf2 = dd.read_parquet(str(tmpdir), index=False, engine=engine)\n assert_eq(ddf1, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_simple_test_pyarrow_dataset_simple.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_simple_test_pyarrow_dataset_simple.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3252, "end_line": 3261, "span_ids": ["test_pyarrow_dataset_simple"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_pyarrow_dataset_simple(tmpdir, engine):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df.set_index(\"a\", inplace=True, drop=True)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=engine)\n read_df = dd.read_parquet(fn, engine=\"pyarrow-dataset\")\n read_df.compute()\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_partitioned_test_pyarrow_dataset_partitioned.if_test_filter_.else_.assert_eq_ddf_read_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_partitioned_test_pyarrow_dataset_partitioned.if_test_filter_.else_.assert_eq_ddf_read_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3264, "end_line": 3281, "span_ids": ["test_pyarrow_dataset_partitioned"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"test_filter\", [True, False])\ndef test_pyarrow_dataset_partitioned(tmpdir, engine, test_filter):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=engine, partition_on=\"b\")\n read_df = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")] if test_filter else None,\n )\n\n if test_filter:\n assert_eq(ddf[ddf[\"b\"] == \"a\"].compute(), read_df.compute())\n else:\n assert_eq(ddf, read_df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_test_parquet_pyarrow_write_empty_metadata.assert_pandas_metadata_ge": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_test_parquet_pyarrow_write_empty_metadata.assert_pandas_metadata_ge", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3163, "end_line": 3202, "span_ids": ["test_parquet_pyarrow_write_empty_metadata"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_parquet_pyarrow_write_empty_metadata(tmpdir):\n # https://github.com/dask/dask/issues/6600\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int\", \"int\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df = dd.from_delayed([df_a, df_b, df_c])\n\n try:\n df.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=False,\n )\n\n except AttributeError:\n pytest.fail(\"Unexpected AttributeError\")\n\n # Check that metadata files where written\n files = os.listdir(tmpdir)\n assert \"_metadata\" in files\n assert \"_common_metadata\" in files\n\n # Check that the schema includes pandas_metadata\n schema_common = pq.ParquetFile(\n os.path.join(tmpdir, \"_common_metadata\")\n ).schema.to_arrow_schema()\n pandas_metadata = schema_common.pandas_metadata\n assert pandas_metadata\n assert pandas_metadata.get(\"index_columns\", False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_append_test_parquet_pyarrow_write_empty_metadata_append.df2_to_parquet_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_parquet_pyarrow_write_empty_metadata_append_test_parquet_pyarrow_write_empty_metadata_append.df2_to_parquet_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3205, "end_line": 3239, "span_ids": ["test_parquet_pyarrow_write_empty_metadata_append"], "tokens": 335}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_parquet_pyarrow_write_empty_metadata_append(tmpdir):\n # https://github.com/dask/dask/issues/6600\n tmpdir = str(tmpdir)\n\n df_a = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 1, 2, 2], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n df_b = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [1, 2, 1, 2], \"y\": [2, 0, 2, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df1 = dd.from_delayed([df_a, df_b])\n df1.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=False,\n )\n\n df_c = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [], \"y\": []}, dtype=(\"int64\", \"int64\")\n )\n df_d = dask.delayed(pd.DataFrame.from_dict)(\n {\"x\": [3, 3, 4, 4], \"y\": [1, 0, 1, 0]}, dtype=(\"int64\", \"int64\")\n )\n\n df2 = dd.from_delayed([df_c, df_d])\n df2.to_parquet(\n tmpdir,\n engine=\"pyarrow\",\n partition_on=[\"x\"],\n append=True,\n ignore_divisions=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_create_metadata_file_test_create_metadata_file.assert_fmd_num_row_groups": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_create_metadata_file_test_create_metadata_file.assert_fmd_num_row_groups", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3242, "end_line": 3298, "span_ids": ["test_create_metadata_file"], "tokens": 505}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"partition_on\", [None, \"a\"])\n@write_read_engines()\ndef test_create_metadata_file(tmpdir, write_engine, read_engine, partition_on):\n tmpdir = str(tmpdir)\n\n # Write ddf without a _metadata file\n df1 = pd.DataFrame({\"b\": range(100), 
\"a\": [\"A\", \"B\", \"C\", \"D\"] * 25})\n df1.index.name = \"myindex\"\n ddf1 = dd.from_pandas(df1, npartitions=10)\n ddf1.to_parquet(\n tmpdir,\n write_metadata_file=False,\n partition_on=partition_on,\n engine=write_engine,\n )\n\n # Add global _metadata file\n if partition_on:\n fns = glob.glob(os.path.join(tmpdir, partition_on + \"=*/*.parquet\"))\n else:\n fns = glob.glob(os.path.join(tmpdir, \"*.parquet\"))\n dd.io.parquet.create_metadata_file(\n fns,\n engine=\"pyarrow\",\n split_every=3, # Force tree reduction\n )\n\n # Check that we can now read the ddf\n # with the _metadata file present\n ddf2 = dd.read_parquet(\n tmpdir,\n gather_statistics=True,\n split_row_groups=False,\n engine=read_engine,\n index=\"myindex\", # python-3.6 CI\n )\n if partition_on:\n ddf1 = df1.sort_values(\"b\")\n ddf2 = ddf2.compute().sort_values(\"b\")\n ddf2.a = ddf2.a.astype(\"object\")\n assert_eq(ddf1, ddf2)\n\n # Check if we can avoid writing an actual file\n fmd = dd.io.parquet.create_metadata_file(\n fns,\n engine=\"pyarrow\",\n split_every=3, # Force tree reduction\n out_dir=False, # Avoid writing file\n )\n\n # Check that the in-memory metadata is the same as\n # the metadata in the file.\n fmd_file = pq.ParquetFile(os.path.join(tmpdir, \"_metadata\")).metadata\n assert fmd.num_rows == fmd_file.num_rows\n assert fmd.num_columns == fmd_file.num_columns\n assert fmd.num_row_groups == fmd_file.num_row_groups", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_write_overwrite_is_true_test_read_write_overwrite_is_true.assert_len_files_ddf2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_write_overwrite_is_true_test_read_write_overwrite_is_true.assert_len_files_ddf2", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3205, "end_line": 3230, "span_ids": ["test_read_write_overwrite_is_true"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_write_overwrite_is_true(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n\n # Create a Dask DataFrame if size (100, 10) with 5 partitions and write to local\n ddf = dd.from_pandas(\n pd.DataFrame(\n np.random.randint(low=0, high=100, size=(100, 10)),\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"],\n ),\n npartitions=5,\n )\n ddf = ddf.reset_index(drop=True)\n dd.to_parquet(ddf, tmpdir, engine=engine, overwrite=True)\n\n # Keep the contents of the DataFrame constatn but change the # of partitions\n ddf2 = ddf.repartition(npartitions=3)\n\n # Overwrite the existing Dataset with the new dataframe and evaluate\n # the number of files against the number of dask partitions\n dd.to_parquet(ddf2, tmpdir, engine=engine, overwrite=True)\n\n # Assert the # of files written are identical to the number 
of\n # Dask DataFrame partitions (we exclude _metadata and _common_metadata)\n files = os.listdir(tmpdir)\n files = [f for f in files if f not in [\"_common_metadata\", \"_metadata\"]]\n assert len(files) == ddf2.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_write_partition_on_overwrite_is_true_test_read_write_partition_on_overwrite_is_true.assert_len_files2_len_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_write_partition_on_overwrite_is_true_test_read_write_partition_on_overwrite_is_true.assert_len_files2_len_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3233, "end_line": 3265, "span_ids": ["test_read_write_partition_on_overwrite_is_true"], "tokens": 382}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_write_partition_on_overwrite_is_true(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n from pathlib import Path\n\n # Create a Dask DataFrame with 5 partitions and write to local, partitioning on columns A and B\n df = pd.DataFrame(\n np.vstack(\n (\n np.full((50, 3), 0),\n np.full((50, 3), 1),\n np.full((20, 3), 2),\n )\n )\n )\n df.columns = [\"A\", \"B\", \"C\"]\n ddf = dd.from_pandas(df, npartitions=5)\n dd.to_parquet(ddf, tmpdir, engine=engine, partition_on=[\"A\", \"B\"], overwrite=True)\n\n # Get the total number of files and directories from the original write\n files_ = Path(tmpdir).rglob(\"*\")\n files = [f.as_posix() for f in files_]\n # Keep the contents of the DataFrame constant but change the # of partitions\n ddf2 = ddf.repartition(npartitions=3)\n\n # Overwrite the existing Dataset with the new dataframe and evaluate\n # the number of files against the number of dask partitions\n dd.to_parquet(ddf2, tmpdir, engine=engine, partition_on=[\"A\", \"B\"], overwrite=True)\n # Get the total number of files and directories from the second write\n files2_ = Path(tmpdir).rglob(\"*\")\n files2 = [f.as_posix() for f in files2_]\n # After reducing the # of partitions and overwriting, we expect\n # there to be fewer total files than were originally written\n assert len(files2) < len(files)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_overwrite_raises_test_to_parquet_overwrite_raises.None_1.dd_to_parquet_ddf_tmpdir": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_to_parquet_overwrite_raises_test_to_parquet_overwrite_raises.None_1.dd_to_parquet_ddf_tmpdir", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3268, "end_line": 3277, "span_ids": ["test_to_parquet_overwrite_raises"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_parquet_overwrite_raises(tmpdir, engine):\n # https://github.com/dask/dask/issues/6824\n # Check that overwrite=True will raise an error if the\n # specified path is the current working directory\n df = pd.DataFrame({\"a\": range(12)})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(ValueError):\n dd.to_parquet(ddf, \"./\", engine=engine, overwrite=True)\n with pytest.raises(ValueError):\n dd.to_parquet(ddf, tmpdir, engine=engine, append=True, overwrite=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_dir_filter_test_dir_filter.assert_all": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_dir_filter_test_dir_filter.assert_all", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3280, "end_line": 3308, "span_ids": ["test_dir_filter"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dir_filter(tmpdir, engine):\n # github #6898\n df = pd.DataFrame.from_dict(\n {\n \"A\": {\n 0: 351.0,\n 1: 355.0,\n 2: 358.0,\n 3: 266.0,\n 4: 266.0,\n 5: 268.0,\n 6: np.nan,\n },\n \"B\": {\n 0: 2063.0,\n 1: 2051.0,\n 2: 1749.0,\n 3: 4281.0,\n 4: 3526.0,\n 5: 3462.0,\n 6: np.nan,\n },\n \"year\": {0: 2019, 1: 2019, 2: 2020, 3: 2020, 4: 2020, 5: 2020, 6: 2020},\n }\n )\n ddf = dask.dataframe.from_pandas(df, npartitions=1)\n ddf.to_parquet(tmpdir, partition_on=\"year\", engine=engine)\n dd.read_parquet(tmpdir, filters=[(\"year\", \"==\", 2020)], engine=engine)\n assert all", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_decimal_dtype_test_roundtrip_decimal_dtype.assert_eq_ddf1_ddf2_che": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_decimal_dtype_test_roundtrip_decimal_dtype.assert_eq_ddf1_ddf2_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3407, "end_line": 3425, "span_ids": ["test_roundtrip_decimal_dtype"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_roundtrip_decimal_dtype(tmpdir):\n # https://github.com/dask/dask/issues/6948\n tmpdir = str(tmpdir)\n\n data = [\n {\n \"ts\": pd.to_datetime(\"2021-01-01\", utc=\"Europe/Berlin\"),\n \"col1\": Decimal(\"123.00\"),\n }\n for i in range(23)\n ]\n ddf1 = dd.from_pandas(pd.DataFrame(data), npartitions=1)\n\n ddf1.to_parquet(path=tmpdir, engine=\"pyarrow\")\n ddf2 = dd.read_parquet(tmpdir, engine=\"pyarrow\")\n\n assert ddf1[\"col1\"].dtype == ddf2[\"col1\"].dtype\n assert_eq(ddf1, ddf2, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_padded_get_unsorted_columns.return.order": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_merge_asof_padded_get_unsorted_columns.return.order", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 772, "end_line": 803, "span_ids": ["get_unsorted_columns", "merge_asof_padded"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_asof_padded(left, right, prev=None, next=None, **kwargs):\n \"\"\"merge_asof but potentially adding rows to the beginning/end of right\"\"\"\n frames = []\n if prev is not None:\n frames.append(prev)\n frames.append(right)\n if next is not None:\n frames.append(next)\n\n frame = pd.concat(frames)\n result = pd.merge_asof(left, frame, **kwargs)\n # pd.merge_asof() resets index name (and dtype) if left is empty df\n if result.index.name != left.index.name:\n result.index.name = left.index.name\n return result\n\n\ndef get_unsorted_columns(frames):\n \"\"\"\n Determine the unsorted column order.\n\n This should match the output of concat([frames], sort=False)\n \"\"\"\n new_columns = pd.concat([frame._meta for frame in frames]).columns\n order = []\n for frame in frames:\n order.append(new_columns.get_indexer_for(frame.columns))\n\n order = np.concatenate(order)\n order = pd.unique(order)\n order = new_columns.take(order)\n return order", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_sum_intna_test_divmod.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_sum_intna_test_divmod.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1315, "end_line": 1336, "span_ids": ["test_divmod", "test_sum_intna"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sum_intna():\n a = pd.Series([1, None, 2], dtype=pd.Int32Dtype())\n b = dd.from_pandas(a, 2)\n assert_eq(a.sum(), b.sum())\n\n\ndef test_divmod():\n df1 = pd.Series(np.random.rand(10))\n df2 = pd.Series(np.random.rand(10))\n\n ddf1 = dd.from_pandas(df1, npartitions=3)\n ddf2 = dd.from_pandas(df2, npartitions=3)\n\n result = divmod(ddf1, 2.0)\n expected = divmod(df1, 2.0)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = divmod(ddf1, ddf2)\n expected = divmod(df1, df2)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_with_agg_test_groupby_dropna_with_agg.assert_eq_expected_actua": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_dropna_with_agg_test_groupby_dropna_with_agg.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2285, "end_line": 2298, "span_ids": ["test_groupby_dropna_with_agg"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n not dask.dataframe.utils.PANDAS_GT_110,\n reason=\"Should work starting from pandas 1.1.0\",\n)\ndef test_groupby_dropna_with_agg():\n # https://github.com/dask/dask/issues/6986\n df = pd.DataFrame(\n {\"id1\": [\"a\", None, \"b\"], \"id2\": [1, 2, None], \"v1\": [4.5, 5.5, None]}\n )\n expected = df.groupby([\"id1\", \"id2\"], dropna=False).agg(\"sum\")\n\n ddf = dd.from_pandas(df, 1, sort=False)\n actual = ddf.groupby([\"id1\", \"id2\"], dropna=False).agg(\"sum\")\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_observed_with_agg_test_rounding_negative_var.assert_eq_ddf_groupby_id": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_observed_with_agg_test_rounding_negative_var.assert_eq_ddf_groupby_id", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2301, "end_line": 2323, "span_ids": ["test_rounding_negative_var", "test_groupby_observed_with_agg"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_observed_with_agg():\n df = pd.DataFrame(\n {\n \"cat_1\": pd.Categorical(list(\"AB\"), categories=list(\"ABCDE\")),\n \"cat_2\": pd.Categorical([1, 2], categories=[1, 2, 3]),\n \"value_1\": np.random.uniform(size=2),\n }\n )\n expected = df.groupby([\"cat_1\", \"cat_2\"], observed=True).agg(\"sum\")\n\n ddf = dd.from_pandas(df, 2)\n actual = ddf.groupby([\"cat_1\", \"cat_2\"], observed=True).agg(\"sum\")\n assert_eq(expected, actual)\n\n\ndef test_rounding_negative_var():\n x = [-0.00179999999 for _ in range(10)]\n ids = [1 for _ in range(5)] + [2 for _ in range(5)]\n\n df = pd.DataFrame({\"ids\": ids, \"x\": x})\n\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.groupby(\"ids\").x.std(), df.groupby(\"ids\").x.std())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_multiindex_test_groupby_split_out_multiindex.assert_eq_ddf_result_ddf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_split_out_multiindex_test_groupby_split_out_multiindex.assert_eq_ddf_result_ddf", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2326, "end_line": 2348, "span_ids": ["test_groupby_split_out_multiindex"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_out\", [2, 3])\n@pytest.mark.parametrize(\"column\", [[\"b\", \"c\"], [\"b\", \"d\"], [\"b\", \"e\"]])\ndef test_groupby_split_out_multiindex(split_out, column):\n df = pd.DataFrame(\n {\n \"a\": np.arange(8),\n \"b\": [1, 0, 0, 2, 1, 1, 2, 0],\n \"c\": [0, 1] * 4,\n \"d\": [\"dog\", \"cat\", \"cat\", \"dog\", \"dog\", \"dog\", \"cat\", 
\"bird\"],\n }\n ).fillna(0)\n df[\"e\"] = df[\"d\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf_result_so1 = (\n ddf.groupby(column).a.mean(split_out=1).compute().sort_values().dropna()\n )\n\n ddf_result = (\n ddf.groupby(column).a.mean(split_out=split_out).compute().sort_values().dropna()\n )\n\n assert_eq(ddf_result, ddf_result_so1, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_true_split_out_test_groupby_sort_true_split_out.with_pytest_raises_NotImp.M_sum_ddf_groupby_x_so": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_sort_true_split_out_test_groupby_sort_true_split_out.with_pytest_raises_NotImp.M_sum_ddf_groupby_x_so", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2428, "end_line": 2439, "span_ids": ["test_groupby_sort_true_split_out"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_sort_true_split_out():\n df = pd.DataFrame({\"x\": [4, 2, 1, 2, 3, 1], \"y\": [1, 2, 3, 4, 5, 6]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n # Works fine for split_out==1 or sort=False/None\n M.sum(ddf.groupby(\"x\", sort=True), split_out=1)\n M.sum(ddf.groupby(\"x\", sort=False), split_out=2)\n M.sum(ddf.groupby(\"x\"), split_out=2)\n\n with pytest.raises(NotImplementedError):\n # Cannot use sort=True with split_out>1 (for now)\n M.sum(ddf.groupby(\"x\", sort=True), split_out=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categorical_observed_test_groupby_aggregate_categorical_observed.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_aggregate_categorical_observed_test_groupby_aggregate_categorical_observed.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2428, "end_line": 2472, "span_ids": ["test_groupby_aggregate_categorical_observed"], "tokens": 475}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not PANDAS_GT_110, reason=\"observed only supported for 
newer pandas\"\n)\n@pytest.mark.parametrize(\"known_cats\", [True, False])\n@pytest.mark.parametrize(\"ordered_cats\", [True, False])\n@pytest.mark.parametrize(\"groupby\", [\"cat_1\", [\"cat_1\", \"cat_2\"]])\n@pytest.mark.parametrize(\"observed\", [True, False])\ndef test_groupby_aggregate_categorical_observed(\n known_cats, ordered_cats, agg_func, groupby, observed\n):\n if agg_func in [\"cov\", \"corr\", \"nunique\"]:\n pytest.skip(\"Not implemented for DataFrameGroupBy yet.\")\n if agg_func in [\"sum\", \"count\", \"prod\"] and groupby != \"cat_1\":\n pytest.skip(\"Gives zeros rather than nans.\")\n if agg_func in [\"std\", \"var\"] and observed:\n pytest.skip(\"Can't calculate observed with all nans\")\n\n pdf = pd.DataFrame(\n {\n \"cat_1\": pd.Categorical(\n list(\"AB\"), categories=list(\"ABCDE\"), ordered=ordered_cats\n ),\n \"cat_2\": pd.Categorical([1, 2], categories=[1, 2, 3], ordered=ordered_cats),\n \"value_1\": np.random.uniform(size=2),\n }\n )\n ddf = dd.from_pandas(pdf, 2)\n\n if not known_cats:\n ddf[\"cat_1\"] = ddf[\"cat_1\"].cat.as_unknown()\n ddf[\"cat_2\"] = ddf[\"cat_2\"].cat.as_unknown()\n\n def agg(grp, **kwargs):\n return getattr(grp, agg_func)(**kwargs)\n\n # only include numeric columns when passing to \"min\" or \"max\"\n # pandas default is numeric_only=False\n if ordered_cats is False and agg_func in [\"min\", \"max\"] and groupby == \"cat_1\":\n pdf = pdf[[\"cat_1\", \"value_1\"]]\n ddf = ddf[[\"cat_1\", \"value_1\"]]\n\n assert_eq(\n agg(pdf.groupby(groupby, observed=observed)),\n agg(ddf.groupby(groupby, observed=observed)),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_timestamp_str_test_getitem_timestamp_str.assert_eq_df_2011_2015": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_getitem_timestamp_str_test_getitem_timestamp_str.assert_eq_df_2011_2015", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 424, "end_line": 450, "span_ids": ["test_getitem_timestamp_str"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_timestamp_str():\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n if PANDAS_GT_120:\n with pytest.warns(\n FutureWarning, match=\"Indexing a DataFrame with a datetimelike\"\n ):\n assert_eq(df.loc[\"2011-01-02\"], ddf[\"2011-01-02\"])\n else:\n assert_eq(df.loc[\"2011-01-02\"], ddf[\"2011-01-02\"])\n assert_eq(df[\"2011-01-02\":\"2011-01-10\"], ddf[\"2011-01-02\":\"2011-01-10\"])\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.date_range(\"2011-01-01\", freq=\"D\", periods=100),\n )\n ddf = dd.from_pandas(df, 50)\n 
assert_eq(df.loc[\"2011-01\"], ddf.loc[\"2011-01\"])\n assert_eq(df.loc[\"2011\"], ddf.loc[\"2011\"])\n\n assert_eq(df[\"2011-01\":\"2012-05\"], ddf[\"2011-01\":\"2012-05\"])\n assert_eq(df[\"2011\":\"2015\"], ddf[\"2011\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_period_str_test_loc_period_str.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_period_str_test_loc_period_str.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 451, "end_line": 478, "span_ids": ["test_loc_period_str"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n not PANDAS_GT_110, reason=\".loc partial index with PeriodIndex not yet supported\"\n)\ndef test_loc_period_str():\n # .loc with PeriodIndex doesn't support partial string indexing\n # https://github.com/pydata/pandas/issues/13429\n # -> this started working in pandas 1.1\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.period_range(\"2011-01-01\", freq=\"H\", periods=100),\n )\n ddf = dd.from_pandas(df, 10)\n\n # partial string slice\n assert_eq(df.loc[\"2011-01-02\"], ddf.loc[\"2011-01-02\"])\n assert_eq(df.loc[\"2011-01-02\":\"2011-01-10\"], ddf.loc[\"2011-01-02\":\"2011-01-10\"])\n # same reso, dask result is always DataFrame\n\n df = pd.DataFrame(\n {\"A\": np.random.randn(100), \"B\": np.random.randn(100)},\n index=pd.period_range(\"2011-01-01\", freq=\"D\", periods=100),\n )\n ddf = dd.from_pandas(df, 50)\n assert_eq(df.loc[\"2011-01\"], ddf.loc[\"2011-01\"])\n assert_eq(df.loc[\"2011\"], ddf.loc[\"2011\"])\n\n assert_eq(df.loc[\"2011-01\":\"2012-05\"], ddf.loc[\"2011-01\":\"2012-05\"])\n assert_eq(df.loc[\"2011\":\"2015\"], ddf.loc[\"2011\":\"2015\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_cov_test_rolling_cov.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_cov_test_rolling_cov.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 178, "end_line": 189, "span_ids": ["test_rolling_cov"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"window\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"center\", [True, False])\ndef test_rolling_cov(window, center):\n # DataFrame\n prolling = df.drop(\"a\", axis=1).rolling(window, center=center)\n drolling = ddf.drop(\"a\", axis=1).rolling(window, center=center)\n assert_eq(prolling.cov(), drolling.cov())\n\n # Series\n prolling = df.b.rolling(window, center=center)\n drolling = ddf.b.rolling(window, center=center)\n assert_eq(prolling.cov(), drolling.cov())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timezone_test_set_index_timezone.if_PANDAS_GT_120_.else_.with_pytest_raises_TypeEr.d2_divisions_0_s2badt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_timezone_test_set_index_timezone.if_PANDAS_GT_120_.else_.with_pytest_raises_TypeEr.d2_divisions_0_s2badt", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 630, "end_line": 656, "span_ids": ["test_set_index_timezone"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_timezone():\n s_naive = pd.Series(pd.date_range(\"20130101\", periods=3))\n s_aware = pd.Series(pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\"))\n df = pd.DataFrame({\"tz\": s_aware, \"notz\": s_naive})\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"notz\", npartitions=1)\n s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)\n assert d1.divisions[0] == s_naive[0] == s1[0]\n assert d1.divisions[-1] == s_naive[2] == s1[2]\n\n # We currently lose \"freq\". 
Converting data with pandas-defined dtypes\n # to numpy or pure Python can be lossy like this.\n d2 = d.set_index(\"tz\", npartitions=1)\n s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)\n assert d2.divisions[0] == s2[0]\n assert d2.divisions[-1] == s2[2]\n assert d2.divisions[0].tz == s2[0].tz\n assert d2.divisions[0].tz is not None\n s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)\n if PANDAS_GT_120:\n # starting with pandas 1.2.0, comparing equality of timestamps with different\n # timezones returns False instead of raising an error\n assert not d2.divisions[0] == s2badtype[0]\n else:\n with pytest.raises(TypeError):\n d2.divisions[0] == s2badtype[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_npartitions_test_set_index_npartitions.assert_output_npartitions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_npartitions_test_set_index_npartitions.assert_output_npartitions", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 659, "end_line": 668, "span_ids": ["test_set_index_npartitions"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_npartitions():\n # https://github.com/dask/dask/issues/6974\n data = pd.DataFrame(\n index=pd.Index(\n [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\"]\n )\n )\n data = dd.from_pandas(data, npartitions=2)\n output = data.reset_index().set_index(\"index\", npartitions=1)\n assert output.npartitions == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_datetime_precision_test_set_index_datetime_precision.assert_eq_ddf_df_set_ind": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_datetime_precision_test_set_index_datetime_precision.assert_eq_ddf_df_set_ind", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 671, "end_line": 688, "span_ids": ["test_set_index_datetime_precision"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"unit\", [\"ns\", \"us\"])\ndef test_set_index_datetime_precision(unit):\n # https://github.com/dask/dask/issues/6864\n\n df = pd.DataFrame(\n [\n [1567703791155681, 1],\n [1567703792155681, 2],\n [1567703790155681, 0],\n [1567703793155681, 3],\n ],\n columns=[\"ts\", \"rank\"],\n )\n df.ts = pd.to_datetime(df.ts, unit=unit)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf = ddf.set_index(\"ts\")\n\n assert_eq(ddf, df.set_index(\"ts\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_overlap_test_set_index_overlap_2.assert_ddf2_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_overlap_test_set_index_overlap_2.assert_ddf2_npartitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1043, "end_line": 1062, "span_ids": ["test_set_index_overlap", "test_set_index_overlap_2"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_overlap():\n A = pd.DataFrame({\"key\": [1, 2, 3, 4, 4, 5, 6, 7], \"value\": list(\"abcd\" * 2)})\n a = dd.from_pandas(A, npartitions=2)\n a = a.set_index(\"key\", sorted=True)\n b = a.repartition(divisions=a.divisions)\n assert_eq(a, b)\n\n\ndef test_set_index_overlap_2():\n data = pd.DataFrame(\n index=pd.Index(\n [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\"],\n name=\"index\",\n )\n )\n ddf1 = dd.from_pandas(data, npartitions=2)\n ddf2 = ddf1.reset_index().repartition(8).set_index(\"index\", sorted=True)\n\n assert_eq(ddf1, ddf2)\n assert ddf2.npartitions == 8", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_is_dataframe_like_test_is_dataframe_like.assert_is_index_like_wrap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_is_dataframe_like_test_is_dataframe_like.assert_is_index_like_wrap", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 393, "end_line": 451, "span_ids": ["test_is_dataframe_like"], "tokens": 443}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "@pytest.mark.parametrize(\"frame_value_counts\", [True, False])\ndef test_is_dataframe_like(monkeypatch, frame_value_counts):\n # When we drop support for pandas 1.0, this compat check can\n # be dropped\n if frame_value_counts:\n monkeypatch.setattr(pd.DataFrame, \"value_counts\", lambda x: None, raising=False)\n\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n assert is_dataframe_like(df)\n assert is_dataframe_like(ddf)\n assert not is_dataframe_like(df.x)\n assert not is_dataframe_like(ddf.x)\n assert not is_dataframe_like(df.index)\n assert not is_dataframe_like(ddf.index)\n assert not is_dataframe_like(pd.DataFrame)\n\n assert not is_series_like(df)\n assert not is_series_like(ddf)\n assert is_series_like(df.x)\n assert is_series_like(ddf.x)\n assert not is_series_like(df.index)\n assert not is_series_like(ddf.index)\n assert not is_series_like(pd.Series)\n\n assert not is_index_like(df)\n assert not is_index_like(ddf)\n assert not is_index_like(df.x)\n assert not is_index_like(ddf.x)\n assert is_index_like(df.index)\n assert is_index_like(ddf.index)\n assert not is_index_like(pd.Index)\n\n # The following checks support of class wrappers, which\n # requires the comparions of `x.__class__` instead of `type(x)`\n class DataFrameWrapper:\n __class__ = pd.DataFrame\n\n wrap = DataFrameWrapper()\n wrap.dtypes = None\n wrap.columns = None\n assert is_dataframe_like(wrap)\n\n class SeriesWrapper:\n __class__ = pd.Series\n\n wrap = SeriesWrapper()\n wrap.dtype = None\n wrap.name = None\n assert is_series_like(wrap)\n\n class IndexWrapper:\n __class__ = pd.Index\n\n wrap = IndexWrapper()\n wrap.dtype = None\n wrap.name = None\n assert is_index_like(wrap)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar_ProgressBar._timer_func.while_self__running_.time_sleep_self__dt_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar_ProgressBar._timer_func.while_self__running_.time_sleep_self__dt_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/progress.py", "file_name": "progress.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 125, "span_ids": ["ProgressBar._finish", "ProgressBar._pretask", "ProgressBar._timer_func", "ProgressBar", "ProgressBar.__init__", "ProgressBar._start"], "tokens": 804}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ProgressBar(Callback):\n \"\"\"A progress bar for dask.\n\n Parameters\n ----------\n minimum : int, optional\n Minimum time threshold in seconds before displaying a progress bar.\n Default is 0 (always display)\n width : int, optional\n Width of the bar\n dt : float, optional\n Update resolution in seconds, default is 0.1 seconds\n out : file object, optional\n File object to which the progress bar will be written\n It can be ``sys.stdout``, ``sys.stderr`` or any other file object able to 
write ``str`` objects\n Default is ``sys.stdout``\n\n Examples\n --------\n\n Below we create a progress bar with a minimum threshold of 1 second before\n displaying. For cheap computations nothing is shown:\n\n >>> with ProgressBar(minimum=1.0): # doctest: +SKIP\n ... out = some_fast_computation.compute()\n\n But for expensive computations a full progress bar is displayed:\n\n >>> with ProgressBar(minimum=1.0): # doctest: +SKIP\n ... out = some_slow_computation.compute()\n [########################################] | 100% Completed | 10.4 s\n\n The duration of the last computation is available as an attribute\n\n >>> pbar = ProgressBar() # doctest: +SKIP\n >>> with pbar: # doctest: +SKIP\n ... out = some_computation.compute()\n [########################################] | 100% Completed | 10.4 s\n >>> pbar.last_duration # doctest: +SKIP\n 10.4\n\n You can also register a progress bar so that it displays for all\n computations:\n\n >>> pbar = ProgressBar() # doctest: +SKIP\n >>> pbar.register() # doctest: +SKIP\n >>> some_slow_computation.compute() # doctest: +SKIP\n [########################################] | 100% Completed | 10.4 s\n \"\"\"\n\n def __init__(self, minimum=0, width=40, dt=0.1, out=None):\n if out is None:\n # Warning, on windows, stdout can still be None if\n # an application is started as GUI Application\n # https://docs.python.org/3/library/sys.html#sys.__stderr__\n out = sys.stdout\n self._minimum = minimum\n self._width = width\n self._dt = dt\n self._file = out\n self.last_duration = 0\n\n def _start(self, dsk):\n self._state = None\n self._start_time = default_timer()\n # Start background thread\n self._running = True\n self._timer = threading.Thread(target=self._timer_func)\n self._timer.daemon = True\n self._timer.start()\n\n def _pretask(self, key, dsk, state):\n self._state = state\n if self._file is not None:\n self._file.flush()\n\n def _finish(self, dsk, state, errored):\n self._running = False\n self._timer.join()\n elapsed = default_timer() - self._start_time\n self.last_duration = elapsed\n if elapsed < self._minimum:\n return\n if not errored:\n self._draw_bar(1, elapsed)\n else:\n self._update_bar(elapsed)\n if self._file is not None:\n self._file.write(\"\\n\")\n self._file.flush()\n\n def _timer_func(self):\n \"\"\"Background thread for updating the progress bar\"\"\"\n while self._running:\n elapsed = default_timer() - self._start_time\n if elapsed > self._minimum:\n self._update_bar(elapsed)\n time.sleep(self._dt)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar._update_bar_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_ProgressBar._update_bar_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/progress.py", "file_name": "progress.py", "file_type": "text/x-python", "category": "implementation", "start_line": 123, "end_line": 144, "span_ids": ["ProgressBar._draw_bar", "ProgressBar._update_bar"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ProgressBar(Callback):\n\n def _update_bar(self, elapsed):\n s = self._state\n if not s:\n self._draw_bar(0, elapsed)\n return\n ndone = len(s[\"finished\"])\n ntasks = sum(len(s[k]) for k in [\"ready\", \"waiting\", \"running\"]) + ndone\n if ndone < ntasks:\n self._draw_bar(ndone / ntasks if ntasks else 0, elapsed)\n\n def _draw_bar(self, frac, elapsed):\n bar = \"#\" * int(self._width * frac)\n percent = int(100 * frac)\n elapsed = format_time(elapsed)\n msg = \"\\r[{0:<{1}}] | {2}% Completed | {3}\".format(\n bar, self._width, percent, elapsed\n )\n with contextlib.suppress(ValueError):\n if self._file is not None:\n self._file.write(msg)\n self._file.flush()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_None_2_order.init_stack._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_None_2_order.init_stack._", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 81, "end_line": 162, "span_ids": ["imports", "order"], "tokens": 781}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": " # noqa: F401\n\n\ndef order(dsk, dependencies=None):\n \"\"\"Order nodes in dask graph\n\n This produces an ordering over our tasks that we use to break ties when\n executing. We do this ahead of time to reduce a bit of stress on the\n scheduler and also to assist in static analysis.\n\n This currently traverses the graph as a single-threaded scheduler would\n traverse it. It breaks ties in the following ways:\n\n 1. Begin at a leaf node that is a dependency of a root node that has the\n largest subgraph (start hard things first)\n 2. Prefer tall branches with few dependents (start hard things first and\n try to avoid memory usage)\n 3. Prefer dependents that are dependencies of root nodes that have\n the smallest subgraph (do small goals that can terminate quickly)\n\n Examples\n --------\n >>> dsk = {'a': 1, 'b': 2, 'c': (inc, 'a'), 'd': (add, 'b', 'c')}\n >>> order(dsk)\n {'a': 0, 'c': 1, 'b': 2, 'd': 3}\n \"\"\"\n if not dsk:\n return {}\n\n if dependencies is None:\n dependencies = {k: get_dependencies(dsk, k) for k in dsk}\n\n dependents = reverse_dict(dependencies)\n num_needed, total_dependencies = ndependencies(dependencies, dependents)\n metrics = graph_metrics(dependencies, dependents, total_dependencies)\n\n if len(metrics) != len(dsk):\n cycle = getcycle(dsk, None)\n raise RuntimeError(\n \"Cycle detected between the following keys:\\n -> %s\"\n % \"\\n -> \".join(str(x) for x in cycle)\n )\n\n # Single root nodes that depend on everything. 
These cause issues for\n # the current ordering algorithm, since we often hit the root node\n # and fall back to the key tie-breaker to choose which immediate dependency\n # to finish next, rather than finishing off subtrees.\n # So under the special case of a single root node that depends on the entire\n # tree, we skip processing it normally.\n # See https://github.com/dask/dask/issues/6745\n root_nodes = {k for k, v in dependents.items() if not v}\n skip_root_node = len(root_nodes) == 1 and len(dsk) > 1\n\n # Leaf nodes. We choose one--the initial node--for each weakly connected subgraph.\n # Let's calculate the `initial_stack_key` as we determine the `init_stack` set.\n init_stack = {\n # First prioritize large, tall groups, then prioritize the same as ``dependents_key``.\n key: (\n # at a high-level, work towards a large goal (and prefer tall and narrow)\n -max_dependencies,\n num_dependents - max_heights,\n # tactically, finish small connected jobs first\n min_dependencies,\n num_dependents - min_heights, # prefer tall and narrow\n -total_dependents, # take a big step\n # try to be memory efficient\n num_dependents,\n # tie-breaker\n StrComparable(key),\n )\n for key, num_dependents, (\n total_dependents,\n min_dependencies,\n max_dependencies,\n min_heights,\n max_heights,\n ) in (\n (key, len(dependents[key]), metrics[key])\n for key, val in dependencies.items()\n if not val\n )\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order._initial_stack_key_cho_order.dependents_key.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order._initial_stack_key_cho_order.dependents_key.return._", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 163, "end_line": 179, "span_ids": ["order"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n # `initial_stack_key` chooses which task to run at the very beginning.\n # This value is static, so we pre-compute it as the value of this dict.\n initial_stack_key = init_stack.__getitem__\n\n def dependents_key(x):\n \"\"\"Choose a path from our starting task to our tactical goal\n\n This path is connected to a large goal, but focuses on completing\n a small goal and being memory efficient.\n \"\"\"\n return (\n # Focus on being memory-efficient\n len(dependents[x]) - len(dependencies[x]) + num_needed[x],\n -metrics[x][3], # min_heights\n # tie-breaker\n StrComparable(x),\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.dependencies_key_order.dependencies_key.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.dependencies_key_order.dependencies_key.return._", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 181, "end_line": 210, "span_ids": ["order"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n # ... other code\n\n def dependencies_key(x):\n \"\"\"Choose which dependency to run as part of a reverse DFS\n\n This is very similar to both ``initial_stack_key``.\n \"\"\"\n num_dependents = len(dependents[x])\n (\n total_dependents,\n min_dependencies,\n max_dependencies,\n min_heights,\n max_heights,\n ) = metrics[x]\n # Prefer short and narrow instead of tall in narrow, because we're going in\n # reverse along dependencies.\n return (\n # at a high-level, work towards a large goal (and prefer short and narrow)\n -max_dependencies,\n num_dependents + max_heights,\n # tactically, finish small connected jobs first\n min_dependencies,\n num_dependents + min_heights, # prefer short and narrow\n -total_dependencies[x], # go where the work is\n # try to be memory efficient\n num_dependents - len(dependencies[x]) + num_needed[x],\n num_dependents,\n total_dependents, # already found work, so don't add more\n # tie-breaker\n StrComparable(x),\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_schema_is_complete_test_schema_is_complete.test_matches_config_sche": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_schema_is_complete_test_schema_is_complete.test_matches_config_sche", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 468, "span_ids": ["test_schema_is_complete"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_schema_is_complete():\n config_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask.yaml\")\n schema_fn = os.path.join(os.path.dirname(__file__), \"..\", \"dask-schema.yaml\")\n\n with open(config_fn) as f:\n config = yaml.safe_load(f)\n\n with open(schema_fn) as f:\n schema = yaml.safe_load(f)\n\n def test_matches(c, s):\n for k, v in c.items():\n if list(c) != list(s[\"properties\"]):\n raise ValueError(\n \"\\nThe dask.yaml and dask-schema.yaml files are not in sync.\\n\"\n \"This usually happens when we add a new configuration value,\\n\"\n \"but don't add the schema of that value to the dask-schema.yaml file\\n\"\n \"Please modify these files to include the missing values: \\n\\n\"\n \" dask.yaml: {}\\n\"\n \" dask-schema.yaml: {}\\n\\n\"\n \"Examples in these files should be a good start, \\n\"\n \"even if you are not familiar with the jsonschema spec\".format(\n sorted(c), sorted(s[\"properties\"])\n )\n )\n if isinstance(v, dict):\n test_matches(c[k], s[\"properties\"][k])\n\n test_matches(config, schema)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_deprecations_test_get_override_with.with_dask_config_set_fo.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_deprecations_test_get_override_with.with_dask_config_set_fo.None_7", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 471, "end_line": 493, "span_ids": ["test_deprecations", "test_get_override_with"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deprecations():\n with pytest.warns(Warning) as info:\n with dask.config.set(fuse_ave_width=123):\n assert 
dask.config.get(\"optimization.fuse.ave-width\") == 123\n\n assert \"optimization.fuse.ave-width\" in str(info[0].message)\n\n\ndef test_get_override_with():\n with dask.config.set({\"foo\": \"bar\"}):\n # If override_with is None get the config key\n assert dask.config.get(\"foo\") == \"bar\"\n assert dask.config.get(\"foo\", override_with=None) == \"bar\"\n\n # Otherwise pass the default straight through\n assert dask.config.get(\"foo\", override_with=\"baz\") == \"baz\"\n assert dask.config.get(\"foo\", override_with=False) is False\n assert dask.config.get(\"foo\", override_with=True) is True\n assert dask.config.get(\"foo\", override_with=123) == 123\n assert dask.config.get(\"foo\", override_with={\"hello\": \"world\"}) == {\n \"hello\": \"world\"\n }\n assert dask.config.get(\"foo\", override_with=[\"one\"]) == [\"one\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_pytest_test_with_get.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_context.py_pytest_test_with_get.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_context.py", "file_name": "test_context.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 26, "span_ids": ["imports", "test_with_get"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask\nfrom dask.context import globalmethod\n\n\ndef test_with_get():\n da = pytest.importorskip(\"dask.array\")\n var = [0]\n\n def myget(dsk, keys, **kwargs):\n var[0] = var[0] + 1\n return dask.get(dsk, keys, **kwargs)\n\n x = da.ones(10, chunks=(5,))\n\n assert x.sum().compute() == 10\n assert var[0] == 0\n\n with dask.config.set(scheduler=myget):\n assert x.sum().compute() == 10\n assert var[0] == 1\n\n # Make sure we've cleaned up\n assert x.sum().compute() == 10\n assert var[0] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_zarr_distributed_roundtrip_test_local_scheduler.asyncio_get_event_loop_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_zarr_distributed_roundtrip_test_local_scheduler.asyncio_get_event_loop_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 216, "end_line": 262, "span_ids": ["test_scheduler_equals_client", "test_local_scheduler", "test_zarr_distributed_roundtrip", "test_zarr_in_memory_distributed_err", "test_await"], "tokens": 356}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_zarr_distributed_roundtrip(c):\n da = pytest.importorskip(\"dask.array\")\n pytest.importorskip(\"zarr\")\n\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(d)\n a2 = da.from_zarr(d)\n da.assert_eq(a, a2, scheduler=c)\n assert a2.chunks == a.chunks\n\n\ndef test_zarr_in_memory_distributed_err(c):\n da = pytest.importorskip(\"dask.array\")\n zarr = pytest.importorskip(\"zarr\")\n\n chunks = (1, 1)\n a = da.ones((3, 3), chunks=chunks)\n z = zarr.zeros_like(a, chunks=chunks)\n\n with pytest.raises(RuntimeError):\n a.to_zarr(z)\n\n\ndef test_scheduler_equals_client(c):\n x = delayed(lambda: 1)()\n assert x.compute(scheduler=c) == 1\n assert c.run_on_scheduler(lambda dask_scheduler: dask_scheduler.story(x.key))\n\n\n@gen_cluster(client=True)\nasync def test_await(c, s, a, b):\n x = dask.delayed(inc)(1)\n x = await x.persist()\n assert x.key in s.tasks\n assert a.data or b.data\n assert all(f.done() for f in futures_of(x))\n\n\ndef test_local_scheduler():\n async def f():\n x = dask.delayed(inc)(1)\n y = x + 1\n z = await y.persist()\n assert len(z.dask) == 1\n\n asyncio.get_event_loop().run_until_complete(f())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_os_test_basic.assert_all_isinstance_lay": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_os_test_basic.assert_all_isinstance_lay", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 33, "span_ids": ["imports", "test_visualize", "test_basic"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport xml.etree.ElementTree\nfrom collections.abc import Set\n\nimport pytest\n\nimport dask\nfrom dask.blockwise import Blockwise, blockwise_token\nfrom dask.highlevelgraph import HighLevelGraph, Layer, MaterializedLayer, to_graphviz\nfrom dask.utils_test import inc\n\n\ndef test_visualize(tmpdir):\n pytest.importorskip(\"graphviz\")\n da = pytest.importorskip(\"dask.array\")\n fn = str(tmpdir)\n a = da.ones(10, chunks=(5,))\n b = a + 1\n c = a + 2\n d = b + c\n d.dask.visualize(fn)\n assert os.path.exists(fn)\n\n\ndef test_basic():\n a = {\"x\": 1}\n b = {\"y\": (inc, \"x\")}\n layers = {\"a\": a, \"b\": b}\n dependencies = {\"a\": set(), \"b\": {\"a\"}}\n hg = HighLevelGraph(layers, dependencies)\n\n assert dict(hg) == {\"x\": 1, \"y\": (inc, \"x\")}\n assert all(isinstance(layer, Layer) for layer in hg.layers.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_with_equal_dependents_test_order_with_equal_dependents.None_7.for_i_in_range_len_abc_.assert_o_x_5_i_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_order_with_equal_dependents_test_order_with_equal_dependents.None_7.for_i_in_range_len_abc_.assert_o_x_5_i_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 716, "end_line": 793, "span_ids": ["test_order_with_equal_dependents"], "tokens": 881}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_order_with_equal_dependents(abcde):\n \"\"\"From https://github.com/dask/dask/issues/5859#issuecomment-608422198\n\n See the visualization of `(maxima, argmax)` example from the above comment.\n\n This DAG has enough structure to exercise more parts of `order`\n\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {}\n abc = [a, b, c, d]\n for x in abc:\n dsk.update(\n {\n (x, 0): 0,\n (x, 1): (f, (x, 0)),\n (x, 2, 0): (f, (x, 0)),\n (x, 2, 1): (f, (x, 1)),\n }\n )\n for i, y in enumerate(abc):\n dsk.update(\n {\n (x, 3, i): (f, (x, 2, 0), (y, 2, 1)), # cross x and y\n (x, 4, i): (f, (x, 3, i)),\n (x, 5, i, 0): (f, (x, 4, i)),\n (x, 5, i, 1): (f, (x, 4, i)),\n (x, 6, i, 0): (f, (x, 5, i, 0)),\n (x, 6, i, 1): (f, (x, 5, i, 1)),\n }\n )\n o = order(dsk)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n val = o[(x, 6, i, 1)] - o[(x, 6, i, 0)]\n assert val > 0 # ideally, val == 2\n total += val\n assert total <= 110 # ideally, this should be 2 * 16 = 32\n\n # Add one to the end of the nine bundles\n dsk2 = dict(dsk)\n for x in abc:\n for i in range(len(abc)):\n dsk2[(x, 7, i, 0)] = (f, (x, 6, i, 0))\n o = order(dsk2)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n val = o[(x, 7, i, 0)] - o[(x, 6, i, 1)]\n assert val > 0 # ideally, val == 3\n total += val\n assert total <= 138 # ideally, this should be 3 * 16 == 48\n\n # Remove one from each of the nine bundles\n dsk3 = dict(dsk)\n for x in abc:\n for i in range(len(abc)):\n del dsk3[(x, 6, i, 1)]\n o = order(dsk3)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n val = o[(x, 6, i, 0)] - o[(x, 5, i, 1)]\n assert val > 0 # ideally, val == 2\n total += val\n assert total <= 98 # ideally, this should be 2 * 16 == 32\n\n # Remove another one from each of the nine bundles\n dsk4 = dict(dsk3)\n for x in abc:\n for i in range(len(abc)):\n del dsk4[(x, 6, i, 0)]\n o = order(dsk4)\n total = 0\n for x in abc:\n for i in range(len(abc)):\n assert o[(x, 5, i, 1)] - o[(x, 5, i, 0)] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_terminal_node_backtrack_test_terminal_node_backtrack.assert_o_a_2_o_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_terminal_node_backtrack_test_terminal_node_backtrack.assert_o_a_2_o_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 796, "end_line": 850, "span_ids": ["test_terminal_node_backtrack"], "tokens": 597}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_terminal_node_backtrack():\n r\"\"\"\n https://github.com/dask/dask/issues/6745\n\n We have\n\n 1. A terminal node that depends on the entire graph ('s')\n 2. Some shared dependencies near the roots ('a1', 'a4')\n 3. But the left and right halves are disconnected, other\n than the terminal node.\n\n s\n / / \\ \\\n / / \\ \\\n s00 s10 s01 s11\n | | | |\n b00 b10 b01 b11\n / \\ / \\ / \\ / \\\n a0 a1 a2 a3 a4 a5\n\n Previously we started at 'a', and worked up to 's00'. We'd like to finish\n 's00' completely, so we progress to 's' and work through its dependencies.\n\n Ideally, we would choose 's10', since we've already computed one of its\n (eventual) dependencies: 'a1'. However, all of 's00' through 's11' had\n equal metrics so we fell back to the name tie-breaker and started on\n 's01' (via 'a3', a4', 'b01', ...).\n \"\"\"\n dsk = {\n # left half\n (\"a\", 0): (0,),\n (\"a\", 1): (1,),\n (\"a\", 2): (2,),\n (\"b\", 0): (f, (\"a\", 0), (\"a\", 1)),\n (\"b\", 1): (f, (\"a\", 1), (\"a\", 2)),\n (\"store\", 0, 0): (\"b\", 0),\n (\"store\", 1, 0): (\"b\", 1),\n # right half\n (\"a\", 3): (3,),\n (\"a\", 4): (4,),\n (\"a\", 5): (5,),\n (\"b\", 2): (f, (\"a\", 3), (\"a\", 4)),\n (\"b\", 3): (f, (\"a\", 4), (\"a\", 5)),\n (\"store\", 0, 1): (\"b\", 2),\n (\"store\", 1, 1): (\"b\", 3),\n \"store\": (\n f,\n (\"store\", 0, 0),\n (\"store\", 1, 0),\n (\"store\", 0, 1),\n (\"store\", 1, 1),\n ),\n }\n o = order(dsk)\n assert o[(\"a\", 2)] < o[(\"a\", 3)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_is_arraylike_test_iter_chunks.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_is_arraylike_test_iter_chunks.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 564, "end_line": 590, "span_ids": ["test_iter_chunks", "test_is_arraylike"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_is_arraylike():\n np = pytest.importorskip(\"numpy\")\n\n assert is_arraylike(0) is False\n assert is_arraylike(()) is False\n assert is_arraylike(0) is False\n assert 
is_arraylike([]) is False\n assert is_arraylike([0]) is False\n\n assert is_arraylike(np.empty(())) is True\n assert is_arraylike(np.empty((0,))) is True\n assert is_arraylike(np.empty((0, 0))) is True\n\n\ndef test_iter_chunks():\n sizes = [14, 8, 5, 9, 7, 9, 1, 19, 8, 19]\n assert list(iter_chunks(sizes, 19)) == [\n [14],\n [8, 5],\n [9, 7],\n [9, 1],\n [19],\n [8],\n [19],\n ]\n assert list(iter_chunks(sizes, 28)) == [[14, 8, 5], [9, 7, 9, 1], [19, 8], [19]]\n assert list(iter_chunks(sizes, 67)) == [[14, 8, 5, 9, 7, 9, 1], [19, 8, 19]]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_ago_format_time_ago.return._Just_now_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_time_ago_format_time_ago.return._Just_now_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1444, "end_line": 1511, "span_ids": ["format_time_ago"], "tokens": 509}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_time_ago(n: datetime) -> str:\n \"\"\"Calculate a '3 hours ago' type string from a Python datetime.\n\n Examples\n --------\n >>> from datetime import datetime, timedelta\n\n >>> now = datetime.now()\n >>> format_time_ago(now)\n 'Just now'\n\n >>> past = datetime.now() - timedelta(minutes=1)\n >>> format_time_ago(past)\n '1 minute ago'\n\n >>> past = datetime.now() - timedelta(minutes=2)\n >>> format_time_ago(past)\n '2 minutes ago'\n\n >>> past = datetime.now() - timedelta(hours=1)\n >>> format_time_ago(past)\n '1 hour ago'\n\n >>> past = datetime.now() - timedelta(hours=6)\n >>> format_time_ago(past)\n '6 hours ago'\n\n >>> past = datetime.now() - timedelta(days=1)\n >>> format_time_ago(past)\n '1 day ago'\n\n >>> past = datetime.now() - timedelta(days=5)\n >>> format_time_ago(past)\n '5 days ago'\n\n >>> past = datetime.now() - timedelta(days=8)\n >>> format_time_ago(past)\n '1 week ago'\n\n >>> past = datetime.now() - timedelta(days=16)\n >>> format_time_ago(past)\n '2 weeks ago'\n\n >>> past = datetime.now() - timedelta(days=190)\n >>> format_time_ago(past)\n '6 months ago'\n\n >>> past = datetime.now() - timedelta(days=800)\n >>> format_time_ago(past)\n '2 years ago'\n\n \"\"\"\n units = {\n \"years\": lambda diff: diff.days / 365,\n \"months\": lambda diff: diff.days / 30.436875, # Average days per month\n \"weeks\": lambda diff: diff.days / 7,\n \"days\": lambda diff: diff.days,\n \"hours\": lambda diff: diff.seconds / 3600,\n \"minutes\": lambda diff: diff.seconds % 3600 / 60,\n }\n diff = datetime.now() - n\n for unit in units:\n dur = int(units[unit](diff))\n if dur > 0:\n if dur == 1: # De-pluralize\n unit = unit[:-1]\n return f\"{dur} {unit} ago\"\n return \"Just now\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
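The unit table in format_time_ago drives a largest-first scan: report the first unit with a nonzero count. A condensed sketch of the same approach covering days and below only (the helper `time_ago` is illustrative, not the dask function):

from datetime import datetime, timedelta

def time_ago(past, now):
    diff = now - past
    units = [("day", diff.days), ("hour", diff.seconds // 3600),
             ("minute", diff.seconds % 3600 // 60)]
    for name, count in units:  # first nonzero unit wins
        if count > 0:
            return f"{count} {name}{'s' if count != 1 else ''} ago"
    return "Just now"

now = datetime(2022, 1, 10)
assert time_ago(now - timedelta(hours=6), now) == "6 hours ago"
assert time_ago(now - timedelta(minutes=1), now) == "1 minute ago"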
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_key_split_key_split.try_.except_Exception_.return._Other_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_key_split_key_split.try_.except_Exception_.return._Other_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1497, "end_line": 1548, "span_ids": ["key_split"], "tokens": 402}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def key_split(s):\n \"\"\"\n >>> key_split('x')\n 'x'\n >>> key_split('x-1')\n 'x'\n >>> key_split('x-1-2-3')\n 'x'\n >>> key_split(('x-2', 1))\n 'x'\n >>> key_split(\"('x-2', 1)\")\n 'x'\n >>> key_split('hello-world-1')\n 'hello-world'\n >>> key_split(b'hello-world-1')\n 'hello-world'\n >>> key_split('ae05086432ca935f6eba409a8ecd4896')\n 'data'\n >>> key_split('>> key_split(None)\n 'Other'\n >>> key_split('x-abcdefab') # ignores hex\n 'x'\n >>> key_split('_(x)') # strips unpleasant characters\n 'x'\n \"\"\"\n if type(s) is bytes:\n s = s.decode()\n if type(s) is tuple:\n s = s[0]\n try:\n words = s.split(\"-\")\n if not words[0][0].isalpha():\n result = words[0].strip(\"_'()\\\"\")\n else:\n result = words[0]\n for word in words[1:]:\n if word.isalpha() and not (\n len(word) == 8 and hex_pattern.match(word) is not None\n ):\n result += \"-\" + word\n else:\n break\n if len(result) == 32 and re.match(r\"[a-f0-9]{32}\", result):\n return \"data\"\n else:\n if result[0] == \"<\":\n result = result.strip(\"<>\").split()[0].split(\".\")[-1]\n return result\n except Exception:\n return \"Other\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_stringify_stringify.return.obj": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_stringify_stringify.return.obj", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1715, "end_line": 1783, "span_ids": ["stringify"], "tokens": 549}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def stringify(obj, exclusive: Iterable | None = None):\n \"\"\"Convert an object to a string\n\n If ``exclusive`` is specified, search through `obj` and convert\n values that are in ``exclusive``.\n\n Note that when searching through dictionaries, only values are\n converted, not the keys.\n\n Parameters\n ----------\n obj : Any\n Object (or values within) to convert to string\n exclusive: Iterable, optional\n Set of values to search for when converting values to strings\n\n Returns\n 
-------\n result : type(obj)\n Stringified copy of ``obj`` or ``obj`` itself if it is already a\n string or bytes.\n\n Examples\n --------\n >>> stringify(b'x')\n b'x'\n >>> stringify('x')\n 'x'\n >>> stringify({('a',0):('a',0), ('a',1): ('a',1)})\n \"{('a', 0): ('a', 0), ('a', 1): ('a', 1)}\"\n >>> stringify({('a',0):('a',0), ('a',1): ('a',1)}, exclusive={('a',0)})\n {('a', 0): \"('a', 0)\", ('a', 1): ('a', 1)}\n \"\"\"\n\n typ = type(obj)\n if typ is str or typ is bytes:\n return obj\n elif exclusive is None:\n return str(obj)\n\n if typ is tuple and obj:\n from .optimization import SubgraphCallable\n\n obj0 = obj[0]\n if type(obj0) is SubgraphCallable:\n obj0 = obj0\n return (\n SubgraphCallable(\n stringify(obj0.dsk, exclusive),\n obj0.outkey,\n stringify(obj0.inkeys, exclusive),\n obj0.name,\n ),\n ) + tuple(stringify(x, exclusive) for x in obj[1:])\n elif callable(obj0):\n return (obj0,) + tuple(stringify(x, exclusive) for x in obj[1:])\n\n if typ is list:\n return [stringify(v, exclusive) for v in obj]\n if typ is dict:\n return {k: stringify(v, exclusive) for k, v in obj.items()}\n try:\n if obj in exclusive:\n return stringify(obj)\n except TypeError: # `obj` not hashable\n pass\n if typ is tuple: # If the tuple itself isn't a key, check its elements\n return tuple(stringify(v, exclusive) for v in obj)\n return obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_stringify_collection_keys_stringify_collection_keys.return.obj": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_stringify_collection_keys_stringify_collection_keys.return.obj", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1622, "end_line": 1642, "span_ids": ["stringify_collection_keys"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def stringify_collection_keys(obj):\n \"\"\"Convert all collection keys in ``obj`` to strings.\n\n This is a specialized version of ``stringify()`` that only converts keys\n of the form: ``(\"a string\", ...)``\n \"\"\"\n\n typ = type(obj)\n if typ is tuple and obj:\n obj0 = obj[0]\n if type(obj0) is str or type(obj0) is bytes:\n return stringify(obj)\n if callable(obj0):\n return (obj0,) + tuple(stringify_collection_keys(x) for x in obj[1:])\n if typ is list:\n return [stringify_collection_keys(v) for v in obj]\n if typ is dict:\n return {k: stringify_collection_keys(v) for k, v in obj.items()}\n if typ is tuple: # If the tuple itself isn't a key, check its elements\n return tuple(stringify_collection_keys(v) for v in obj)\n return obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_getter.return.c": 
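stringify's exclusive-set behaviour is worth seeing end to end: only values present in `exclusive` are converted, and dict keys are never touched. Runnable against the public helper, restating the docstring example:

from dask.utils import stringify

dsk = {("a", 0): ("a", 0), ("a", 1): ("a", 1)}
out = stringify(dsk, exclusive={("a", 0)})
# Only the value matching the exclusive set was stringified.
assert out == {("a", 0): "('a', 0)", ("a", 1): ("a", 1)}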
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_getter.return.c", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 98, "end_line": 121, "span_ids": ["getter"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getter(a, b, asarray=True, lock=None):\n if isinstance(b, tuple) and any(x is None for x in b):\n b2 = tuple(x for x in b if x is not None)\n b3 = tuple(\n None if x is None else slice(None, None)\n for x in b\n if not isinstance(x, Integral)\n )\n return getter(a, b2, asarray=asarray, lock=lock)[b3]\n\n if lock:\n lock.acquire()\n try:\n c = a[b]\n # Below we special-case `np.matrix` to force a conversion to\n # `np.ndarray` and preserve original Dask behavior for `getter`,\n # as for all purposes `np.matrix` is array-like and thus\n # `is_arraylike` evaluates to `True` in that case.\n if asarray and (not is_arraylike(c) or isinstance(c, np.matrix)):\n c = np.asarray(c)\n finally:\n if lock:\n lock.release()\n return c", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_nofancy_getter_inline.return.getter_a_b_asarray_asar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_getter_nofancy_getter_inline.return.getter_a_b_asarray_asar", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 152, "span_ids": ["getter_nofancy", "getter_inline"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getter_nofancy(a, b, asarray=True, lock=None):\n \"\"\"A simple wrapper around ``getter``.\n\n Used to indicate to the optimization passes that the backend doesn't\n support fancy indexing.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\ndef getter_inline(a, b, asarray=True, lock=None):\n \"\"\"A getter function that optimizations feel comfortable inlining\n\n Slicing operations with this function may be inlined into a graph, such as\n in the following rewrite\n\n **Before**\n\n >>> a = x[:10] # doctest: +SKIP\n >>> b = a + 1 # doctest: +SKIP\n >>> c = a * 2 # doctest: +SKIP\n\n **After**\n\n >>> b = x[:10] + 1 # doctest: +SKIP\n >>> c = x[:10] * 2 # doctest: +SKIP\n\n This inlining can be relevant to operations when running off of disk.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_round_to_from_array": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_round_to_from_array", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3016, "end_line": 3275, "span_ids": ["round_to", "from_array", "_get_chunk_shape"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def round_to(c, s):\n \"\"\"Return a chunk dimension that is close to an even multiple or factor\n\n We want values for c that are nicely aligned with s.\n\n If c is smaller than s then we want the largest factor of s that is less than the\n desired chunk size, but not less than half, which is too much. If no such\n factor exists then we just go with the original chunk size and accept an\n uneven chunk at the end.\n\n If c is larger than s then we want the largest multiple of s that is still\n smaller than c.\n \"\"\"\n if c <= s:\n try:\n return max(f for f in factors(s) if c / 2 <= f <= c)\n except ValueError: # no matching factors within factor of two\n return max(1, int(c))\n else:\n return c // s * s\n\n\ndef _get_chunk_shape(a):\n s = np.asarray(a.shape, dtype=int)\n return s[len(s) * (None,) + (slice(None),)]\n\n\ndef from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n inline_array=False,\n):\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array._Create_dask_array_from_from_array._Create_dask_array_from": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from_array._Create_dask_array_from_from_array._Create_dask_array_from", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2999, "end_line": 3143, "span_ids": ["from_array"], "tokens": 1633}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n inline_array=False,\n):\n \"\"\"Create dask array from something that looks like an array.\n\n Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.\n\n Parameters\n ----------\n x : array_like\n chunks : int, tuple\n How to chunk the array. 
Must be one of the following forms:\n\n - A blocksize like 1000.\n - A blockshape like (1000, 1000).\n - Explicit sizes of all blocks along all dimensions like\n ((1000, 1000, 500), (400, 400)).\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n -1 or None as a blocksize indicate the size of the corresponding\n dimension.\n name : str or bool, optional\n The key name to use for the array. Defaults to a hash of ``x``.\n\n Hashing is useful if the same value of ``x`` is used to create multiple\n arrays, as Dask can then recognise that they're the same and\n avoid duplicate computations. However, it can also be slow, and if the\n array is not contiguous it is copied for hashing. If the array uses\n stride tricks (such as :func:`numpy.broadcast_to` or\n :func:`skimage.util.view_as_windows`) to have a larger logical\n than physical size, this copy can cause excessive memory usage.\n\n If you don't need the deduplication provided by hashing, use\n ``name=False`` to generate a random name instead of hashing, which\n avoids the pitfalls described above. Using ``name=True`` is\n equivalent to the default.\n\n By default, hashing uses python's standard sha1. This behaviour can be\n changed by installing cityhash, xxhash or murmurhash. If installed,\n a large-factor speedup can be obtained in the tokenisation step.\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies the data contained within. If\n you'd like to provide a descriptive name that is still unique, combine\n the descriptive name with :func:`dask.base.tokenize` of the\n ``array_like``. See :ref:`graphs` for more.\n\n lock : bool or Lock, optional\n If ``x`` doesn't support concurrent reads then provide a lock here, or\n pass in True to have dask.array create one for you.\n asarray : bool, optional\n If True then call np.asarray on chunks to convert them to numpy arrays.\n If False then chunks are passed through unchanged.\n If None (default) then we use True if the ``__array_function__`` method\n is undefined.\n fancy : bool, optional\n If ``x`` doesn't support fancy indexing (e.g. indexing with lists or\n arrays) then set to False. Default is True.\n meta : Array-like, optional\n The metadata for the resulting dask array. This is the kind of array\n that will result from slicing the input array.\n Defaults to the input array.\n inline_array : bool, default False\n How to include the array in the task graph. By default\n (``inline_array=False``) the array is included in a task by itself,\n and each chunk refers to that task by its key.\n\n .. code-block:: python\n\n >>> x = h5py.File(\"data.h5\")[\"/x\"] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=500) # doctest: +SKIP\n >>> dict(a.dask) # doctest: +SKIP\n {\n 'array-original-<name>': <HDF5 dataset ...>,\n ('array-<name>', 0): (getitem, \"array-original-<name>\", ...),\n ('array-<name>', 1): (getitem, \"array-original-<name>\", ...)\n }\n\n With ``inline_array=True``, Dask will instead inline the array directly\n in the values of the task graph.\n\n .. code-block:: python\n\n >>> a = da.from_array(x, chunks=500, inline_array=True) # doctest: +SKIP\n >>> dict(a.dask) # doctest: +SKIP\n {\n ('array-<name>', 0): (getitem, <HDF5 dataset ...>, ...),\n ('array-<name>', 1): (getitem, <HDF5 dataset ...>, ...)\n }\n\n Note that there's no key in the task graph with just the array `x`\n anymore. 
Instead it's placed directly in the values.\n\n The right choice for ``inline_array`` depends on several factors,\n including the size of ``x``, how expensive it is to create, which\n scheduler you're using, and the pattern of downstream computations.\n As a heuristic, ``inline_array=True`` may be the right choice when\n the array ``x`` is cheap to serialize and deserialize (since it's\n included in the graph many times) and if you're experiencing ordering\n issues (see :ref:`order` for more).\n\n This has no effect when ``x`` is a NumPy array.\n\n Examples\n --------\n\n >>> x = h5py.File('...')['/data/path'] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP\n\n If your underlying datastore does not support concurrent reads then include\n the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple\n arrays to coordinate around the same lock.\n\n >>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP\n\n If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr\n datasets do) then a multiple of that chunk shape will be used if you\n do not provide a chunk shape.\n\n >>> a = da.from_array(x, chunks='auto') # doctest: +SKIP\n >>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP\n >>> a = da.from_array(x) # doctest: +SKIP\n\n If providing a name, ensure that it is unique\n\n >>> import dask.base\n >>> token = dask.base.tokenize(x) # doctest: +SKIP\n >>> a = da.from_array('myarray-' + token) # doctest: +SKIP\n\n NumPy ndarrays are eagerly sliced and then embedded in the graph.\n\n >>> import dask.array\n >>> a = dask.array.from_array(np.array([[1, 2], [3, 4]]), chunks=(1,1))\n >>> a.dask[a.name, 0, 0][0]\n array([1])\n\n Chunks with exactly-specified, different sizes can be created.\n\n >>> import numpy as np\n >>> import dask.array as da\n >>> x = np.random.random((100, 6))\n >>> a = da.from_array(x, chunks=((67, 33), (6,)))\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__bincount_agg_bincount.return.output": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__bincount_agg_bincount.return.output", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 713, "end_line": 766, "span_ids": ["_bincount_agg", "bincount"], "tokens": 417}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _bincount_agg(bincounts, dtype, **kwargs):\n if not isinstance(bincounts, list):\n return bincounts\n\n n = max(map(len, bincounts))\n out = np.zeros_like(bincounts[0], shape=n, dtype=dtype)\n for b in bincounts:\n out[: len(b)] += b\n return out\n\n\n@derived_from(np)\ndef bincount(x, weights=None, minlength=0, split_every=None):\n if x.ndim != 1:\n raise ValueError(\"Input array must be one dimensional. 
Try using x.ravel()\")\n if weights is not None:\n if weights.chunks != x.chunks:\n raise ValueError(\"Chunks of input array x and weights must match.\")\n\n token = tokenize(x, weights, minlength)\n args = [x, \"i\"]\n if weights is not None:\n meta = array_safe(np.bincount([1], weights=[1]), like=meta_from_array(x))\n args.extend([weights, \"i\"])\n else:\n meta = array_safe(np.bincount([]), like=meta_from_array(x))\n\n if minlength == 0:\n output_size = (np.nan,)\n else:\n output_size = (minlength,)\n\n chunked_counts = blockwise(\n partial(np.bincount, minlength=minlength), \"i\", *args, token=token, meta=meta\n )\n chunked_counts._chunks = (\n output_size * len(chunked_counts.chunks[0]),\n *chunked_counts.chunks[1:],\n )\n\n from .reductions import _tree_reduce\n\n output = _tree_reduce(\n chunked_counts,\n aggregate=partial(_bincount_agg, dtype=meta.dtype),\n axis=(0,),\n keepdims=True,\n dtype=meta.dtype,\n split_every=split_every,\n concatenate=False,\n )\n output._chunks = (output_size, *chunked_counts.chunks[1:])\n output._meta = meta\n return output", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_aligned_coarsen_chunks_aligned_coarsen_chunks.return.tuple_new_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_aligned_coarsen_chunks_aligned_coarsen_chunks.return.tuple_new_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2236, "end_line": 2271, "span_ids": ["aligned_coarsen_chunks"], "tokens": 425}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def aligned_coarsen_chunks(chunks: list[int], multiple: int) -> tuple[int, ...]:\n \"\"\"\n Returns a new chunking aligned with the coarsening multiple.\n Any excess is at the end of the array.\n\n Examples\n --------\n >>> aligned_coarsen_chunks(chunks=(1, 2, 3), multiple=4)\n (4, 2)\n >>> aligned_coarsen_chunks(chunks=(1, 20, 3, 4), multiple=4)\n (4, 20, 4)\n >>> aligned_coarsen_chunks(chunks=(20, 10, 15, 23, 24), multiple=10)\n (20, 10, 20, 20, 20, 2)\n \"\"\"\n overflow = np.array(chunks) % multiple\n excess = overflow.sum()\n new_chunks = np.array(chunks) - overflow\n # valid chunks are those that are already factorizable by `multiple`\n chunk_validity = new_chunks == chunks\n valid_inds, invalid_inds = np.where(chunk_validity)[0], np.where(~chunk_validity)[0]\n # sort the invalid chunks by size (ascending), then concatenate the results of\n # sorting the valid chunks by size (ascending)\n chunk_modification_order = [\n *invalid_inds[np.argsort(new_chunks[invalid_inds])],\n *valid_inds[np.argsort(new_chunks[valid_inds])],\n ]\n partitioned_excess, remainder = _partition(excess, multiple)\n # add elements the partitioned excess to the smallest invalid chunks,\n # then smallest valid chunks if needed.\n for idx, extra in enumerate(partitioned_excess):\n 
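The aggregation step of bincount zero-pads per-chunk counts to the longest length before summing. A NumPy-only restatement of _bincount_agg's core loop:

import numpy as np

def agg(bincounts, dtype=np.intp):
    n = max(map(len, bincounts))  # longest per-chunk count vector
    out = np.zeros(n, dtype=dtype)
    for b in bincounts:
        out[: len(b)] += b        # shorter vectors act as zero-padded
    return out

parts = [np.bincount([0, 1, 1]), np.bincount([3])]
assert agg(parts).tolist() == [1, 2, 0, 1]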
new_chunks[chunk_modification_order[idx]] += extra\n # create excess chunk with remainder, if any remainder exists\n new_chunks = np.array([*new_chunks, *remainder])\n # remove 0-sized chunks\n new_chunks = new_chunks[new_chunks > 0]\n return tuple(new_chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_delete_append.return.concatenate_arr_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_delete_append.return.concatenate_arr_values_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1607, "end_line": 1647, "span_ids": ["append", "delete"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef delete(arr, obj, axis):\n \"\"\"\n NOTE: If ``obj`` is a dask array it is implicitly computed when this function\n is called.\n \"\"\"\n # axis is a required argument here to avoid needing to deal with the numpy\n # default case (which reshapes the array to make it flat)\n axis = validate_axis(axis, arr.ndim)\n\n if isinstance(obj, slice):\n tmp = np.arange(*obj.indices(arr.shape[axis]))\n obj = tmp[::-1] if obj.step and obj.step < 0 else tmp\n else:\n obj = np.asarray(obj)\n obj = np.where(obj < 0, obj + arr.shape[axis], obj)\n obj = np.unique(obj)\n\n target_arr = split_at_breaks(arr, obj, axis)\n\n target_arr = [\n arr[\n tuple(slice(1, None) if axis == n else slice(None) for n in range(arr.ndim))\n ]\n if i != 0\n else arr\n for i, arr in enumerate(target_arr)\n ]\n return concatenate(target_arr, axis=axis)\n\n\n@derived_from(np)\ndef append(arr, values, axis=None):\n # based on numpy.append\n arr = asanyarray(arr)\n if axis is None:\n if arr.ndim != 1:\n arr = arr.ravel()\n values = ravel(asanyarray(values))\n axis = arr.ndim - 1\n return concatenate((arr, values), axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__average__average.if_returned_.else_.return.avg": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__average__average.if_returned_.else_.return.avg", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1650, "end_line": 1698, "span_ids": ["_average"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def _average(a, axis=None, weights=None, returned=False, is_masked=False):\n # This was minimally modified from numpy.average\n a = asanyarray(a)\n\n if weights is None:\n avg = a.mean(axis)\n scl = avg.dtype.type(a.size / avg.size)\n else:\n wgt = asanyarray(weights)\n\n if issubclass(a.dtype.type, (np.integer, np.bool_)):\n result_dtype = result_type(a.dtype, wgt.dtype, \"f8\")\n else:\n result_dtype = result_type(a.dtype, wgt.dtype)\n\n # Sanity checks\n if a.shape != wgt.shape:\n if axis is None:\n raise TypeError(\n \"Axis must be specified when shapes of a and weights differ.\"\n )\n if wgt.ndim != 1:\n raise TypeError(\n \"1D weights expected when shapes of a and weights differ.\"\n )\n if wgt.shape[0] != a.shape[axis]:\n raise ValueError(\n \"Length of weights not compatible with specified axis.\"\n )\n\n # setup wgt to broadcast along axis\n wgt = broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)\n wgt = wgt.swapaxes(-1, axis)\n if is_masked:\n from .ma import getmaskarray\n\n wgt = wgt * (~getmaskarray(a))\n scl = wgt.sum(axis=axis, dtype=result_dtype)\n avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl\n\n if returned:\n if scl.shape != avg.shape:\n scl = broadcast_to(scl, avg.shape).copy()\n return avg, scl\n else:\n return avg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_average_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_average_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2448, "end_line": 2503, "span_ids": ["average", "triu_indices_from", "tril", "triu_indices", "tril_indices", "triu", "tril_indices_from"], "tokens": 427}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef average(a, axis=None, weights=None, returned=False):\n return _average(a, axis, weights, returned, is_masked=False)\n\n\n@derived_from(np)\ndef tril(m, k=0):\n m = asarray_safe(m, like=m)\n mask = tri(\n *m.shape[-2:],\n k=k,\n dtype=bool,\n chunks=m.chunks[-2:],\n like=meta_from_array(m) if _numpy_120 else None,\n )\n\n return where(mask, m, np.zeros_like(m, shape=(1,)))\n\n\n@derived_from(np)\ndef triu(m, k=0):\n m = asarray_safe(m, like=m)\n mask = tri(\n *m.shape[-2:],\n k=k - 1,\n dtype=bool,\n chunks=m.chunks[-2:],\n like=meta_from_array(m) if _numpy_120 else None,\n )\n\n return where(mask, np.zeros_like(m, shape=(1,)), m)\n\n\n@derived_from(np)\ndef tril_indices(n, k=0, m=None, chunks=\"auto\"):\n return nonzero(tri(n, m, k=k, dtype=bool, chunks=chunks))\n\n\n@derived_from(np)\ndef tril_indices_from(arr, k=0):\n if arr.ndim != 2:\n raise ValueError(\"input array must be 2-d\")\n return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)\n\n\n@derived_from(np)\ndef triu_indices(n, k=0, m=None, chunks=\"auto\"):\n return nonzero(~tri(n, m, k=k - 1, 
dtype=bool, chunks=chunks))\n\n\n@derived_from(np)\ndef triu_indices_from(arr, k=0):\n if arr.ndim != 2:\n raise ValueError(\"input array must be 2-d\")\n return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_test_chunked_transpose_plus_one.assert_eq_concatenate3_ou": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_top_test_chunked_transpose_plus_one.assert_eq_concatenate3_ou", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 89, "end_line": 244, "span_ids": ["test_blockwise_1_in_shape_I", "test_top_supports_broadcasting_rules", "test_blockwise_1_in_shape_II", "test_concatenate3_on_scalars", "test_chunked_transpose_plus_one", "test_top", "test_top_literals", "test_blockwise_1_in_shape_III", "test_blockwise_literals", "test_top_with_kwargs", "test_chunked_dot_product"], "tokens": 1953}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_top():\n assert top(inc, \"z\", \"ij\", \"x\", \"ij\", numblocks={\"x\": (2, 2)}) == {\n (\"z\", 0, 0): (inc, (\"x\", 0, 0)),\n (\"z\", 0, 1): (inc, (\"x\", 0, 1)),\n (\"z\", 1, 0): (inc, (\"x\", 1, 0)),\n (\"z\", 1, 1): (inc, (\"x\", 1, 1)),\n }\n\n assert top(\n add, \"z\", \"ij\", \"x\", \"ij\", \"y\", \"ij\", numblocks={\"x\": (2, 2), \"y\": (2, 2)}\n ) == {\n (\"z\", 0, 0): (add, (\"x\", 0, 0), (\"y\", 0, 0)),\n (\"z\", 0, 1): (add, (\"x\", 0, 1), (\"y\", 0, 1)),\n (\"z\", 1, 0): (add, (\"x\", 1, 0), (\"y\", 1, 0)),\n (\"z\", 1, 1): (add, (\"x\", 1, 1), (\"y\", 1, 1)),\n }\n\n assert top(\n dotmany, \"z\", \"ik\", \"x\", \"ij\", \"y\", \"jk\", numblocks={\"x\": (2, 2), \"y\": (2, 2)}\n ) == {\n (\"z\", 0, 0): (dotmany, [(\"x\", 0, 0), (\"x\", 0, 1)], [(\"y\", 0, 0), (\"y\", 1, 0)]),\n (\"z\", 0, 1): (dotmany, [(\"x\", 0, 0), (\"x\", 0, 1)], [(\"y\", 0, 1), (\"y\", 1, 1)]),\n (\"z\", 1, 0): (dotmany, [(\"x\", 1, 0), (\"x\", 1, 1)], [(\"y\", 0, 0), (\"y\", 1, 0)]),\n (\"z\", 1, 1): (dotmany, [(\"x\", 1, 0), (\"x\", 1, 1)], [(\"y\", 0, 1), (\"y\", 1, 1)]),\n }\n\n assert top(identity, \"z\", \"\", \"x\", \"ij\", numblocks={\"x\": (2, 2)}) == {\n (\"z\",): (identity, [[(\"x\", 0, 0), (\"x\", 0, 1)], [(\"x\", 1, 0), (\"x\", 1, 1)]])\n }\n\n\ndef test_top_with_kwargs():\n assert top(add, \"z\", \"i\", \"x\", \"i\", numblocks={\"x\": (2, 0)}, b=100) == {\n (\"z\", 0): (apply, add, [(\"x\", 0)], {\"b\": 100}),\n (\"z\", 1): (apply, add, [(\"x\", 1)], {\"b\": 100}),\n }\n\n\ndef test_top_supports_broadcasting_rules():\n assert top(\n add, \"z\", \"ij\", \"x\", \"ij\", \"y\", \"ij\", numblocks={\"x\": (1, 2), \"y\": (2, 1)}\n ) == {\n (\"z\", 0, 0): (add, (\"x\", 0, 0), (\"y\", 0, 0)),\n (\"z\", 0, 1): (add, (\"x\", 0, 1), (\"y\", 0, 0)),\n (\"z\", 1, 0): (add, (\"x\", 0, 0), (\"y\", 1, 0)),\n (\"z\", 1, 
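The masking identity behind tril/triu above — build a boolean triangle with tri() and select with where() — holds in plain NumPy too:

import numpy as np

m = np.arange(16.0).reshape(4, 4)
lower = np.tri(4, 4, k=0, dtype=bool)    # k=0: include the diagonal
assert np.array_equal(np.where(lower, m, 0), np.tril(m))
strict = np.tri(4, 4, k=-1, dtype=bool)  # k=-1: strictly below it
assert np.array_equal(np.where(strict, 0, m), np.triu(m))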
1): (add, (\"x\", 0, 1), (\"y\", 1, 0)),\n }\n\n\ndef test_top_literals():\n assert top(add, \"z\", \"ij\", \"x\", \"ij\", 123, None, numblocks={\"x\": (2, 2)}) == {\n (\"z\", 0, 0): (add, (\"x\", 0, 0), 123),\n (\"z\", 0, 1): (add, (\"x\", 0, 1), 123),\n (\"z\", 1, 0): (add, (\"x\", 1, 0), 123),\n (\"z\", 1, 1): (add, (\"x\", 1, 1), 123),\n }\n\n\ndef test_blockwise_literals():\n x = da.ones((10, 10), chunks=(5, 5))\n z = da.blockwise(add, \"ij\", x, \"ij\", 100, None, dtype=x.dtype)\n assert_eq(z, x + 100)\n\n z = da.blockwise(\n lambda x, y, z: x * y + z, \"ij\", 2, None, x, \"ij\", 100, None, dtype=x.dtype\n )\n assert_eq(z, 2 * x + 100)\n\n z = da.blockwise(getitem, \"ij\", x, \"ij\", slice(None), None, dtype=x.dtype)\n assert_eq(z, x)\n\n\ndef test_blockwise_1_in_shape_I():\n def test_f(a, b):\n assert 1 in b.shape\n\n p, k, N = 7, 2, 5\n da.blockwise(\n test_f,\n \"x\",\n da.zeros((2 * p, 9, k * N), chunks=(p, 3, k)),\n \"xzt\",\n da.zeros((2 * p, 9, 1), chunks=(p, 3, -1)),\n \"xzt\",\n concatenate=True,\n dtype=float,\n ).compute()\n\n\ndef test_blockwise_1_in_shape_II():\n def test_f(a, b):\n assert 1 in b.shape\n\n p, k, N = 7, 2, 5\n da.blockwise(\n test_f,\n \"x\",\n da.zeros((2 * p, 9, k * N, 8), chunks=(p, 9, k, 4)),\n \"xztu\",\n da.zeros((2 * p, 9, 1, 8), chunks=(p, 9, -1, 4)),\n \"xztu\",\n concatenate=True,\n dtype=float,\n ).compute()\n\n\ndef test_blockwise_1_in_shape_III():\n def test_f(a, b):\n assert 1 in b.shape\n\n k, N = 2, 5\n da.blockwise(\n test_f,\n \"x\",\n da.zeros((k * N, 9, 8), chunks=(k, 3, 4)),\n \"xtu\",\n da.zeros((1, 9, 8), chunks=(-1, 3, 4)),\n \"xtu\",\n concatenate=True,\n dtype=float,\n ).compute()\n\n\ndef test_concatenate3_on_scalars():\n assert_eq(concatenate3([1, 2]), np.array([1, 2]))\n\n\ndef test_chunked_dot_product():\n x = np.arange(400).reshape((20, 20))\n o = np.ones((20, 20))\n\n getx = graph_from_arraylike(x, (5, 5), shape=(20, 20), name=\"x\")\n geto = graph_from_arraylike(o, (5, 5), shape=(20, 20), name=\"o\")\n\n result = top(\n dotmany, \"out\", \"ik\", \"x\", \"ij\", \"o\", \"jk\", numblocks={\"x\": (4, 4), \"o\": (4, 4)}\n )\n\n dsk = merge(getx, geto, result)\n out = dask.get(dsk, [[(\"out\", i, j) for j in range(4)] for i in range(4)])\n\n assert_eq(np.dot(x, o), concatenate3(out))\n\n\ndef test_chunked_transpose_plus_one():\n x = np.arange(400).reshape((20, 20))\n\n getx = graph_from_arraylike(x, (5, 5), shape=(20, 20), name=\"x\")\n\n f = lambda x: x.T + 1\n comp = top(f, \"out\", \"ij\", \"x\", \"ji\", numblocks={\"x\": (4, 4)})\n\n dsk = merge(getx, comp)\n out = dask.get(dsk, [[(\"out\", i, j) for j in range(4)] for i in range(4)])\n\n assert_eq(concatenate3(out), x.T + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_dimensions_works_with_singleton_dimensions_test_stack_rechunk.assert_eq_z_np_stack_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_broadcast_dimensions_works_with_singleton_dimensions_test_stack_rechunk.assert_eq_z_np_stack_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 247, 
"end_line": 430, "span_ids": ["test_Array", "test_broadcast_dimensions_works_with_singleton_dimensions", "test_Array_computation", "test_stack", "test_Array_numpy_gufunc_call__array_ufunc__01", "test_stack_zero_size", "test_short_stack", "test_stack_rechunk", "test_stack_scalars", "test_broadcast_dimensions", "test_keys", "test_uneven_chunks", "test_stack_promote_type", "test_Array_numpy_gufunc_call__array_ufunc__02", "test_numblocks_suppoorts_singleton_block_dims"], "tokens": 2075}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_broadcast_dimensions_works_with_singleton_dimensions():\n argpairs = [(\"x\", \"i\")]\n numblocks = {\"x\": ((1,),)}\n assert broadcast_dimensions(argpairs, numblocks) == {\"i\": (1,)}\n\n\ndef test_broadcast_dimensions():\n argpairs = [(\"x\", \"ij\"), (\"y\", \"ij\")]\n d = {\"x\": (\"Hello\", 1), \"y\": (1, (2, 3))}\n assert broadcast_dimensions(argpairs, d) == {\"i\": \"Hello\", \"j\": (2, 3)}\n\n\ndef test_Array():\n arr = object() # arraylike is unimportant since we never compute\n shape = (1000, 1000)\n chunks = (100, 100)\n name = \"x\"\n dsk = graph_from_arraylike(arr, chunks, shape, name)\n a = Array(dsk, name, chunks, shape=shape, dtype=\"f8\")\n\n assert a.numblocks == (10, 10)\n\n assert a.__dask_keys__() == [[(\"x\", i, j) for j in range(10)] for i in range(10)]\n\n assert a.chunks == ((100,) * 10, (100,) * 10)\n\n assert a.shape == shape\n\n assert len(a) == shape[0]\n\n with pytest.raises(ValueError):\n Array(dsk, name, chunks, shape=shape)\n with pytest.raises(TypeError):\n Array(dsk, name, chunks, shape=shape, dtype=\"f8\", meta=np.empty(0, 0))\n\n\ndef test_uneven_chunks():\n a = Array({}, \"x\", chunks=(3, 3), shape=(10, 10), dtype=\"f8\")\n assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))\n\n\ndef test_numblocks_suppoorts_singleton_block_dims():\n arr = object() # arraylike is unimportant since we never compute\n shape = (100, 10)\n chunks = (10, 10)\n name = \"x\"\n dsk = graph_from_arraylike(arr, chunks, shape, name)\n a = Array(dsk, name, chunks, shape=shape, dtype=\"f8\")\n\n assert set(concat(a.__dask_keys__())) == {(\"x\", i, 0) for i in range(10)}\n\n\ndef test_keys():\n dsk = {(\"x\", i, j): () for i in range(5) for j in range(6)}\n dx = Array(dsk, \"x\", chunks=(10, 10), shape=(50, 60), dtype=\"f8\")\n assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)] for i in range(5)]\n # Cache works\n assert dx.__dask_keys__() is dx.__dask_keys__()\n # Test mutating names clears key cache\n dx.dask = {(\"y\", i, j): () for i in range(5) for j in range(6)}\n dx._name = \"y\"\n new_keys = [[(dx.name, i, j) for j in range(6)] for i in range(5)]\n assert dx.__dask_keys__() == new_keys\n assert np.array_equal(dx._key_array, np.array(new_keys, dtype=\"object\"))\n d = Array({}, \"x\", (), shape=(), dtype=\"f8\")\n assert d.__dask_keys__() == [(\"x\",)]\n\n\ndef test_Array_computation():\n a = Array({(\"x\", 0, 0): np.eye(3)}, \"x\", shape=(3, 3), chunks=(3, 3), dtype=\"f8\")\n assert_eq(np.array(a), np.eye(3))\n assert isinstance(a.compute(), np.ndarray)\n assert float(a[0, 0]) == 1\n\n\ndef test_Array_numpy_gufunc_call__array_ufunc__01():\n x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))\n nx = x.compute()\n ny = 
np.linalg._umath_linalg.inv(nx)\n y = np.linalg._umath_linalg.inv(x)\n assert_eq(ny, y)\n\n\ndef test_Array_numpy_gufunc_call__array_ufunc__02():\n x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))\n nx = x.compute()\n nw, nv = np.linalg._umath_linalg.eig(nx)\n w, v = np.linalg._umath_linalg.eig(x)\n assert_eq(nw, w)\n assert_eq(nv, v)\n\n\ndef test_stack():\n a, b, c = (\n Array(\n graph_from_arraylike(object(), chunks=(2, 3), shape=(4, 6), name=name),\n name,\n chunks=(2, 3),\n dtype=\"f8\",\n shape=(4, 6),\n )\n for name in \"ABC\"\n )\n\n s = stack([a, b, c], axis=0)\n\n colon = slice(None, None, None)\n\n assert s.shape == (3, 4, 6)\n assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))\n assert s.chunksize == (1, 2, 3)\n assert s.dask[(s.name, 0, 1, 0)] == (getitem, (\"A\", 1, 0), (None, colon, colon))\n assert s.dask[(s.name, 2, 1, 0)] == (getitem, (\"C\", 1, 0), (None, colon, colon))\n assert same_keys(s, stack([a, b, c], axis=0))\n\n s2 = stack([a, b, c], axis=1)\n assert s2.shape == (4, 3, 6)\n assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))\n assert s2.chunksize == (2, 1, 3)\n assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, (\"B\", 0, 0), (colon, None, colon))\n assert s2.dask[(s2.name, 1, 1, 0)] == (getitem, (\"B\", 1, 0), (colon, None, colon))\n assert same_keys(s2, stack([a, b, c], axis=1))\n\n s2 = stack([a, b, c], axis=2)\n assert s2.shape == (4, 6, 3)\n assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))\n assert s2.chunksize == (2, 3, 1)\n assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, (\"A\", 0, 1), (colon, colon, None))\n assert s2.dask[(s2.name, 1, 1, 2)] == (getitem, (\"C\", 1, 1), (colon, colon, None))\n assert same_keys(s2, stack([a, b, c], axis=2))\n\n pytest.raises(ValueError, lambda: stack([]))\n pytest.raises(ValueError, lambda: stack([a, b, c], axis=3))\n\n assert set(b.dask.keys()).issubset(s2.dask.keys())\n\n assert stack([a, b, c], axis=-1).chunks == stack([a, b, c], axis=2).chunks\n\n\ndef test_stack_zero_size():\n x = np.empty((2, 0, 3))\n y = da.from_array(x, chunks=1)\n\n result_np = np.concatenate([x, x])\n result_da = da.concatenate([y, y])\n\n assert_eq(result_np, result_da)\n\n\ndef test_short_stack():\n x = np.array([1])\n d = da.from_array(x, chunks=(1,))\n s = da.stack([d])\n assert s.shape == (1, 1)\n chunks = compute_as_if_collection(Array, s.dask, s.__dask_keys__())\n assert chunks[0][0].shape == (1, 1)\n\n\ndef test_stack_scalars():\n d = da.arange(4, chunks=2)\n\n s = da.stack([d.mean(), d.sum()])\n\n assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]\n\n\ndef test_stack_promote_type():\n i = np.arange(10, dtype=\"i4\")\n f = np.arange(10, dtype=\"f4\")\n di = da.from_array(i, chunks=5)\n df = da.from_array(f, chunks=5)\n res = da.stack([di, df])\n assert_eq(res, np.stack([i, f]))\n\n\ndef test_stack_rechunk():\n x = da.random.random(10, chunks=5)\n y = da.random.random(10, chunks=4)\n\n z = da.stack([x, y], axis=0)\n assert z.shape == (2, 10)\n assert z.chunks == ((1, 1), (4, 1, 3, 2))\n\n assert_eq(z, np.stack([x.compute(), y.compute()], axis=0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tri_test_tri.assert_eq_da_tri_N_M_k_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tri_test_tri.assert_eq_da_tri_N_M_k_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 382, "span_ids": ["test_tri"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"N, M, k, dtype, chunks\",\n [\n (3, None, 0, float, \"auto\"),\n (4, None, 0, float, \"auto\"),\n (3, 4, 0, bool, \"auto\"),\n (3, None, 1, int, \"auto\"),\n (3, None, -1, int, \"auto\"),\n (3, None, 2, int, 1),\n (6, 8, -2, int, (3, 4)),\n (6, 8, 0, int, (3, \"auto\")),\n ],\n)\ndef test_tri(N, M, k, dtype, chunks):\n assert_eq(da.tri(N, M, k, dtype, chunks), np.tri(N, M, k, dtype))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_test_ravel.assert_eq_np_ravel_x_da": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_test_ravel.assert_eq_np_ravel_x_da", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 960, "end_line": 983, "span_ids": ["test_ravel"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ravel():\n x = np.random.randint(10, size=(4, 6))\n\n # 2d\n for chunks in [(4, 6), (2, 6)]:\n a = da.from_array(x, chunks=chunks)\n assert_eq(x.ravel(), a.ravel())\n assert len(a.ravel().dask) == len(a.dask) + len(a.chunks[0])\n\n # 0d\n assert_eq(x[0, 0].ravel(), a[0, 0].ravel())\n\n # 1d\n a_flat = a.ravel()\n assert_eq(a_flat.ravel(), a_flat)\n\n # 3d\n x = np.random.randint(10, size=(2, 3, 4))\n for chunks in [4, (1, 3, 4)]:\n a = da.from_array(x, chunks=chunks)\n assert_eq(x.ravel(), a.ravel())\n\n assert_eq(x.flatten(), a.flatten())\n assert_eq(np.ravel(x), da.ravel(a))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_1D_no_op_test_ravel_with_array_like.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_1D_no_op_test_ravel_with_array_like.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": 
"test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 986, "end_line": 1010, "span_ids": ["test_ravel_with_array_like", "test_ravel_1D_no_op"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ravel_1D_no_op():\n x = np.random.randint(10, size=100)\n dx = da.from_array(x, chunks=10)\n # known dims\n assert_eq(dx.ravel(), x.ravel())\n # Unknown dims\n assert_eq(dx[dx > 2].ravel(), x[x > 2].ravel())\n\n\ndef test_ravel_with_array_like():\n # int\n assert_eq(np.ravel(0), da.ravel(0))\n assert isinstance(da.ravel(0), da.core.Array)\n\n # list\n assert_eq(np.ravel([0, 0]), da.ravel([0, 0]))\n assert isinstance(da.ravel([0, 0]), da.core.Array)\n\n # tuple\n assert_eq(np.ravel((0, 0)), da.ravel((0, 0)))\n assert isinstance(da.ravel((0, 0)), da.core.Array)\n\n # nested i.e. tuples in list\n assert_eq(np.ravel([(0,), (0,)]), da.ravel([(0,), (0,)]))\n assert isinstance(da.ravel([(0,), (0,)]), da.core.Array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_with_excess_test_coarsen_with_excess.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_with_excess_test_coarsen_with_excess.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1567, "end_line": 1573, "span_ids": ["test_coarsen_with_excess"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_coarsen_with_excess():\n x = da.arange(10, chunks=5)\n assert_eq(da.coarsen(np.min, x, {0: 5}, trim_excess=True), np.array([0, 5]))\n assert_eq(\n da.coarsen(np.sum, x, {0: 3}, trim_excess=True),\n np.array([0 + 1 + 2, 3 + 4 + 5, 6 + 7 + 8]),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_bad_chunks_test_coarsen_bad_chunks.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_coarsen_bad_chunks_test_coarsen_bad_chunks.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1576, "end_line": 1583, "span_ids": ["test_coarsen_bad_chunks"], "tokens": 109}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [(x,) * 3 for x in range(16, 32)])\ndef test_coarsen_bad_chunks(chunks):\n x1 = da.arange(np.sum(chunks), chunks=5)\n x2 = x1.rechunk(tuple(chunks))\n assert_eq(\n da.coarsen(np.sum, x1, {0: 10}, trim_excess=True),\n da.coarsen(np.sum, x2, {0: 10}, trim_excess=True),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_aligned_coarsen_chunks_test_aligned_coarsen_chunks.if_any_remainders_sum_.assert_any_remainders_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_aligned_coarsen_chunks_test_aligned_coarsen_chunks.if_any_remainders_sum_.assert_any_remainders_1_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1586, "end_line": 1623, "span_ids": ["test_aligned_coarsen_chunks"], "tokens": 436}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"chunks, divisor\",\n [\n ((1, 1), 1),\n ((1, 1), 2),\n ((1, 1, 1), 2),\n ((10, 1), 10),\n ((20, 10, 15, 23, 24), 10),\n ((20, 10, 15, 23, 24), 8),\n ((10, 20, 30, 40, 2), 10),\n ((20, 10, 15, 42, 23, 24), 16),\n ((20, 10, 15, 47, 23, 24), 10),\n ((2, 10, 15, 47, 23, 24), 4),\n ],\n)\ndef test_aligned_coarsen_chunks(chunks, divisor):\n\n from ..routines import aligned_coarsen_chunks as acc\n\n aligned_chunks = acc(chunks, divisor)\n any_remainders = (np.array(aligned_chunks) % divisor) != 0\n valid_chunks = np.where((np.array(chunks) % divisor) == 0)[0]\n\n # check that total number of elements is conserved\n assert sum(aligned_chunks) == sum(chunks)\n # check that valid chunks are not modified\n assert [chunks[idx] for idx in valid_chunks] == [\n aligned_chunks[idx] for idx in valid_chunks\n ]\n # check that no chunks are 0\n assert (np.array(aligned_chunks) > 0).all()\n # check that at most one chunk was added\n assert len(aligned_chunks) <= len(chunks) + 1\n # check that either 0 or 1 chunks are not divisible by divisor\n assert any_remainders.sum() in (0, 1)\n # check that the only indivisible chunk is the last\n if any_remainders.sum() == 1:\n assert any_remainders[-1] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_append_test_append.with_pytest_raises_ValueE.da_append_a_0_10_a": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_append_test_append.with_pytest_raises_ValueE.da_append_a_0_10_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2183, "end_line": 2228, "span_ids": ["test_append"], "tokens": 558}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append():\n x = np.random.randint(10, size=(10, 10))\n a = da.from_array(x, chunks=(5, 5))\n\n # appendage for axis 1 / -1\n y1 = np.random.randint(10, size=(10, 5))\n b1 = da.from_array(y1, chunks=(4, 4))\n\n # appendage for axis 0 / -2\n y0 = np.random.randint(10, size=(5, 10))\n b0 = da.from_array(y0, chunks=(4, 4))\n\n # test axis None\n assert_eq(np.append(x, x, axis=None), da.append(a, a, axis=None))\n assert_eq(np.append(x, y0, axis=None), da.append(a, b0, axis=None))\n assert_eq(np.append(x, y1, axis=None), da.append(a, b1, axis=None))\n\n # test axis 0 / -2\n assert_eq(np.append(x, y0, axis=0), da.append(a, b0, axis=0))\n assert_eq(np.append(x, y0, axis=-2), da.append(a, b0, axis=-2))\n\n # test axis 1 / -1\n assert_eq(np.append(x, y1, axis=1), da.append(a, b1, axis=1))\n assert_eq(np.append(x, y1, axis=-1), da.append(a, b1, axis=-1))\n\n # test --> treat values as array_likes\n assert_eq(\n np.append(x, ((0,) * 10,) * 10, axis=None),\n da.append(a, ((0,) * 10,) * 10, axis=None),\n )\n assert_eq(\n np.append(x, ((0,) * 10,) * 10, axis=0), da.append(a, ((0,) * 10,) * 10, axis=0)\n )\n assert_eq(\n np.append(x, ((0,) * 10,) * 10, axis=1), da.append(a, ((0,) * 10,) * 10, axis=1)\n )\n\n # check AxisError\n with pytest.raises(np.AxisError):\n da.append(a, ((0,) * 10,) * 10, axis=2)\n with pytest.raises(np.AxisError):\n da.append(a, ((0,) * 10,) * 10, axis=-3)\n\n # check ValueError if dimensions don't align\n with pytest.raises(ValueError):\n da.append(a, (0,) * 10, axis=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_multi_insert_test_delete.None_1.da_delete_a_3_axis_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_multi_insert_test_delete.None_1.da_delete_a_3_axis_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2231, "end_line": 2264, "span_ids": ["test_delete", "test_multi_insert"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multi_insert():\n z = np.random.randint(10, size=(1, 2))\n c = da.from_array(z, 
chunks=(1, 2))\n assert_eq(\n np.insert(np.insert(z, [0, 1], -1, axis=0), [1], -1, axis=1),\n da.insert(da.insert(c, [0, 1], -1, axis=0), [1], -1, axis=1),\n )\n\n\ndef test_delete():\n x = np.random.randint(10, size=(10, 10))\n a = da.from_array(x, chunks=(5, 5))\n\n assert_eq(np.delete(x, 0, axis=0), da.delete(a, 0, axis=0))\n assert_eq(np.delete(x, 3, axis=-1), da.delete(a, 3, axis=-1))\n assert_eq(np.delete(x, 5, axis=1), da.delete(a, 5, axis=1))\n assert_eq(np.delete(x, -1, axis=-2), da.delete(a, -1, axis=-2))\n assert_eq(np.delete(x, [2, 3, 3], axis=1), da.delete(a, [2, 3, 3], axis=1))\n assert_eq(\n np.delete(x, [2, 3, 8, 8], axis=0),\n da.delete(a, [2, 3, 8, 8], axis=0),\n )\n assert_eq(np.delete(x, slice(1, 4), axis=1), da.delete(a, slice(1, 4), axis=1))\n assert_eq(\n np.delete(x, slice(1, 10, -1), axis=1), da.delete(a, slice(1, 10, -1), axis=1)\n )\n\n assert_eq(np.delete(a, [4, 2], axis=0), da.delete(a, [4, 2], axis=0))\n\n with pytest.raises(np.AxisError):\n da.delete(a, [3], axis=2)\n\n with pytest.raises(np.AxisError):\n da.delete(a, [3], axis=-3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_result_type_test_result_type.None_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_result_type_test_result_type.None_10", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1749, "end_line": 1767, "span_ids": ["test_result_type"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_result_type():\n a = da.from_array(np.ones(5, np.float32), chunks=(3,))\n b = da.from_array(np.ones(5, np.int16), chunks=(3,))\n c = da.from_array(np.ones(5, np.int64), chunks=(3,))\n x = np.ones(5, np.float32)\n assert da.result_type(b, c) == np.int64\n assert da.result_type(a, b, c) == np.float64\n assert da.result_type(b, np.float32) == np.float32\n assert da.result_type(b, np.dtype(np.float32)) == np.float32\n assert da.result_type(b, x) == np.float32\n # Effect of scalars depends on their value\n assert da.result_type(1, b) == np.int16\n assert da.result_type(1.0, a) == np.float32\n assert da.result_type(np.int64(1), b) == np.int16\n assert da.result_type(np.ones((), np.int64), b) == np.int16 # 0d array\n assert da.result_type(1e200, a) == np.float64 # 1e200 is too big for float32\n # dask 0d-arrays are NOT treated like scalars\n c = da.from_array(np.ones((), np.float64), chunks=())\n assert da.result_type(a, c) == np.float64", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_average_test_iscomplexobj.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_average_test_iscomplexobj.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1978, "end_line": 2017, "span_ids": ["test_average", "test_average_raises", "test_average_weights", "test_iscomplexobj"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"a\", [np.arange(11), np.arange(6).reshape((3, 2))])\n@pytest.mark.parametrize(\"returned\", [True, False])\ndef test_average(a, returned):\n d_a = da.from_array(a, chunks=2)\n\n np_avg = np.average(a, returned=returned)\n da_avg = da.average(d_a, returned=returned)\n\n assert_eq(np_avg, da_avg)\n\n\ndef test_average_weights():\n a = np.arange(6).reshape((3, 2))\n d_a = da.from_array(a, chunks=2)\n\n weights = np.array([0.25, 0.75])\n d_weights = da.from_array(weights, chunks=2)\n\n np_avg = np.average(a, weights=weights, axis=1)\n da_avg = da.average(d_a, weights=d_weights, axis=1)\n\n assert_eq(np_avg, da_avg)\n\n\ndef test_average_raises():\n d_a = da.arange(11, chunks=2)\n\n with pytest.raises(TypeError):\n da.average(d_a, weights=[1, 2, 3])\n\n with pytest.warns(RuntimeWarning):\n da.average(d_a, weights=da.zeros_like(d_a)).compute()\n\n\ndef test_iscomplexobj():\n a = da.from_array(np.array([1, 2]), 2)\n assert np.iscomplexobj(a) is False\n\n a = da.from_array(np.array([1, 2 + 0j]), 2)\n assert np.iscomplexobj(a) is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2020, "end_line": 2053, "span_ids": ["test_tril_triu"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tril_triu():\n A = np.random.randn(20, 20)\n for chk in [5, 4]:\n dA = da.from_array(A, (chk, chk))\n\n assert np.allclose(da.triu(dA).compute(), np.triu(A))\n assert np.allclose(da.tril(dA).compute(), np.tril(A))\n\n for k in [\n -25,\n -20,\n -19,\n -15,\n -14,\n -9,\n -8,\n -6,\n -5,\n -1,\n 1,\n 4,\n 5,\n 6,\n 8,\n 10,\n 11,\n 15,\n 16,\n 19,\n 20,\n 21,\n ]:\n assert np.allclose(da.triu(dA, k).compute(), np.triu(A, k))\n assert np.allclose(da.tril(dA, k).compute(), np.tril(A, k))", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_ndims_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_ndims_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2056, "end_line": 2066, "span_ids": ["test_tril_triu_non_square_arrays", "test_tril_ndims"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tril_ndims():\n A = np.random.randint(0, 11, (10, 10, 10))\n dA = da.from_array(A, chunks=(5, 5, 5))\n assert_eq(da.triu(dA), np.triu(A))\n\n\ndef test_tril_triu_non_square_arrays():\n A = np.random.randint(0, 11, (30, 35))\n dA = da.from_array(A, chunks=(5, 5))\n assert_eq(da.triu(dA), np.triu(A))\n assert_eq(da.tril(dA), np.tril(A))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_triu_indices_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_tril_triu_indices_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2587, "end_line": 2613, "span_ids": ["test_tril_triu_indices"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"n, k, m, chunks\",\n [(3, 0, 3, \"auto\"), (3, 1, 3, \"auto\"), (3, -1, 3, \"auto\"), (5, 0, 5, 1)],\n)\ndef test_tril_triu_indices(n, k, m, chunks):\n actual = da.tril_indices(n=n, k=k, m=m, chunks=chunks)[0]\n expected = np.tril_indices(n=n, k=k, m=m)[0]\n\n if sys.platform == \"win32\":\n assert_eq(\n actual.astype(expected.dtype),\n expected,\n )\n else:\n assert_eq(actual, expected)\n\n actual = da.triu_indices(n=n, k=k, m=m, chunks=chunks)[0]\n expected = np.triu_indices(n=n, k=k, m=m)[0]\n\n if sys.platform == \"win32\":\n assert_eq(\n actual.astype(expected.dtype),\n expected,\n )\n else:\n assert_eq(actual, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__array_like_safe__array_like_safe.try_.except_TypeError_.return.np_func_a_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__array_like_safe__array_like_safe.try_.except_TypeError_.return.np_func_a_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 451, "end_line": 464, "span_ids": ["_array_like_safe"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _array_like_safe(np_func, da_func, a, like, **kwargs):\n if like is a and hasattr(a, \"__array_function__\"):\n return a\n\n if isinstance(like, Array):\n return da_func(a, **kwargs)\n elif isinstance(a, Array):\n if is_cupy_type(a._meta):\n a = a.compute(scheduler=\"sync\")\n\n try:\n return np_func(a, like=meta_from_array(like), **kwargs)\n except TypeError:\n return np_func(a, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_array_safe_array_safe.return._array_like_safe_np_array": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_array_safe_array_safe.return._array_like_safe_np_array", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 412, "end_line": 425, "span_ids": ["array_safe"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def array_safe(a, like, **kwargs):\n \"\"\"\n If `a` is `dask.array`, return `dask.array.asarray(a, **kwargs)`,\n otherwise return `np.asarray(a, like=like, **kwargs)`, dispatching\n the call to the library that implements the like array. 
Note that\n when `a` is a `dask.Array` backed by `cupy.ndarray` but `like`\n isn't, this function will call `a.compute(scheduler=\"sync\")`\n before `np.array`, as downstream libraries are unlikely to know how\n to convert a `dask.Array` and CuPy doesn't implement `__array__` to\n prevent implicit copies to host.\n \"\"\"\n from .routines import array\n\n return _array_like_safe(np.array, array, a, like, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_asarray_safe_asarray_safe.return._array_like_safe_np_asarr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_asarray_safe_asarray_safe.return._array_like_safe_np_asarr", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 428, "end_line": 439, "span_ids": ["asarray_safe"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asarray_safe(a, like, **kwargs):\n \"\"\"\n If a is dask.array, return dask.array.asarray(a, **kwargs),\n otherwise return np.asarray(a, like=like, **kwargs), dispatching\n the call to the library that implements the like array. Note that\n when a is a dask.Array but like isn't, this function will call\n a.compute(scheduler=\"sync\") before np.asarray, as downstream\n libraries are unlikely to know how to convert a dask.Array.\n \"\"\"\n from .core import asarray\n\n return _array_like_safe(np.asarray, asarray, a, like, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_asanyarray_safe_asanyarray_safe.return._array_like_safe_np_asany": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_asanyarray_safe_asanyarray_safe.return._array_like_safe_np_asany", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 442, "end_line": 453, "span_ids": ["asanyarray_safe"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asanyarray_safe(a, like, **kwargs):\n \"\"\"\n If a is dask.array, return dask.array.asanyarray(a, **kwargs),\n otherwise return np.asanyarray(a, like=like, **kwargs), dispatching\n the call to the library that implements the like array. 
Note that\n when a is a dask.Array but like isn't, this function will call\n a.compute(scheduler=\"sync\") before np.asanyarray, as downstream\n libraries are unlikely to know how to convert a dask.Array.\n \"\"\"\n from .core import asanyarray\n\n return _array_like_safe(np.asanyarray, asanyarray, a, like, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_releases_element_references_as_soon_as_possible_test_map_releases_element_references_as_soon_as_possible.try_.finally_.gc_enable_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_map_releases_element_references_as_soon_as_possible_test_map_releases_element_references_as_soon_as_possible.try_.finally_.gc_enable_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1508, "end_line": 1557, "span_ids": ["test_map_releases_element_references_as_soon_as_possible"], "tokens": 394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_releases_element_references_as_soon_as_possible():\n # Ensure that Bag.map doesn't keep *element* references longer than\n # necessary. Previous map implementations used ``yield``, which would keep\n # a reference to the yielded element until the yielded method resumed (this\n # is just how generator functions work in CPython).\n #\n # See https://github.com/dask/dask/issues/5189\n #\n # We test 2 variants of potential extra references here:\n # 1. Within an element of a partition:\n # At the time of the second `f_create` for each element, the `C` from\n # the first `f_create` should be dropped.\n # 2. 
Within a partition:\n # When the second item within a partition is processed, `C` from the\n # first item should already be dropped.\n class C:\n def __init__(self, i):\n self.i = i\n\n # keep a weakref to all existing instances of `C`\n in_memory = weakref.WeakSet()\n\n def f_create(i):\n # check that there are no instances of `C` left\n assert len(in_memory) == 0\n\n # create new instance\n o = C(i)\n in_memory.add(o)\n\n return o\n\n def f_drop(o):\n # o reference dropped on return, should collect\n return o.i + 100\n\n b = (\n db.from_sequence(range(2), npartitions=1)\n .map(f_create)\n .map(f_drop)\n .map(f_create)\n .map(f_drop)\n .sum()\n )\n try:\n # Disable gc to ensure refcycles don't matter here\n gc.disable()\n b.compute(scheduler=\"sync\")\n finally:\n gc.enable()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bagged_array_delayed_test_dask_layers.assert_i_dask_dependencie": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bagged_array_delayed_test_dask_layers.assert_i_dask_dependencie", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1560, "end_line": 1577, "span_ids": ["test_dask_layers", "test_bagged_array_delayed"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bagged_array_delayed():\n da = pytest.importorskip(\"dask.array\")\n\n obj = da.ones(10, chunks=5).to_delayed()[0]\n bag = db.from_delayed(obj)\n b = bag.compute()\n assert_eq(b, [1.0, 1.0, 1.0, 1.0, 1.0])\n\n\ndef test_dask_layers():\n a = db.from_sequence([1, 2], npartitions=2)\n assert a.__dask_layers__() == (a.name,)\n assert a.dask.layers.keys() == {a.name}\n assert a.dask.dependencies == {a.name: set()}\n i = a.min()\n assert i.__dask_layers__() == (i.key,)\n assert i.dask.layers.keys() == {a.name, i.key}\n assert i.dask.dependencies == {a.name: set(), i.key: {a.name}}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_scheduler_get_scheduler.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_scheduler_get_scheduler.return.None", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1257, "end_line": 1336, "span_ids": ["get_scheduler"], "tokens": 619}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_scheduler(get=None, scheduler=None, collections=None, cls=None):\n \"\"\"Get scheduler function\n\n There are various ways to specify the scheduler to use:\n\n 1. Passing in scheduler= parameters\n 2. Passing these into global configuration\n 3. Using defaults of a dask collection\n\n This function centralizes the logic to determine the right scheduler to use\n from those many options\n \"\"\"\n if get:\n raise TypeError(get_err_msg)\n\n if scheduler is not None:\n if callable(scheduler):\n return scheduler\n elif \"Client\" in type(scheduler).__name__ and hasattr(scheduler, \"get\"):\n return scheduler.get\n elif isinstance(scheduler, str):\n scheduler = scheduler.lower()\n\n if scheduler in named_schedulers:\n if config.get(\"scheduler\", None) in (\"dask.distributed\", \"distributed\"):\n warnings.warn(\n \"Running on a single-machine scheduler when a distributed client \"\n \"is active might lead to unexpected results.\"\n )\n return named_schedulers[scheduler]\n elif scheduler in (\"dask.distributed\", \"distributed\"):\n from distributed.worker import get_client\n\n return get_client().get\n else:\n raise ValueError(\n \"Expected one of [distributed, %s]\"\n % \", \".join(sorted(named_schedulers))\n )\n elif isinstance(scheduler, Executor):\n # Get `num_workers` from `Executor`'s `_max_workers` attribute.\n # If undefined, fallback to `config` or worst case CPU_COUNT.\n num_workers = getattr(scheduler, \"_max_workers\", None)\n if num_workers is None:\n num_workers = config.get(\"num_workers\", CPU_COUNT)\n assert isinstance(num_workers, Integral) and num_workers > 0\n return partial(local.get_async, scheduler.submit, num_workers)\n else:\n raise ValueError(\"Unexpected scheduler: %s\" % repr(scheduler))\n # else: # try to connect to remote scheduler with this name\n # return get_client(scheduler).get\n\n if config.get(\"scheduler\", None):\n return get_scheduler(scheduler=config.get(\"scheduler\", None))\n\n if config.get(\"get\", None):\n raise ValueError(get_err_msg)\n\n if getattr(thread_state, \"key\", False):\n from distributed.worker import get_worker\n\n return get_worker().client.get\n\n if cls is not None:\n return cls.__dask_scheduler__\n\n if collections:\n collections = [c for c in collections if c is not None]\n if collections:\n get = collections[0].__dask_scheduler__\n if not all(c.__dask_scheduler__ == get for c in collections):\n raise ValueError(\n \"Compute called on multiple collections with \"\n \"differing default schedulers. 
Please specify a \"\n \"scheduler=` parameter explicitly in compute or \"\n \"globally with `dask.config.set`.\"\n )\n return get\n\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_wait_get_collection_names.return._get_name_from_key_k_for": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_wait_get_collection_names.return._get_name_from_key_k_for", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1339, "end_line": 1372, "span_ids": ["wait", "get_collection_names"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wait(x, timeout=None, return_when=\"ALL_COMPLETED\"):\n \"\"\"Wait until computation has finished\n\n This is a compatibility alias for ``dask.distributed.wait``.\n If it is applied onto Dask collections without Dask Futures or if Dask\n distributed is not installed then it is a no-op\n \"\"\"\n try:\n from distributed import wait\n\n return wait(x, timeout=timeout, return_when=return_when)\n except (ImportError, ValueError):\n return x\n\n\ndef get_collection_names(collection) -> set[str]:\n \"\"\"Infer the collection names from the dask keys, under the assumption that all keys\n are either tuples with matching first element, and that element is a string, or\n there is exactly one key and it is a string.\n\n Examples\n --------\n >>> a.__dask_keys__() # doctest: +SKIP\n [\"foo\", \"bar\"]\n >>> get_collection_names(a) # doctest: +SKIP\n {\"foo\", \"bar\"}\n >>> b.__dask_keys__() # doctest: +SKIP\n [[(\"foo-123\", 0, 0), (\"foo-123\", 0, 1)], [(\"foo-123\", 1, 0), (\"foo-123\", 1, 1)]]\n >>> get_collection_names(b) # doctest: +SKIP\n {\"foo-123\"}\n \"\"\"\n if not is_dask_collection(collection):\n raise TypeError(f\"Expected Dask collection; got {type(collection)}\")\n return {get_name_from_key(k) for k in flatten(collection.__dask_keys__())}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_name_from_key_get_name_from_key.raise_TypeError_f_Expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_get_name_from_key_get_name_from_key.raise_TypeError_f_Expecte", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1216, "end_line": 1236, "span_ids": ["get_name_from_key"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_name_from_key(key) -> str:\n \"\"\"Given a dask collection's key, extract the collection name.\n\n Parameters\n ----------\n key: string or tuple\n Dask collection's key, which must be either a single string or a tuple whose\n first element is a string (commonly referred to as a collection's 'name'),\n\n Examples\n --------\n >>> get_name_from_key(\"foo\")\n 'foo'\n >>> get_name_from_key((\"foo-123\", 1, 2))\n 'foo-123'\n \"\"\"\n if isinstance(key, tuple) and key and isinstance(key[0], str):\n return key[0]\n if isinstance(key, str):\n return key\n raise TypeError(f\"Expected str or tuple[str, Hashable, ...]; got {key}\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_replace_name_in_key_replace_name_in_key.raise_TypeError_f_Expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_replace_name_in_key_replace_name_in_key.raise_TypeError_f_Expecte", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1239, "end_line": 1264, "span_ids": ["replace_name_in_key"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def replace_name_in_key(key, rename: Mapping[str, str]):\n \"\"\"Given a dask collection's key, replace the collection name with a new one.\n\n Parameters\n ----------\n key: string or tuple\n Dask collection's key, which must be either a single string or a tuple whose\n first element is a string (commonly referred to as a collection's 'name'),\n rename:\n Mapping of zero or more names from : to. 
Extraneous names will be ignored.\n Names not found in this mapping won't be replaced.\n\n Examples\n --------\n >>> replace_name_in_key(\"foo\", {})\n 'foo'\n >>> replace_name_in_key(\"foo\", {\"foo\": \"bar\"})\n 'bar'\n >>> replace_name_in_key((\"foo-123\", 1, 2), {\"foo-123\": \"bar-456\"})\n ('bar-456', 1, 2)\n \"\"\"\n if isinstance(key, tuple) and key and isinstance(key[0], str):\n return (rename.get(key[0], key[0]),) + key[1:]\n if isinstance(key, str):\n return rename.get(key, key)\n raise TypeError(f\"Expected str or tuple[str, Hashable, ...]; got {key}\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_clone_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_clone_key_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1426, "end_line": 1445, "span_ids": ["clone_key"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def clone_key(key, seed):\n \"\"\"Clone a key from a Dask collection, producing a new key with the same prefix and\n indices and a token which is a deterministic function of the previous key and seed.\n\n Examples\n --------\n >>> clone_key(\"x\", 123)\n 'x-dc2b8d1c184c72c19faa81c797f8c6b0'\n >>> clone_key(\"inc-cbb1eca3bafafbb3e8b2419c4eebb387\", 123)\n 'inc-f81b5a88038a2132882aa29a9fcfec06'\n >>> clone_key((\"sum-cbb1eca3bafafbb3e8b2419c4eebb387\", 4, 3), 123)\n ('sum-fd6be9e9fe07fc232ad576fa997255e8', 4, 3)\n \"\"\"\n if isinstance(key, tuple) and key and isinstance(key[0], str):\n return (clone_key(key[0], seed),) + key[1:]\n if isinstance(key, str):\n prefix = key_split(key)\n return prefix + \"-\" + tokenize(key, seed)\n raise TypeError(f\"Expected str or tuple[str, Hashable, ...]; got {key}\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_unpack___Blockwise.__dask_distributed_unpack__.return._dsk_layer_dsk_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_unpack___Blockwise.__dask_distributed_unpack__.return._dsk_layer_dsk_deps_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 579, "end_line": 626, "span_ids": ["Blockwise.__dask_distributed_unpack__"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n @classmethod\n def __dask_distributed_unpack__(cls, state, dsk, dependencies):\n from distributed.protocol.serialize import import_allowed_module\n\n # Make sure we convert list items back from tuples in `indices`.\n # The msgpack serialization will have converted lists into\n # tuples, and tuples may be stringified during graph\n # materialization (bad if the item was not a key).\n indices = [\n list(ind) if is_list else ind\n for ind, is_list in zip(state[\"indices\"], state[\"is_list\"])\n ]\n\n # Unpack io_deps state\n io_deps = {}\n for replace_name, packed_dep in state[\"io_deps\"].items():\n mod = import_allowed_module(packed_dep[\"__module__\"])\n dep_cls = getattr(mod, packed_dep[\"__name__\"])\n io_deps[replace_name] = dep_cls.__dask_distributed_unpack__(\n packed_dep[\"state\"]\n )\n\n layer_dsk, layer_deps = make_blockwise_graph(\n state[\"func\"],\n state[\"output\"],\n state[\"output_indices\"],\n *indices,\n new_axes=state[\"new_axes\"],\n numblocks=state[\"numblocks\"],\n concatenate=state[\"concatenate\"],\n output_blocks=state[\"output_blocks\"],\n dims=state[\"dims\"],\n return_key_deps=True,\n deserializing=True,\n func_future_args=state[\"func_future_args\"],\n io_deps=io_deps,\n )\n g_deps = state[\"global_dependencies\"]\n\n # Stringify layer graph and dependencies\n layer_dsk = {\n stringify(k): stringify_collection_keys(v) for k, v in layer_dsk.items()\n }\n deps = {\n stringify(k): {stringify(d) for d in v} | g_deps\n for k, v in layer_deps.items()\n }\n return {\"dsk\": layer_dsk, \"deps\": deps}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._cull_Blockwise.cull.if_prod_out_size_iter_.else_.return.self_culled_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise._cull_Blockwise.cull.if_prod_out_size_iter_.else_.return.self_culled_deps", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 713, "end_line": 744, "span_ids": ["Blockwise.cull", "Blockwise._cull"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def _cull(self, output_blocks):\n return Blockwise(\n self.output,\n self.output_indices,\n self.dsk,\n self.indices,\n self.numblocks,\n concatenate=self.concatenate,\n new_axes=self.new_axes,\n output_blocks=output_blocks,\n annotations=self.annotations,\n io_deps=self.io_deps,\n )\n\n def cull(\n self, keys: set, all_hlg_keys: Iterable\n ) -> tuple[Layer, Mapping[Hashable, set]]:\n # Culling is simple for Blockwise layers. 
We can just\n # collect a set of required output blocks (tuples), and\n # only construct graph for these blocks in `make_blockwise_graph`\n\n output_blocks = set()\n for key in keys:\n if key[0] == self.output:\n output_blocks.add(key[1:])\n culled_deps = self._cull_dependencies(all_hlg_keys, output_blocks)\n out_size_iter = (self.dims[i] for i in self.output_indices)\n if prod(out_size_iter) != len(culled_deps):\n culled_layer = self._cull(output_blocks)\n return culled_layer, culled_deps\n else:\n return self, culled_deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar_Scalar.key.return._self__name_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar_Scalar.key.return._self__name_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 125, "end_line": 227, "span_ids": ["Scalar:3", "Scalar", "Scalar.__dask_graph__", "Scalar.__dir__", "Scalar.dtype", "Scalar.__repr__", "Scalar.__bool__", "Scalar.__dask_tokenize__", "Scalar.__array__", "Scalar.__dask_keys__", "Scalar.divisions", "Scalar._args", "Scalar.__init__", "Scalar.__dask_layers__", "Scalar.__dask_postpersist__", "Scalar._meta_nonempty", "Scalar._rebuild", "Scalar.__getstate__", "Scalar.__dask_postcompute__", "Scalar.key", "Scalar.__setstate__"], "tokens": 783}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Scalar(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\"A Dask object to represent a pandas scalar\"\"\"\n\n def __init__(self, dsk, name, meta, divisions=None):\n # divisions is ignored, only present to be compatible with other\n # objects.\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self._name = name\n self._parent_meta = pd.Series(dtype=\"float64\")\n\n meta = make_meta(meta, parent_meta=self._parent_meta)\n if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):\n raise TypeError(\n f\"Expected meta to specify scalar, got {typename(type(meta))}\"\n )\n self._meta = meta\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [self.key]\n\n def __dask_tokenize__(self):\n return self._name\n\n def __dask_layers__(self):\n return (self._name,)\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"dataframe_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return first, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()\n\n def _rebuild(self, dsk, *, rename=None):\n name = self._name\n if rename:\n name = rename.get(name, name)\n return Scalar(dsk, name, self._meta, self.divisions)\n\n @property\n def _meta_nonempty(self):\n return self._meta\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def __dir__(self):\n o = set(dir(type(self)))\n 
o.update(self.__dict__)\n if not hasattr(self._meta, \"dtype\"):\n o.remove(\"dtype\") # dtype only in `dir` if available\n return list(o)\n\n @property\n def divisions(self):\n \"\"\"Dummy divisions to be compat with Series and DataFrame\"\"\"\n return (None, None)\n\n def __repr__(self):\n name = self._name if len(self._name) < 10 else self._name[:7] + \"...\"\n if hasattr(self._meta, \"dtype\"):\n extra = \", dtype=%s\" % self._meta.dtype\n else:\n extra = \", type=%s\" % type(self._meta).__name__\n return f\"dd.Scalar<{name}{extra}>\"\n\n def __array__(self):\n # array interface is required to support pandas instance + Scalar\n # Otherwise, above op results in pd.Series of Scalar (object dtype)\n return np.asarray(self.compute())\n\n @property\n def _args(self):\n return (self.dask, self._name, self._meta)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self._name, self._meta = state\n\n def __bool__(self):\n raise TypeError(\n f\"Trying to convert {self} to a boolean value. Because Dask objects are \"\n \"lazily evaluated, they cannot be converted to a boolean value or used \"\n \"in boolean conditions like if statements. Try calling .compute() to \"\n \"force computation prior to converting to a boolean value or using in \"\n \"a conditional statement.\"\n )\n\n @property\n def key(self):\n return (self._name, 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar._get_unary_operator_Scalar._get_binary_operator.return.lambda_self_other__scal": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar._get_unary_operator_Scalar._get_binary_operator.return.lambda_self_other__scal", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 213, "end_line": 226, "span_ids": ["Scalar._get_binary_operator", "Scalar._get_unary_operator"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Scalar(DaskMethodsMixin, OperatorMethodMixin):\n\n @classmethod\n def _get_unary_operator(cls, op):\n def f(self):\n name = funcname(op) + \"-\" + tokenize(self)\n dsk = {(name, 0): (op, (self._name, 0))}\n meta = op(self._meta_nonempty)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])\n return Scalar(graph, name, meta)\n\n return f\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n return lambda self, other: _scalar_binary(op, self, other, inv=inv)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._process_metadata_ArrowLegacyEngine._process_metadata.return.parts_stats_common_kwar": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._process_metadata_ArrowLegacyEngine._process_metadata.return.parts_stats_common_kwar", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2284, "end_line": 2348, "span_ids": ["ArrowLegacyEngine._process_metadata"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _process_metadata(\n cls,\n metadata,\n schema,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n categories,\n partition_info,\n data_path,\n fs,\n chunksize,\n aggregation_depth,\n kwargs,\n ):\n \"\"\"Process row-groups and statistics.\n\n This method is only used by `ArrowLegacyEngine`.\n \"\"\"\n\n # Organize row-groups by file\n (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n ) = cls._organize_row_groups(\n metadata,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n chunksize,\n aggregation_depth,\n )\n\n # Convert organized row-groups to parts\n parts, stats = _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n aggregation_depth,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n cls._make_part,\n make_part_kwargs={\n \"fs\": fs,\n \"partition_keys\": partition_info.get(\"partition_keys\", None),\n \"partition_obj\": partition_info.get(\"partitions\", None),\n \"data_path\": data_path,\n },\n )\n\n # Add common kwargs\n common_kwargs = {\n \"partitions\": partition_info[\"partitions\"],\n \"categories\": categories,\n \"filters\": filters,\n **kwargs,\n }\n\n return parts, stats, common_kwargs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._organize_row_groups.for_rg_row_group_in_enum_FastParquetEngine._organize_row_groups.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._organize_row_groups.for_rg_row_group_in_enum_FastParquetEngine._organize_row_groups.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 158, "end_line": 293, "span_ids": ["FastParquetEngine._organize_row_groups"], "tokens": 1146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
FastParquetEngine(Engine):\n @classmethod\n def _organize_row_groups(\n cls,\n pf,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n dtypes,\n base_path,\n has_metadata_file,\n chunksize,\n aggregation_depth,\n ):\n # ... other code\n for rg, row_group in enumerate(pf.row_groups):\n\n # We can filter partition columns here without dealing\n # with statistics\n if (\n pqpartitions\n and filters\n and fastparquet.api.filter_out_cats(row_group, filters)\n ):\n continue\n\n # NOTE: Here we assume that all column chunks are stored\n # in the same file. This is not strictly required by the\n # parquet spec.\n fp = row_group.columns[0].file_path\n fpath = fp.decode() if isinstance(fp, bytes) else fp\n if fpath is None:\n if not has_metadata_file:\n # There doesn't need to be a file_path if the\n # row group is in the same file as the metadata.\n # Assume this is a single-file dataset.\n fpath = pf.fn\n base_path = base_path or \"\"\n else:\n raise ValueError(\n \"Global metadata structure is missing a file_path string. \"\n \"If the dataset includes a _metadata file, that file may \"\n \"have one or more missing file_path fields.\"\n )\n\n # Append a tuple to file_row_groups. This tuple will\n # be structured as: `(<local-id>, <global-id>)`\n if file_row_groups[fpath]:\n file_row_groups[fpath].append((file_row_groups[fpath][-1][0] + 1, rg))\n else:\n file_row_groups[fpath].append((0, rg))\n\n if gather_statistics:\n if single_rg_parts:\n s = {\n \"file_path_0\": fpath,\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n \"columns\": [],\n }\n else:\n s = {\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n }\n cstats = []\n for name, i in stat_col_indices.items():\n column = row_group.columns[i]\n if column.meta_data.statistics:\n cmin = None\n cmax = None\n # TODO: Avoid use of `pf.statistics`\n if pf.statistics[\"min\"][name][0] is not None:\n cmin = pf.statistics[\"min\"][name][rg]\n cmax = pf.statistics[\"max\"][name][rg]\n elif dtypes[name] == \"object\":\n cmin = column.meta_data.statistics.min_value\n cmax = column.meta_data.statistics.max_value\n # Older versions may not have cmin/cmax_value\n if cmin is None:\n cmin = column.meta_data.statistics.min\n if cmax is None:\n cmax = column.meta_data.statistics.max\n # Decode bytes as long as \"bytes\" is not the\n # expected `pandas_type` for this column\n if (\n isinstance(cmin, (bytes, bytearray))\n and pandas_type.get(name, None) != \"bytes\"\n ):\n cmin = cmin.decode(\"utf-8\")\n cmax = cmax.decode(\"utf-8\")\n if isinstance(cmin, np.datetime64):\n tz = getattr(dtypes[name], \"tz\", None)\n cmin = pd.Timestamp(cmin, tz=tz)\n cmax = pd.Timestamp(cmax, tz=tz)\n last = cmax_last.get(name, None)\n\n if not (filters or chunksize or aggregation_depth):\n # Only think about bailing if we don't need\n # stats for filtering\n if cmin is None or (last and cmin < last):\n # We are collecting statistics for divisions\n # only (no filters) - Column isn't sorted, or\n # we have an all-null partition, so let's bail.\n #\n # Note: This assumes ascending order.\n #\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": cmin,\n \"max\": cmax,\n }\n )\n else:\n cstats += [cmin, cmax]\n cmax_last[name] = cmax\n else:\n if (\n not (filters or chunksize or aggregation_depth)\n and column.meta_data.num_values > 0\n ):\n # We are collecting statistics for divisions\n # only (no filters) - let's bail.\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append({\"name\": name})\n else:\n cstats += [None, None]\n if gather_statistics:\n file_row_group_stats[fpath].append(s)\n if not single_rg_parts:\n file_row_group_column_stats[fpath].append(tuple(cstats))\n\n return (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n base_path,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._make_part_FastParquetEngine._make_part.return.part": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._make_part_FastParquetEngine._make_part.return.part", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 537, "end_line": 562, "span_ids": ["FastParquetEngine._make_part"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _make_part(\n cls,\n filename,\n rg_list,\n fs=None,\n pf=None,\n base_path=None,\n partitions=None,\n ):\n \"\"\"Generate a partition-specific element of `parts`.\"\"\"\n\n if partitions:\n real_row_groups = cls._get_thrift_row_groups(\n pf,\n filename,\n rg_list,\n )\n part = {\"piece\": (real_row_groups,)}\n else:\n # Get full path (empty strings should be ignored)\n full_path = fs.sep.join([p for p in [base_path, filename] if p != \"\"])\n row_groups = [rg[0] for rg in rg_list] # Don't need global IDs\n part = {\"piece\": (full_path, row_groups)}\n\n return part", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata_FastParquetEngine.read_metadata.return._meta_stats_parts_inde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_metadata_FastParquetEngine.read_metadata.return._meta_stats_parts_inde", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 844, "end_line": 905, "span_ids": ["FastParquetEngine.read_metadata"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n split_row_groups=True,\n chunksize=None,\n aggregate_files=None,\n ignore_metadata_file=False,\n metadata_task_size=None,\n **kwargs,\n ):\n\n # Stage 1: Collect general dataset information\n dataset_info = cls._collect_dataset_info(\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n )\n\n # Stage 2: Generate output `meta`\n meta = cls._create_dd_meta(dataset_info)\n\n # Stage 3: Generate parts and stats\n parts, stats, common_kwargs = cls._construct_collection_plan(dataset_info)\n\n # Cannot allow `None` in columns if the user has specified index=False\n index = dataset_info[\"index\"]\n if index is False and None in meta.columns:\n meta.drop(columns=[None], inplace=True)\n\n # Add `common_kwargs` to the first element of `parts`.\n # We can return as a separate element in the future, but\n # should avoid breaking the API for now.\n if len(parts):\n parts[0][\"common_kwargs\"] = common_kwargs\n parts[0][\"aggregation_depth\"] = dataset_info[\"aggregation_depth\"]\n\n if len(parts) and len(parts[0][\"piece\"]) == 1:\n\n # Strip all partition-dependent or unnecessary\n # data from the `ParquetFile` object\n pf = dataset_info[\"pf\"]\n pf.row_groups = None\n pf.fmd.row_groups = None\n pf._statistics = None\n parts[0][\"common_kwargs\"][\"parquet_file\"] = pf\n\n return (meta, stats, parts, index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths.path_parts_list__flatten_filters.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__analyze_paths.path_parts_list__flatten_filters.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 461, "end_line": 496, "span_ids": ["_flatten_filters", "_analyze_paths"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _analyze_paths(file_list, fs, root=False):\n # ... 
other code\n\n path_parts_list = [_join_path(fn).split(\"/\") for fn in file_list]\n if root is False:\n basepath = path_parts_list[0][:-1]\n for i, path_parts in enumerate(path_parts_list):\n j = len(path_parts) - 1\n for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)):\n if base_part != path_part:\n j = k\n break\n basepath = basepath[:j]\n l = len(basepath)\n else:\n basepath = _join_path(root).split(\"/\")\n l = len(basepath)\n assert all(\n p[:l] == basepath for p in path_parts_list\n ), \"All paths must begin with the given root\"\n out_list = []\n for path_parts in path_parts_list:\n out_list.append(\n \"/\".join(path_parts[l:])\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n return (\n \"/\".join(basepath),\n out_list,\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n\ndef _flatten_filters(filters):\n \"\"\"Flatten DNF-formatted filters (list of tuples)\"\"\"\n return (\n set(flatten(tuple(flatten(filters, container=list)), container=tuple))\n if filters\n else []\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__aggregate_stats__aggregate_stats.if_len_file_row_group_sta.else_.return.s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__aggregate_stats__aggregate_stats.if_len_file_row_group_sta.else_.return.s", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 501, "end_line": 562, "span_ids": ["_aggregate_stats"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _aggregate_stats(\n file_path,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n):\n \"\"\"Utility to aggregate the statistics for N row-groups\n into a single dictionary.\n\n Used by `Engine._construct_parts`\n \"\"\"\n if len(file_row_group_stats) < 1:\n # Empty statistics\n return {}\n elif len(file_row_group_column_stats) == 0:\n assert len(file_row_group_stats) == 1\n return file_row_group_stats[0]\n else:\n # Note: It would be better to avoid df_rgs and df_cols\n # construction altogether. 
It makes it fast to aggregate\n # the statistics for many row groups, but isn't\n # worthwhile for a small number of row groups.\n if len(file_row_group_stats) > 1:\n df_rgs = pd.DataFrame(file_row_group_stats)\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": df_rgs[\"num-rows\"].sum(),\n \"num-row-groups\": df_rgs[\"num-rows\"].count(),\n \"total_byte_size\": df_rgs[\"total_byte_size\"].sum(),\n \"columns\": [],\n }\n else:\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": file_row_group_stats[0][\"num-rows\"],\n \"num-row-groups\": 1,\n \"total_byte_size\": file_row_group_stats[0][\"total_byte_size\"],\n \"columns\": [],\n }\n\n df_cols = None\n if len(file_row_group_column_stats) > 1:\n df_cols = pd.DataFrame(file_row_group_column_stats)\n for ind, name in enumerate(stat_col_indices):\n i = ind * 2\n if df_cols is None:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": file_row_group_column_stats[0][i],\n \"max\": file_row_group_column_stats[0][i + 1],\n }\n )\n else:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": df_cols.iloc[:, i].min(),\n \"max\": df_cols.iloc[:, i + 1].max(),\n }\n )\n return s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_test_split_row_groups.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_test_split_row_groups.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2542, "end_line": 2580, "span_ids": ["test_split_row_groups"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_split_row_groups(tmpdir, engine):\n \"\"\"Test split_row_groups read_parquet kwarg\"\"\"\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n\n half = len(df) // 2\n dd.from_pandas(df.iloc[:half], npartitions=2).to_parquet(\n tmp, engine=\"pyarrow\", row_group_size=100\n )\n\n ddf3 = dd.read_parquet(tmp, engine=engine, split_row_groups=True, chunksize=1)\n assert ddf3.npartitions == 4\n\n ddf3 = dd.read_parquet(\n tmp, engine=engine, gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 2\n\n dd.from_pandas(df.iloc[half:], npartitions=2).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf3 = dd.read_parquet(\n tmp,\n engine=engine,\n gather_statistics=True,\n split_row_groups=True,\n chunksize=1,\n )\n assert ddf3.npartitions == 12\n\n ddf3 = dd.read_parquet(\n tmp, engine=engine, gather_statistics=True, split_row_groups=False\n )\n assert ddf3.npartitions == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_test_split_row_groups_int.assert_ddf2_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_test_split_row_groups_int.assert_ddf2_npartitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2583, "end_line": 2613, "span_ids": ["test_split_row_groups_int"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"split_row_groups\", [1, 12])\n@pytest.mark.parametrize(\"gather_statistics\", [True, False])\ndef test_split_row_groups_int(tmpdir, split_row_groups, gather_statistics, engine):\n tmp = str(tmpdir)\n row_group_size = 10\n npartitions = 4\n half_size = 400\n df = pd.DataFrame(\n {\n \"i32\": np.arange(2 * half_size, dtype=np.int32),\n \"f\": np.arange(2 * half_size, dtype=np.float64),\n }\n )\n half = len(df) // 2\n\n dd.from_pandas(df.iloc[:half], npartitions=npartitions).to_parquet(\n tmp, engine=\"pyarrow\", row_group_size=row_group_size\n )\n dd.from_pandas(df.iloc[half:], npartitions=npartitions).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=row_group_size\n )\n\n ddf2 = dd.read_parquet(\n tmp,\n engine=engine,\n split_row_groups=split_row_groups,\n gather_statistics=gather_statistics,\n )\n expected_rg_cout = int(half_size / row_group_size)\n assert ddf2.npartitions == 2 * math.ceil(expected_rg_cout / split_row_groups)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_filter_test_split_row_groups_filter.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_filter_test_split_row_groups_filter.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2672, "end_line": 2699, "span_ids": ["test_split_row_groups_filter"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_split_row_groups_filter(tmpdir, engine):\n tmp = str(tmpdir)\n df = pd.DataFrame(\n {\"i32\": np.arange(800, dtype=np.int32), \"f\": np.arange(800, dtype=np.float64)}\n )\n df.index.name = \"index\"\n search_val = 600\n filters = [(\"f\", \"==\", 
search_val)]\n\n dd.from_pandas(df, npartitions=4).to_parquet(\n tmp, append=True, engine=\"pyarrow\", row_group_size=50\n )\n\n ddf2 = dd.read_parquet(tmp, engine=engine)\n ddf3 = dd.read_parquet(\n tmp,\n engine=engine,\n gather_statistics=True,\n split_row_groups=True,\n filters=filters,\n )\n\n assert (ddf3[\"i32\"] == search_val).any().compute()\n assert_eq(\n ddf2[ddf2[\"i32\"] == search_val].compute(),\n ddf3[ddf3[\"i32\"] == search_val].compute(),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_filter_partitioned_test_pyarrow_dataset_filter_partitioned.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_filter_partitioned_test_pyarrow_dataset_filter_partitioned.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3310, "end_line": 3336, "span_ids": ["test_pyarrow_dataset_filter_partitioned"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"split_row_groups\", [True, False])\ndef test_pyarrow_dataset_filter_partitioned(tmpdir, split_row_groups):\n fn = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a\": [4, 5, 6],\n \"b\": [\"a\", \"b\", \"b\"],\n \"c\": [\"A\", \"B\", \"B\"],\n }\n )\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=\"pyarrow\", partition_on=[\"b\", \"c\"])\n\n # Filter on a a non-partition column\n read_df = dd.read_parquet(\n fn,\n engine=\"pyarrow-dataset\",\n split_row_groups=split_row_groups,\n filters=[(\"a\", \"==\", 5)],\n )\n assert_eq(\n read_df.compute()[[\"a\"]],\n df[df[\"a\"] == 5][[\"a\"]],\n check_index=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_axis_test_series_round.assert_eq_s_round_ps_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_axis_test_series_round.assert_eq_s_round_ps_r", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1724, "end_line": 1906, "span_ids": ["test_dataframe_picklable", "test_combine", "test_combine_first", "test_series_round", "test_align_axis", "test_random_partitions"], "tokens": 1840}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_align_axis(join):\n df1a = pd.DataFrame(\n {\"A\": np.random.randn(10), \"B\": np.random.randn(10), \"C\": np.random.randn(10)},\n index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],\n )\n\n df1b = pd.DataFrame(\n {\"B\": np.random.randn(10), \"C\": np.random.randn(10), \"D\": np.random.randn(10)},\n index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],\n )\n ddf1a = dd.from_pandas(df1a, 3)\n ddf1b = dd.from_pandas(df1b, 3)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)\n exp1, exp2 = df1a.align(df1b, join=join, axis=0)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)\n exp1, exp2 = df1a.align(df1b, join=join, axis=1)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=\"index\")\n exp1, exp2 = df1a.align(df1b, join=join, axis=\"index\")\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n res1, res2 = ddf1a.align(ddf1b, join=join, axis=\"columns\")\n exp1, exp2 = df1a.align(df1b, join=join, axis=\"columns\")\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # invalid\n with pytest.raises(ValueError):\n ddf1a.align(ddf1b, join=join, axis=\"XXX\")\n\n with pytest.raises(ValueError):\n ddf1a[\"A\"].align(ddf1b[\"B\"], join=join, axis=1)\n\n\ndef test_combine():\n df1 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, np.nan], 100),\n \"B\": np.random.choice([\"a\", \"b\", \"nan\"], 100),\n }\n )\n\n df2 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, 3], 100),\n \"B\": np.random.choice([\"a\", \"b\", \"c\"], 100),\n }\n )\n ddf1 = dd.from_pandas(df1, 4)\n ddf2 = dd.from_pandas(df2, 5)\n\n first = lambda a, b: a\n\n # You can add series with strings and nans but you can't add scalars 'a' + np.NaN\n str_add = lambda a, b: a + b if a is not np.nan else a\n\n # DataFrame\n for dda, ddb, a, b, runs in [\n (ddf1, ddf2, df1, df2, [(add, None), (first, None)]),\n (ddf1.A, ddf2.A, df1.A, df2.A, [(add, None), (add, 100), (first, None)]),\n (\n ddf1.B,\n ddf2.B,\n df1.B,\n df2.B,\n [(str_add, None), (str_add, \"d\"), (first, None)],\n ),\n ]:\n for func, fill_value in runs:\n sol = a.combine(b, func, fill_value=fill_value)\n assert_eq(dda.combine(ddb, func, fill_value=fill_value), sol)\n assert_eq(dda.combine(b, func, fill_value=fill_value), sol)\n\n assert_eq(\n ddf1.combine(ddf2, add, overwrite=False), df1.combine(df2, add, overwrite=False)\n )\n assert dda.combine(ddb, add)._name == dda.combine(ddb, add)._name\n\n\ndef test_combine_first():\n df1 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, np.nan], 100),\n \"B\": np.random.choice([\"a\", \"b\", \"nan\"], 100),\n }\n )\n\n df2 = pd.DataFrame(\n {\n \"A\": np.random.choice([1, 2, 3], 100),\n \"B\": np.random.choice([\"a\", \"b\", \"c\"], 100),\n }\n )\n ddf1 = dd.from_pandas(df1, 4)\n ddf2 = dd.from_pandas(df2, 5)\n\n # DataFrame\n assert_eq(ddf1.combine_first(ddf2), df1.combine_first(df2))\n assert_eq(ddf1.combine_first(df2), df1.combine_first(df2))\n\n # Series\n assert_eq(ddf1.A.combine_first(ddf2.A), df1.A.combine_first(df2.A))\n assert_eq(ddf1.A.combine_first(df2.A), df1.A.combine_first(df2.A))\n\n assert_eq(ddf1.B.combine_first(ddf2.B), df1.B.combine_first(df2.B))\n assert_eq(ddf1.B.combine_first(df2.B), 
df1.B.combine_first(df2.B))\n\n\ndef test_dataframe_picklable():\n from pickle import dumps, loads\n\n from cloudpickle import dumps as cp_dumps\n from cloudpickle import loads as cp_loads\n\n d = _compat.makeTimeDataFrame()\n df = dd.from_pandas(d, npartitions=3)\n df = df + 2\n\n # dataframe\n df2 = loads(dumps(df))\n assert_eq(df, df2)\n df2 = cp_loads(cp_dumps(df))\n assert_eq(df, df2)\n\n # series\n a2 = loads(dumps(df.A))\n assert_eq(df.A, a2)\n a2 = cp_loads(cp_dumps(df.A))\n assert_eq(df.A, a2)\n\n # index\n i2 = loads(dumps(df.index))\n assert_eq(df.index, i2)\n i2 = cp_loads(cp_dumps(df.index))\n assert_eq(df.index, i2)\n\n # scalar\n # lambdas are present, so only test cloudpickle\n s = df.A.sum()\n s2 = cp_loads(cp_dumps(s))\n assert_eq(s, s2)\n\n\ndef test_random_partitions():\n a, b = d.random_split([0.5, 0.5], 42)\n assert isinstance(a, dd.DataFrame)\n assert isinstance(b, dd.DataFrame)\n assert a._name != b._name\n np.testing.assert_array_equal(a.index, sorted(a.index))\n\n assert len(a.compute()) + len(b.compute()) == len(full)\n a2, b2 = d.random_split([0.5, 0.5], 42)\n assert a2._name == a._name\n assert b2._name == b._name\n\n a, b = d.random_split([0.5, 0.5], 42, True)\n a2, b2 = d.random_split([0.5, 0.5], 42, True)\n assert_eq(a, a2)\n assert_eq(b, b2)\n with pytest.raises(AssertionError):\n np.testing.assert_array_equal(a.index, sorted(a.index))\n\n parts = d.random_split([0.4, 0.5, 0.1], 42)\n names = {p._name for p in parts}\n names.update([a._name, b._name])\n assert len(names) == 5\n\n with pytest.raises(ValueError):\n d.random_split([0.4, 0.5], 42)\n\n\ndef test_series_round():\n ps = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name=\"a\")\n s = dd.from_pandas(ps, npartitions=3)\n assert_eq(s.round(), ps.round())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_test_repartition_npartitions.assert_all_map_len_parts": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_test_repartition_npartitions.assert_all_map_len_parts", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1718, "end_line": 1881, "span_ids": ["test_repartition_on_pandas_dataframe", "test_repartition", "test_repartition_divisions", "test_repartition_npartitions"], "tokens": 1893}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.slow\ndef test_repartition():\n def _check_split_data(orig, d):\n \"\"\"Check data is split properly\"\"\"\n keys = [k for k in d.dask if k[0].startswith(\"repartition-split\")]\n keys = sorted(keys)\n sp = pd.concat(\n [compute_as_if_collection(dd.DataFrame, d.dask, k) for k in keys]\n )\n assert_eq(orig, sp)\n assert_eq(orig, d)\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n a 
= dd.from_pandas(df, 2)\n\n b = a.repartition(divisions=[10, 20, 50, 60])\n assert b.divisions == (10, 20, 50, 60)\n assert_eq(a, b)\n assert_eq(compute_as_if_collection(dd.DataFrame, b.dask, (b._name, 0)), df.iloc[:1])\n\n for div in [\n [20, 60],\n [10, 50],\n [1], # first / last element mismatch\n [0, 60],\n [10, 70], # do not allow to expand divisions by default\n [10, 50, 20, 60], # not sorted\n [10, 10, 20, 60],\n ]: # not unique (last element can be duplicated)\n\n pytest.raises(ValueError, lambda: a.repartition(divisions=div))\n\n pdf = pd.DataFrame(np.random.randn(7, 5), columns=list(\"abxyz\"))\n for p in range(1, 7):\n ddf = dd.from_pandas(pdf, p)\n assert_eq(ddf, pdf)\n for div in [\n [0, 6],\n [0, 6, 6],\n [0, 5, 6],\n [0, 4, 6, 6],\n [0, 2, 6],\n [0, 2, 6, 6],\n [0, 2, 3, 6, 6],\n [0, 1, 2, 3, 4, 5, 6, 6],\n ]:\n rddf = ddf.repartition(divisions=div)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)\n\n # expand divisions\n for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:\n rddf = ddf.repartition(divisions=div, force=True)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div, force=True)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)\n\n pdf = pd.DataFrame(\n {\"x\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], \"y\": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},\n index=list(\"abcdefghij\"),\n )\n for p in range(1, 7):\n ddf = dd.from_pandas(pdf, p)\n assert_eq(ddf, pdf)\n for div in [\n list(\"aj\"),\n list(\"ajj\"),\n list(\"adj\"),\n list(\"abfj\"),\n list(\"ahjj\"),\n list(\"acdj\"),\n list(\"adfij\"),\n list(\"abdefgij\"),\n list(\"abcdefghij\"),\n ]:\n rddf = ddf.repartition(divisions=div)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)\n\n # expand divisions\n for div in [list(\"Yadijm\"), list(\"acmrxz\"), list(\"Yajz\")]:\n rddf = ddf.repartition(divisions=div, force=True)\n _check_split_data(ddf, rddf)\n assert rddf.divisions == tuple(div)\n assert_eq(pdf, rddf)\n\n rds = ddf.x.repartition(divisions=div, force=True)\n _check_split_data(ddf.x, rds)\n assert rds.divisions == tuple(div)\n assert_eq(pdf.x, rds)\n\n\ndef test_repartition_divisions():\n result = repartition_divisions([0, 6], [0, 6, 6], \"a\", \"b\", \"c\")\n assert result == {\n (\"b\", 0): (methods.boundary_slice, (\"a\", 0), 0, 6, False),\n (\"b\", 1): (methods.boundary_slice, (\"a\", 0), 6, 6, True),\n (\"c\", 0): (\"b\", 0),\n (\"c\", 1): (\"b\", 1),\n }\n\n result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], \"a\", \"b\", \"c\")\n assert result == {\n (\"b\", 0): (methods.boundary_slice, (\"a\", 0), 1, 3, False),\n (\"b\", 1): (methods.boundary_slice, (\"a\", 1), 3, 4, False),\n (\"b\", 2): (methods.boundary_slice, (\"a\", 1), 4, 6, False),\n (\"b\", 3): (methods.boundary_slice, (\"a\", 1), 6, 7, True),\n (\"c\", 0): (methods.concat, [(\"b\", 0), (\"b\", 1)]),\n (\"c\", 1): (\"b\", 2),\n (\"c\", 2): (\"b\", 3),\n }\n\n\ndef test_repartition_on_pandas_dataframe():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n ddf = dd.repartition(df, divisions=[10, 20, 50, 60])\n assert 
isinstance(ddf, dd.DataFrame)\n assert ddf.divisions == (10, 20, 50, 60)\n assert_eq(ddf, df)\n\n ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])\n assert isinstance(ddf, dd.Series)\n assert ddf.divisions == (10, 20, 50, 60)\n assert_eq(ddf, df.y)\n\n\n@pytest.mark.parametrize(\"use_index\", [True, False])\n@pytest.mark.parametrize(\"n\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"k\", [1, 2, 4, 5])\n@pytest.mark.parametrize(\"dtype\", [float, \"M8[ns]\"])\n@pytest.mark.parametrize(\"transform\", [lambda df: df, lambda df: df.x])\ndef test_repartition_npartitions(use_index, n, k, dtype, transform):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6] * 10, \"y\": list(\"abdabd\") * 10},\n index=pd.Series([1, 2, 3, 4, 5, 6] * 10, dtype=dtype),\n )\n df = transform(df)\n a = dd.from_pandas(df, npartitions=n, sort=use_index)\n b = a.repartition(k)\n assert_eq(a, b)\n assert b.npartitions == k\n parts = dask.get(b.dask, b.__dask_keys__())\n assert all(map(len, parts))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_errors_test_map_overlap_errors.with_pytest_raises_TypeEr.ddf_map_overlap_shifted_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_map_overlap_errors_test_map_overlap_errors.with_pytest_raises_TypeEr.ddf_map_overlap_shifted_s", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 97, "span_ids": ["test_map_overlap_errors"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_errors():\n # Non-integer\n with pytest.raises(ValueError):\n ddf.map_overlap(shifted_sum, 0.5, 3, 0, 2, c=2)\n\n # Negative\n with pytest.raises(ValueError):\n ddf.map_overlap(shifted_sum, 0, -5, 0, 2, c=2)\n\n # Partition size < window size\n with pytest.raises(NotImplementedError):\n ddf.map_overlap(shifted_sum, 0, 100, 0, 100, c=2).compute()\n\n # Offset with non-datetime\n with pytest.raises(TypeError):\n ddf.map_overlap(shifted_sum, pd.Timedelta(\"1s\"), pd.Timedelta(\"1s\"), 0, 2, c=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_apply_and_enforce_message_test_apply_and_enforce_message.None_1.apply_and_enforce__func_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_apply_and_enforce_message_test_apply_and_enforce_message.None_1.apply_and_enforce__func_f", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": 
"test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 455, "end_line": 464, "span_ids": ["test_apply_and_enforce_message"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_and_enforce_message():\n def func():\n return pd.DataFrame(columns=[\"A\", \"B\", \"C\"], index=[0])\n\n meta = pd.DataFrame(columns=[\"A\", \"D\"], index=[0])\n with pytest.raises(ValueError, match=\"Extra: *['B', 'C']\"):\n apply_and_enforce(_func=func, _meta=meta)\n\n with pytest.raises(ValueError, match=re.escape(\"Missing: ['D']\")):\n apply_and_enforce(_func=func, _meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__setattr___Delayed.__setattr__.try_.except_AttributeError_.raise_TypeError_Delayed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__setattr___Delayed.__setattr__.try_.except_AttributeError_.raise_TypeError_Delayed_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 544, "end_line": 553, "span_ids": ["Delayed.__setattr__"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Delayed(DaskMethodsMixin, OperatorMethodMixin):\n\n def __setattr__(self, attr, val):\n try:\n object.__setattr__(self, attr, val)\n except AttributeError:\n # attr is neither in type(self).__slots__ nor in the __slots__ of any of its\n # parent classes, and all the parent classes define __slots__ too.\n # This last bit needs to be unit tested: if any of the parent classes omit\n # the __slots__ declaration, self will gain a __dict__ and this branch will\n # become unreachable.\n raise TypeError(\"Delayed objects are immutable\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__setitem___Delayed._get_unary_operator._get_binary_operator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__setitem___Delayed._get_unary_operator._get_binary_operator", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 576, "end_line": 611, "span_ids": ["Delayed.__bool__", "Delayed.__len__", "Delayed:9", "Delayed:11", "Delayed._get_binary_operator", "Delayed.__iter__", "Delayed.__get__", 
"Delayed.__call__", "Delayed.__setitem__"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Delayed(DaskMethodsMixin, OperatorMethodMixin):\n\n def __setitem__(self, index, val):\n raise TypeError(\"Delayed objects are immutable\")\n\n def __iter__(self):\n if self._length is None:\n raise TypeError(\"Delayed objects of unspecified length are not iterable\")\n for i in range(self._length):\n yield self[i]\n\n def __len__(self):\n if self._length is None:\n raise TypeError(\"Delayed objects of unspecified length have no len()\")\n return self._length\n\n def __call__(self, *args, pure=None, dask_key_name=None, **kwargs):\n func = delayed(apply, pure=pure)\n if dask_key_name is not None:\n return func(self, args, kwargs, dask_key_name=dask_key_name)\n return func(self, args, kwargs)\n\n def __bool__(self):\n raise TypeError(\"Truth of Delayed objects is not supported\")\n\n __nonzero__ = __bool__\n\n def __get__(self, instance, cls):\n if instance is None:\n return self\n return types.MethodType(self, instance)\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n method = delayed(right(op) if inv else op, pure=True)\n return lambda *args, **kwargs: method(*args, **kwargs)\n\n _get_unary_operator = _get_binary_operator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__can_apply_blockwise__can_apply_blockwise.None_2.except_ImportError_.return.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__can_apply_blockwise__can_apply_blockwise.None_2.except_ImportError_.return.False", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 117, "end_line": 143, "span_ids": ["_can_apply_blockwise"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _can_apply_blockwise(collection) -> bool:\n \"\"\"Return True if _map_blocks can be sped up via blockwise operations; False\n otherwise.\n\n FIXME this returns False for collections that wrap around around da.Array, such as\n pint.Quantity, xarray DataArray, Dataset, and Variable.\n \"\"\"\n try:\n from .bag import Bag\n\n if isinstance(collection, Bag):\n return True\n except ImportError:\n pass\n try:\n from .array import Array\n\n if isinstance(collection, Array):\n return True\n except ImportError:\n pass\n try:\n from .dataframe import DataFrame, Series\n\n return isinstance(collection, (DataFrame, Series))\n except ImportError:\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__build_map_layer__build_map_layer.if__can_apply_blockwise_c.else_.return.MaterializedLayer_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__build_map_layer__build_map_layer.if__can_apply_blockwise_c.else_.return.MaterializedLayer_", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 146, "end_line": 201, "span_ids": ["_build_map_layer"], "tokens": 414}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_map_layer(\n func: Callable,\n prev_name: str,\n new_name: str,\n collection,\n dependencies: tuple[Delayed, ...] = (),\n) -> Layer:\n \"\"\"Apply func to all keys of collection. Create a Blockwise layer whenever possible;\n fall back to MaterializedLayer otherwise.\n\n Parameters\n ----------\n func\n Callable to be invoked on the graph node\n prev_name : str\n name of the layer to map from; in case of dask base collections, this is the\n collection name. Note how third-party collections, e.g. xarray.Dataset, can\n have multiple names.\n new_name : str\n name of the layer to map to\n collection\n Arbitrary dask collection\n dependencies\n Zero or more Delayed objects, which will be passed as arbitrary variadic args to\n func after the collection's chunk\n \"\"\"\n if _can_apply_blockwise(collection):\n # Use a Blockwise layer\n try:\n numblocks = collection.numblocks\n except AttributeError:\n numblocks = (collection.npartitions,)\n indices = tuple(i for i, _ in enumerate(numblocks))\n kwargs = {\"_deps\": [d.key for d in dependencies]} if dependencies else {}\n\n return blockwise(\n func,\n new_name,\n indices,\n prev_name,\n indices,\n numblocks={prev_name: numblocks},\n dependencies=dependencies,\n **kwargs,\n )\n else:\n # Delayed, bag.Item, dataframe.core.Scalar, or third-party collection;\n # fall back to MaterializedLayer\n dep_keys = tuple(d.key for d in dependencies)\n return MaterializedLayer(\n {\n replace_name_in_key(k, {prev_name: new_name}): (func, k) + dep_keys\n for k in flatten(collection.__dask_keys__())\n if get_name_from_key(k) == prev_name\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__bind_one__bind_one.return.rebuild_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__bind_one__bind_one.return.rebuild_", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 307, "end_line": 400, "span_ids": ["_bind_one"], "tokens": 828}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _bind_one(\n child: T,\n blocker: Delayed | None,\n omit_layers: set[str],\n omit_keys: set[Hashable],\n seed: Hashable,\n) -> T:\n prev_coll_names = get_collection_names(child)\n if not prev_coll_names:\n # Collection with no keys; this is a legitimate use case but, at the moment of\n # writing, can only happen with third-party collections\n return child\n\n dsk = child.__dask_graph__() # type: ignore\n new_layers: dict[str, Layer] = {}\n new_deps: dict[str, set[str]] = {}\n\n if isinstance(dsk, HighLevelGraph):\n try:\n layers_to_clone = set(child.__dask_layers__()) # type: ignore\n except AttributeError:\n layers_to_clone = prev_coll_names.copy()\n else:\n if len(prev_coll_names) == 1:\n hlg_name = next(iter(prev_coll_names))\n else:\n hlg_name = tokenize(*prev_coll_names)\n dsk = HighLevelGraph.from_collections(hlg_name, dsk)\n layers_to_clone = {hlg_name}\n\n clone_keys = dsk.get_all_external_keys() - omit_keys\n for layer_name in omit_layers:\n try:\n layer = dsk.layers[layer_name]\n except KeyError:\n continue\n clone_keys -= layer.get_output_keys()\n # Note: when assume_layers=True, clone_keys can contain keys of the omit collections\n # that are not top-level. This is OK, as they will never be encountered inside the\n # values of their dependent layers.\n\n if blocker is not None:\n blocker_key = blocker.key\n blocker_dsk = blocker.__dask_graph__()\n assert isinstance(blocker_dsk, HighLevelGraph)\n new_layers.update(blocker_dsk.layers)\n new_deps.update(blocker_dsk.dependencies)\n else:\n blocker_key = None\n\n layers_to_copy_verbatim = set()\n\n while layers_to_clone:\n prev_layer_name = layers_to_clone.pop()\n new_layer_name = clone_key(prev_layer_name, seed=seed)\n if new_layer_name in new_layers:\n continue\n\n layer = dsk.layers[prev_layer_name]\n layer_deps = dsk.dependencies[prev_layer_name]\n layer_deps_to_clone = layer_deps - omit_layers\n layer_deps_to_omit = layer_deps & omit_layers\n layers_to_clone |= layer_deps_to_clone\n layers_to_copy_verbatim |= layer_deps_to_omit\n\n new_layers[new_layer_name], is_bound = layer.clone(\n keys=clone_keys, seed=seed, bind_to=blocker_key\n )\n new_dep = {\n clone_key(dep, seed=seed) for dep in layer_deps_to_clone\n } | layer_deps_to_omit\n if is_bound:\n new_dep.add(blocker_key)\n new_deps[new_layer_name] = new_dep\n\n # Add the layers of the collections from omit from child.dsk. Note that, when\n # assume_layers=False, it would be unsafe to simply do HighLevelGraph.merge(dsk,\n # omit[i].dsk). 
Also, collections in omit may or may not be parents of this specific\n # child, or of any children at all.\n while layers_to_copy_verbatim:\n layer_name = layers_to_copy_verbatim.pop()\n if layer_name in new_layers:\n continue\n layer_deps = dsk.dependencies[layer_name]\n layers_to_copy_verbatim |= layer_deps\n new_deps[layer_name] = layer_deps\n new_layers[layer_name] = dsk.layers[layer_name]\n\n rebuild, args = child.__dask_postpersist__() # type: ignore\n return rebuild(\n HighLevelGraph(new_layers, new_deps),\n *args,\n rename={prev_name: clone_key(prev_name, seed) for prev_name in prev_coll_names},\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_clone_clone.return.out_0_if_len_collections": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_clone_clone.return.out_0_if_len_collections", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 414, "end_line": 465, "span_ids": ["clone"], "tokens": 605}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def clone(*collections, omit=None, seed: Hashable = None, assume_layers: bool = True):\n \"\"\"Clone dask collections, returning equivalent collections that are generated from\n independent calculations.\n\n Examples\n --------\n (tokens have been simplified for the sake of brevity)\n\n >>> from dask import array as da\n >>> x_i = da.asarray([1, 1, 1, 1], chunks=2)\n >>> y_i = x_i + 1\n >>> z_i = y_i + 2\n >>> dict(z_i.dask) # doctest: +SKIP\n {('array-1', 0): array([1, 1]),\n ('array-1', 1): array([1, 1]),\n ('add-2', 0): (<built-in function add>, ('array-1', 0), 1),\n ('add-2', 1): (<built-in function add>, ('array-1', 1), 1),\n ('add-3', 0): (<built-in function add>, ('add-2', 0), 1),\n ('add-3', 1): (<built-in function add>, ('add-2', 1), 1)}\n >>> w_i = clone(z_i, omit=x_i)\n >>> w_i.compute()\n array([4, 4, 4, 4])\n >>> dict(w_i.dask) # doctest: +SKIP\n {('array-1', 0): array([1, 1]),\n ('array-1', 1): array([1, 1]),\n ('add-4', 0): (<built-in function add>, ('array-1', 0), 1),\n ('add-4', 1): (<built-in function add>, ('array-1', 1), 1),\n ('add-5', 0): (<built-in function add>, ('add-4', 0), 1),\n ('add-5', 1): (<built-in function add>, ('add-4', 1), 1)}\n\n Parameters\n ----------\n collections\n Zero or more Dask collections or nested structures of Dask collections\n omit\n Dask collection or nested structure of Dask collections which will not be cloned\n seed\n See :func:`bind`\n assume_layers\n See :func:`bind`\n\n Returns\n -------\n Same as ``collections``\n Dask collections of the same type as the inputs, which compute to the same\n value, or nested structures equivalent to the inputs, where the original\n collections have been replaced.\n \"\"\"\n out = bind(\n collections, parents=None, omit=omit, seed=seed, assume_layers=assume_layers\n )\n return out[0] if len(collections) == 1 else out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_wait_on_wait_on.blocker.checkpoint_collections_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_wait_on_wait_on.blocker.checkpoint_collections_", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 457, "end_line": 494, "span_ids": ["wait_on"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wait_on(\n *collections,\n split_every: float | Literal[False] | None = None,\n):\n \"\"\"Ensure that all chunks of all input collections have been computed before\n computing the dependents of any of the chunks.\n\n The following example creates a dask array ``u`` that, when used in a computation,\n will only proceed when all chunks of the array ``x`` have been computed, but\n otherwise matches ``x``:\n\n >>> from dask import array as da\n >>> x = da.ones(10, chunks=5)\n >>> u = wait_on(x)\n\n The following example will create two arrays ``u`` and ``v`` that, when used in a\n computation, will only proceed when all chunks of the arrays ``x`` and ``y`` have\n been computed but otherwise match ``x`` and ``y``:\n\n >>> x = da.ones(10, chunks=5)\n >>> y = da.zeros(10, chunks=5)\n >>> u, v = wait_on(x, y)\n\n Parameters\n ----------\n collections\n Zero or more Dask collections or nested structures of Dask collections\n split_every\n See :func:`checkpoint`\n\n Returns\n -------\n Same as ``collections``\n Dask collection of the same type as the input, which computes to the same value,\n or a nested structure equivalent to the input where the original collections\n have been replaced.\n \"\"\"\n blocker = checkpoint(*collections, split_every=split_every)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_wait_on.block_one_wait_on.return.out_0_if_len_collections": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_wait_on.block_one_wait_on.return.out_0_if_len_collections", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 496, "end_line": 517, "span_ids": ["wait_on"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wait_on(\n *collections,\n split_every: float | Literal[False] | None = None,\n):\n # ... 
other code\n\n def block_one(coll):\n tok = tokenize(coll, blocker)\n dsks = []\n rename = {}\n for prev_name in get_collection_names(coll):\n new_name = \"wait_on-\" + tokenize(prev_name, tok)\n rename[prev_name] = new_name\n layer = _build_map_layer(\n chunks.bind, prev_name, new_name, coll, dependencies=(blocker,)\n )\n dsks.append(\n HighLevelGraph.from_collections(\n new_name, layer, dependencies=(coll, blocker)\n )\n )\n dsk = HighLevelGraph.merge(*dsks)\n rebuild, args = coll.__dask_postpersist__()\n return rebuild(dsk, *args, rename=rename)\n\n unpacked, repack = unpack_collections(*collections)\n out = repack([block_one(coll) for coll in unpacked])\n return out[0] if len(collections) == 1 else out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_chunks_", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 475, "end_line": 491, "span_ids": ["chunks", "chunks.bind", "chunks.checkpoint"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class chunks:\n \"\"\"Callables to be inserted in the Dask graph\"\"\"\n\n @staticmethod\n def bind(node: T, *args, **kwargs) -> T:\n \"\"\"Dummy graph node of :func:`bind` and :func:`wait_on`.\n Wait for both node and all variadic args to complete; then return node.\n \"\"\"\n return node\n\n @staticmethod\n def checkpoint(*args, **kwargs) -> None:\n \"\"\"Dummy graph node of :func:`checkpoint`.\n Wait for all variadic args to complete; then return None.\n \"\"\"\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_annotations_unpack___Layer.__dask_distributed_annotations_unpack__.annotations_update_expand": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_annotations_unpack___Layer.__dask_distributed_annotations_unpack__.annotations_update_expand", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 194, "end_line": 240, "span_ids": ["Layer.__dask_distributed_annotations_unpack__"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "class Layer(Mapping):\n\n @staticmethod\n def __dask_distributed_annotations_unpack__(\n annotations: MutableMapping[str, Any],\n new_annotations: Mapping[str, Any] | None,\n keys: Iterable[Hashable],\n ) -> None:\n \"\"\"\n Unpack a set of layer annotations across a set of keys, then merge those\n expanded annotations for the layer into an existing annotations mapping.\n\n This is not a simple shallow merge because some annotations like retries,\n priority, workers, etc need to be able to retain keys from different layers.\n\n Parameters\n ----------\n annotations: MutableMapping[str, Any], input/output\n Already unpacked annotations, which are to be updated with the new\n unpacked annotations\n new_annotations: Mapping[str, Any], optional\n New annotations to be unpacked into `annotations`\n keys: Iterable\n All keys in the layer.\n \"\"\"\n if new_annotations is None:\n return\n\n expanded = {}\n keys_stringified = False\n\n # Expand the new annotations across the keyset\n for a, v in new_annotations.items():\n if type(v) is dict and \"__expanded_annotations__\" in v:\n # Maybe do a destructive update for efficiency?\n v = v.copy()\n del v[\"__expanded_annotations__\"]\n expanded[a] = v\n else:\n if not keys_stringified:\n keys = [stringify(k) for k in keys]\n keys_stringified = True\n\n expanded[a] = dict.fromkeys(keys, v)\n\n # Merge the expanded annotations with the existing annotations mapping\n for k, v in expanded.items():\n v.update(annotations.get(k, {}))\n annotations.update(expanded)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.clone_Layer.clone.is_leaf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.clone_Layer.clone.is_leaf", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 242, "end_line": 277, "span_ids": ["Layer.clone"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def clone(\n self,\n keys: set,\n seed: Hashable,\n bind_to: Hashable = None,\n ) -> tuple[Layer, bool]:\n \"\"\"Clone selected keys in the layer, as well as references to keys in other\n layers\n\n Parameters\n ----------\n keys\n Keys to be replaced. This never includes keys not listed by\n :meth:`get_output_keys`. It must also include any keys that are outside\n of this layer that may be referenced by it.\n seed\n Common hashable used to alter the keys; see :func:`dask.base.clone_key`\n bind_to\n Optional key to bind the leaf nodes to. 
A leaf node here is one that does\n not reference any replaced keys; in other words it's a node where the\n replacement graph traversal stops; it may still have dependencies on\n non-replaced nodes.\n A bound node will not be computed until after ``bind_to`` has been computed.\n\n Returns\n -------\n - New layer\n - True if the ``bind_to`` key was injected anywhere; False otherwise\n\n Notes\n -----\n This method should be overridden by subclasses to avoid materializing the layer.\n \"\"\"\n from .graph_manipulation import chunks\n\n is_leaf: bool\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.clone.clone_value_Layer.clone.return.MaterializedLayer_dsk_new": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.clone.clone_value_Layer.clone.return.MaterializedLayer_dsk_new", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 279, "end_line": 315, "span_ids": ["Layer.clone"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def clone(\n self,\n keys: set,\n seed: Hashable,\n bind_to: Hashable = None,\n ) -> tuple[Layer, bool]:\n # ... 
other code\n\n def clone_value(o):\n \"\"\"Variant of distributed.utils_comm.subs_multiple, which allows injecting\n bind_to\n \"\"\"\n nonlocal is_leaf\n\n typ = type(o)\n if typ is tuple and o and callable(o[0]):\n return (o[0],) + tuple(clone_value(i) for i in o[1:])\n elif typ is list:\n return [clone_value(i) for i in o]\n elif typ is dict:\n return {k: clone_value(v) for k, v in o.items()}\n else:\n try:\n if o not in keys:\n return o\n except TypeError:\n return o\n is_leaf = False\n return clone_key(o, seed)\n\n dsk_new = {}\n bound = False\n\n for key, value in self.items():\n if key in keys:\n key = clone_key(key, seed)\n is_leaf = True\n value = clone_value(value)\n if bind_to is not None and is_leaf:\n value = (chunks.bind, value, bind_to)\n bound = True\n\n dsk_new[key] = value\n\n return MaterializedLayer(dsk_new), bound", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph_HighLevelGraph.dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph_HighLevelGraph.dependencies", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 551, "end_line": 618, "span_ids": ["HighLevelGraph"], "tokens": 748}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n \"\"\"Task graph composed of layers of dependent subgraphs\n\n This object encodes a Dask task graph that is composed of layers of\n dependent subgraphs, such as commonly occurs when building task graphs\n using high level collections like Dask array, bag, or dataframe.\n\n Typically each high level array, bag, or dataframe operation takes the task\n graphs of the input collections, merges them, and then adds one or more new\n layers of tasks for the new operation. These layers typically have at\n least as many tasks as there are partitions or chunks in the collection.\n The HighLevelGraph object stores the subgraphs for each operation\n separately in sub-graphs, and also stores the dependency structure between\n them.\n\n Parameters\n ----------\n layers : Mapping[str, Mapping]\n The subgraph layers, keyed by a unique name\n dependencies : Mapping[str, set[str]]\n The set of layers on which each layer depends\n key_dependencies : Mapping[Hashable, set], optional\n Mapping (some) keys in the high level graph to their dependencies. 
If\n a key is missing, its dependencies will be calculated on-the-fly.\n\n Examples\n --------\n Here is an idealized example that shows the internal state of a\n HighLevelGraph\n\n >>> import dask.dataframe as dd\n\n >>> df = dd.read_csv('myfile.*.csv') # doctest: +SKIP\n >>> df = df + 100 # doctest: +SKIP\n >>> df = df[df.name == 'Alice'] # doctest: +SKIP\n\n >>> graph = df.__dask_graph__() # doctest: +SKIP\n >>> graph.layers # doctest: +SKIP\n {\n 'read-csv': {('read-csv', 0): (pandas.read_csv, 'myfile.0.csv'),\n ('read-csv', 1): (pandas.read_csv, 'myfile.1.csv'),\n ('read-csv', 2): (pandas.read_csv, 'myfile.2.csv'),\n ('read-csv', 3): (pandas.read_csv, 'myfile.3.csv')},\n 'add': {('add', 0): (operator.add, ('read-csv', 0), 100),\n ('add', 1): (operator.add, ('read-csv', 1), 100),\n ('add', 2): (operator.add, ('read-csv', 2), 100),\n ('add', 3): (operator.add, ('read-csv', 3), 100)}\n 'filter': {('filter', 0): (lambda part: part[part.name == 'Alice'], ('add', 0)),\n ('filter', 1): (lambda part: part[part.name == 'Alice'], ('add', 1)),\n ('filter', 2): (lambda part: part[part.name == 'Alice'], ('add', 2)),\n ('filter', 3): (lambda part: part[part.name == 'Alice'], ('add', 3))}\n }\n\n >>> graph.dependencies # doctest: +SKIP\n {\n 'read-csv': set(),\n 'add': {'read-csv'},\n 'filter': {'add'}\n }\n\n See Also\n --------\n HighLevelGraph.from_collections :\n typically used by developers to make new HighLevelGraphs\n \"\"\"\n\n layers: Mapping[str, Layer]\n dependencies: Mapping[str, Set]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.key_dependencies_HighLevelGraph.__init__.self.layers._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.key_dependencies_HighLevelGraph.__init__.self.layers._", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 619, "end_line": 635, "span_ids": ["HighLevelGraph", "HighLevelGraph.__init__"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n key_dependencies: dict[Hashable, Set]\n _to_dict: dict\n _all_external_keys: set\n\n def __init__(\n self,\n layers: Mapping[str, Mapping],\n dependencies: Mapping[str, Set],\n key_dependencies: dict[Hashable, Set] | None = None,\n ):\n self.dependencies = dependencies\n self.key_dependencies = key_dependencies or {}\n # Makes sure that all layers are `Layer`\n self.layers = {\n k: v if isinstance(v, Layer) else MaterializedLayer(v)\n for k, v in layers.items()\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__getitem___HighLevelGraph.__getitem__.raise_KeyError_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__getitem___HighLevelGraph.__getitem__.raise_KeyError_key_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 634, "end_line": 654, "span_ids": ["HighLevelGraph.__getitem__"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def __getitem__(self, key):\n # Attempt O(1) direct access first, under the assumption that layer names match\n # either the keys (Scalar, Item, Delayed) or the first element of the key tuples\n # (Array, Bag, DataFrame, Series). This assumption is not always true.\n try:\n return self.layers[key][key]\n except KeyError:\n pass\n try:\n return self.layers[key[0]][key]\n except (KeyError, IndexError, TypeError):\n pass\n\n # Fall back to O(n) access\n for d in self.layers.values():\n try:\n return d[key]\n except KeyError:\n pass\n\n raise KeyError(key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_layers_HighLevelGraph.cull_layers.return.HighLevelGraph_ret_layers": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.cull_layers_HighLevelGraph.cull_layers.return.HighLevelGraph_ret_layers", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 964, "end_line": 986, "span_ids": ["HighLevelGraph.cull_layers"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def cull_layers(self, layers: Iterable[str]) -> HighLevelGraph:\n \"\"\"Return a new HighLevelGraph with only the given layers and their\n dependencies. 
Internally, layers are not modified.\n\n This is a variant of :meth:`HighLevelGraph.cull` which is much faster and does\n not risk creating a collision between two layers with the same name and\n different content when two culled graphs are merged later on.\n\n Returns\n -------\n hlg: HighLevelGraph\n Culled high level graph\n \"\"\"\n to_visit = set(layers)\n ret_layers = {}\n ret_dependencies = {}\n while to_visit:\n k = to_visit.pop()\n ret_layers[k] = self.layers[k]\n ret_dependencies[k] = self.dependencies[k]\n to_visit |= ret_dependencies[k] - ret_dependencies.keys()\n\n return HighLevelGraph(ret_layers, ret_dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_name_from_key_test_get_name_from_key.None_2.get_name_from_key_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_name_from_key_test_get_name_from_key.None_2.get_name_from_key_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 588, "end_line": 601, "span_ids": ["test_get_name_from_key"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_name_from_key():\n # Arbitrary hashables\n h1 = object()\n h2 = object()\n\n assert get_name_from_key(\"foo\") == \"foo\"\n assert get_name_from_key(\"foo-123\") == \"foo-123\"\n assert get_name_from_key((\"foo-123\", h1, h2)) == \"foo-123\"\n with pytest.raises(TypeError):\n get_name_from_key(1)\n with pytest.raises(TypeError):\n get_name_from_key(())\n with pytest.raises(TypeError):\n get_name_from_key((1,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_replace_name_in_keys_test_replace_name_in_keys.None_2.replace_name_in_key_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_replace_name_in_keys_test_replace_name_in_keys.None_2.replace_name_in_key_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 604, "end_line": 621, "span_ids": ["test_replace_name_in_keys"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_replace_name_in_keys():\n assert replace_name_in_key(\"foo\", {}) == \"foo\"\n assert replace_name_in_key(\"foo\", {\"bar\": 
\"baz\"}) == \"foo\"\n assert replace_name_in_key(\"foo\", {\"foo\": \"bar\", \"baz\": \"asd\"}) == \"bar\"\n assert replace_name_in_key(\"foo-123\", {\"foo-123\": \"bar-456\"}) == \"bar-456\"\n h1 = object() # Arbitrary hashables\n h2 = object()\n assert replace_name_in_key((\"foo-123\", h1, h2), {\"foo-123\": \"bar\"}) == (\n \"bar\",\n h1,\n h2,\n )\n with pytest.raises(TypeError):\n replace_name_in_key(1, {})\n with pytest.raises(TypeError):\n replace_name_in_key((), {})\n with pytest.raises(TypeError):\n replace_name_in_key((1,), {})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_Tuple_Tuple._rebuild.return.Tuple_dsk_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_Tuple_Tuple._rebuild.return.Tuple_dsk_keys_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 624, "end_line": 659, "span_ids": ["Tuple.__init__", "Tuple.__add__", "Tuple.__dask_layers__", "Tuple.__dask_keys__", "Tuple.__dask_tokenize__", "Tuple._rebuild", "Tuple", "Tuple.__dask_postpersist__", "Tuple.__dask_postcompute__", "Tuple.__dask_graph__"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Tuple(DaskMethodsMixin):\n __slots__ = (\"_dask\", \"_keys\")\n __dask_scheduler__ = staticmethod(dask.threaded.get)\n\n def __init__(self, dsk, keys):\n self._dask = dsk\n self._keys = keys\n\n def __add__(self, other):\n if not isinstance(other, Tuple):\n return NotImplemented # pragma: nocover\n return Tuple(merge(self._dask, other._dask), self._keys + other._keys)\n\n def __dask_graph__(self):\n return self._dask\n\n def __dask_keys__(self):\n return self._keys\n\n def __dask_layers__(self):\n return tuple(get_collection_names(self))\n\n def __dask_tokenize__(self):\n return self._keys\n\n def __dask_postcompute__(self):\n return tuple, ()\n\n def __dask_postpersist__(self):\n return Tuple._rebuild, (self._keys,)\n\n @staticmethod\n def _rebuild(dsk, keys, *, rename=None):\n if rename:\n keys = [replace_name_in_key(key, rename) for key in keys]\n return Tuple(dsk, keys)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_custom_collection_test_custom_collection.assert_z3_compute_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_custom_collection_test_custom_collection.assert_z3_compute_7", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 662, "end_line": 741, "span_ids": ["test_custom_collection"], "tokens": 
885}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_collection():\n # Arbitrary hashables\n h1 = object()\n h2 = object()\n\n dsk = {(\"x\", h1): 1, (\"x\", h2): 2}\n dsk2 = {(\"y\", h1): (add, (\"x\", h1), (\"x\", h2)), (\"y\", h2): (add, (\"y\", h1), 1)}\n dsk2.update(dsk)\n dsk3 = {\"z\": (add, (\"y\", h1), (\"y\", h2))}\n dsk3.update(dsk2)\n\n w = Tuple({}, []) # A collection can have no keys at all\n x = Tuple(dsk, [(\"x\", h1), (\"x\", h2)])\n y = Tuple(dsk2, [(\"y\", h1), (\"y\", h2)])\n z = Tuple(dsk3, [\"z\"])\n # Collection with multiple names\n t = w + x + y + z\n\n # __slots__ defined on base mixin class propagates\n with pytest.raises(AttributeError):\n x.foo = 1\n\n # is_dask_collection\n assert is_dask_collection(w)\n assert is_dask_collection(x)\n assert is_dask_collection(y)\n assert is_dask_collection(z)\n assert is_dask_collection(t)\n\n # tokenize\n assert tokenize(w) == tokenize(w)\n assert tokenize(x) == tokenize(x)\n assert tokenize(y) == tokenize(y)\n assert tokenize(z) == tokenize(z)\n assert tokenize(t) == tokenize(t)\n # All tokens are unique\n assert len({tokenize(coll) for coll in (w, x, y, z, t)}) == 5\n\n # get_collection_names\n assert get_collection_names(w) == set()\n assert get_collection_names(x) == {\"x\"}\n assert get_collection_names(y) == {\"y\"}\n assert get_collection_names(z) == {\"z\"}\n assert get_collection_names(t) == {\"x\", \"y\", \"z\"}\n\n # compute\n assert w.compute() == ()\n assert x.compute() == (1, 2)\n assert y.compute() == (3, 4)\n assert z.compute() == (7,)\n assert dask.compute(w, [{\"x\": x}, y, z]) == ((), [{\"x\": (1, 2)}, (3, 4), (7,)])\n assert t.compute() == (1, 2, 3, 4, 7)\n\n # persist\n t2 = t.persist()\n assert isinstance(t2, Tuple)\n assert t2._keys == t._keys\n assert sorted(t2._dask.values()) == [1, 2, 3, 4, 7]\n assert t2.compute() == (1, 2, 3, 4, 7)\n\n w2, x2, y2, z2 = dask.persist(w, x, y, z)\n assert y2._keys == y._keys\n assert y2._dask == {(\"y\", h1): 3, (\"y\", h2): 4}\n assert y2.compute() == (3, 4)\n\n t3 = x2 + y2 + z2\n assert t3.compute() == (1, 2, 3, 4, 7)\n\n # __dask_postpersist__ with name change\n rebuild, args = w.__dask_postpersist__()\n w3 = rebuild({}, *args, rename={\"w\": \"w3\"})\n assert w3.compute() == ()\n\n rebuild, args = x.__dask_postpersist__()\n x3 = rebuild({(\"x3\", h1): 10, (\"x3\", h2): 20}, *args, rename={\"x\": \"x3\"})\n assert x3.compute() == (10, 20)\n\n rebuild, args = z.__dask_postpersist__()\n z3 = rebuild({\"z3\": 70}, *args, rename={\"z\": \"z3\"})\n assert z3.compute() == (70,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_array_rename_test_persist_array_rename.da_utils_assert_eq_b_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_array_rename_test_persist_array_rename.da_utils_assert_eq_b_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": 
"text/x-python", "category": "test", "start_line": 797, "end_line": 806, "span_ids": ["test_persist_array_rename"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da\")\ndef test_persist_array_rename():\n a = da.zeros(4, dtype=int, chunks=2)\n rebuild, args = a.__dask_postpersist__()\n dsk = {(\"b\", 0): np.array([1, 2]), (\"b\", 1): np.array([3, 4])}\n b = rebuild(dsk, *args, rename={a.name: \"b\"})\n assert isinstance(b, da.Array)\n assert b.name == \"b\"\n assert b.__dask_keys__() == [(\"b\", 0), (\"b\", 1)]\n da.utils.assert_eq(b, [1, 2, 3, 4])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_test_compute_dataframe.dd_utils_assert_eq_out2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_dataframe_test_compute_dataframe.dd_utils_assert_eq_out2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 809, "end_line": 817, "span_ids": ["test_compute_dataframe"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_compute_dataframe():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 5, 3, 3]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf1 = ddf.a + 1\n ddf2 = ddf.a + ddf.b\n out1, out2 = compute(ddf1, ddf2)\n dd.utils.assert_eq(out1, df.a + 1)\n dd.utils.assert_eq(out2, df.a + df.b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_dataframe_test_persist_dataframe.dd_utils_assert_eq_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_dataframe_test_persist_dataframe.dd_utils_assert_eq_ddf2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 820, "end_line": 828, "span_ids": ["test_persist_dataframe"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not 
dd\")\ndef test_persist_dataframe():\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 6, 7, 8]})\n ddf1 = dd.from_pandas(df, npartitions=2) * 2\n assert len(ddf1.__dask_graph__()) == 4\n ddf2 = ddf1.persist()\n assert isinstance(ddf2, dd.DataFrame)\n assert len(ddf2.__dask_graph__()) == 2\n dd.utils.assert_eq(ddf2, ddf1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_series_test_persist_series.dd_utils_assert_eq_dds2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_series_test_persist_series.dd_utils_assert_eq_dds2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 831, "end_line": 839, "span_ids": ["test_persist_series"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_persist_series():\n ds = pd.Series([1, 2, 3, 4])\n dds1 = dd.from_pandas(ds, npartitions=2) * 2\n assert len(dds1.__dask_graph__()) == 4\n dds2 = dds1.persist()\n assert isinstance(dds2, dd.Series)\n assert len(dds2.__dask_graph__()) == 2\n dd.utils.assert_eq(dds2, dds1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_scalar_test_persist_scalar.dd_utils_assert_eq_dds2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_scalar_test_persist_scalar.dd_utils_assert_eq_dds2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 842, "end_line": 850, "span_ids": ["test_persist_scalar"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_persist_scalar():\n ds = pd.Series([1, 2, 3, 4])\n dds1 = dd.from_pandas(ds, npartitions=2).min()\n assert len(dds1.__dask_graph__()) == 5\n dds2 = dds1.persist()\n assert isinstance(dds2, dd.core.Scalar)\n assert len(dds2.__dask_graph__()) == 1\n dd.utils.assert_eq(dds2, dds1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_dataframe_rename_test_persist_dataframe_rename.dd_utils_assert_eq_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_dataframe_rename_test_persist_dataframe_rename.dd_utils_assert_eq_ddf2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 853, "end_line": 862, "span_ids": ["test_persist_dataframe_rename"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_persist_dataframe_rename():\n df1 = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 6, 7, 8]})\n df2 = pd.DataFrame({\"a\": [2, 3, 5, 6], \"b\": [6, 7, 9, 10]})\n ddf1 = dd.from_pandas(df1, npartitions=2)\n rebuild, args = ddf1.__dask_postpersist__()\n dsk = {(\"x\", 0): df2.iloc[:2], (\"x\", 1): df2.iloc[2:]}\n ddf2 = rebuild(dsk, *args, rename={ddf1._name: \"x\"})\n assert ddf2.__dask_keys__() == [(\"x\", 0), (\"x\", 1)]\n dd.utils.assert_eq(ddf2, df2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_series_rename_test_persist_series_rename.dd_utils_assert_eq_dds2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_series_rename_test_persist_series_rename.dd_utils_assert_eq_dds2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 865, "end_line": 874, "span_ids": ["test_persist_series_rename"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_persist_series_rename():\n ds1 = pd.Series([1, 2, 3, 4])\n ds2 = pd.Series([5, 6, 7, 8])\n dds1 = dd.from_pandas(ds1, npartitions=2)\n rebuild, args = dds1.__dask_postpersist__()\n dsk = {(\"x\", 0): ds2.iloc[:2], (\"x\", 1): ds2.iloc[2:]}\n dds2 = rebuild(dsk, *args, rename={dds1._name: \"x\"})\n assert dds2.__dask_keys__() == [(\"x\", 0), (\"x\", 1)]\n dd.utils.assert_eq(dds2, ds2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_scalar_rename_test_persist_scalar_rename.dd_utils_assert_eq_dds2_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_scalar_rename_test_persist_scalar_rename.dd_utils_assert_eq_dds2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 877, "end_line": 884, "span_ids": ["test_persist_scalar_rename"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd\")\ndef test_persist_scalar_rename():\n ds1 = pd.Series([1, 2, 3, 4])\n dds1 = dd.from_pandas(ds1, npartitions=2).min()\n rebuild, args = dds1.__dask_postpersist__()\n dds2 = rebuild({(\"x\", 0): 5}, *args, rename={dds1._name: \"x\"})\n assert dds2.__dask_keys__() == [(\"x\", 0)]\n dd.utils.assert_eq(dds2, 5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_dataframe_test_compute_array_dataframe.dd_utils_assert_eq_df_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_array_dataframe_test_compute_array_dataframe.dd_utils_assert_eq_df_out", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 887, "end_line": 895, "span_ids": ["test_compute_array_dataframe"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not dd or not da\")\ndef test_compute_array_dataframe():\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(5, 5)) + 1\n df = pd.DataFrame({\"a\": [1, 2, 3, 4], \"b\": [5, 5, 3, 3]})\n ddf = dd.from_pandas(df, npartitions=2).a + 2\n arr_out, df_out = compute(darr, ddf)\n assert np.allclose(arr_out, arr + 1)\n dd.utils.assert_eq(df_out, df.a + 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_test_persist_delayed_custom_key.assert_dict_dp_dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_test_persist_delayed_custom_key.assert_dict_dp_dask_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1201, "end_line": 1223, "span_ids": ["impl:15", "test_persist_delayed", 
"test_persist_delayed_custom_key"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_delayed():\n x1 = delayed(1)\n x2 = delayed(inc)(x1)\n x3 = delayed(inc)(x2)\n (xx,) = persist(x3)\n assert isinstance(xx, Delayed)\n assert xx.key == x3.key\n assert len(xx.dask) == 1\n\n assert x3.compute() == xx.compute()\n\n\nsome_hashable = object()\n\n\n@pytest.mark.parametrize(\"key\", [\"a\", (\"a-123\", some_hashable)])\ndef test_persist_delayed_custom_key(key):\n d = Delayed(key, {key: \"b\", \"b\": 1})\n assert d.compute() == 1\n dp = d.persist()\n assert dp.compute() == 1\n assert dp.key == key\n assert dict(dp.dask) == {key: 1}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_rename_test_persist_delayed_rename.assert_dict_dp_dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayed_rename_test_persist_delayed_rename.assert_dict_dp_dask_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1164, "end_line": 1180, "span_ids": ["test_persist_delayed_rename"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"key,rename,new_key\",\n [\n (\"a\", {}, \"a\"),\n (\"a\", {\"c\": \"d\"}, \"a\"),\n (\"a\", {\"a\": \"b\"}, \"b\"),\n ((\"a-123\", some_hashable), {\"a-123\": \"b-123\"}, (\"b-123\", some_hashable)),\n ],\n)\ndef test_persist_delayed_rename(key, rename, new_key):\n d = Delayed(key, {key: 1})\n assert d.compute() == 1\n rebuild, args = d.__dask_postpersist__()\n dp = rebuild({new_key: 2}, *args, rename=rename)\n assert dp.compute() == 2\n assert dp.key == new_key\n assert dict(dp.dask) == {new_key: 2}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayedleaf_test_persist_array_bag.assert_list_b_list_bb": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_delayedleaf_test_persist_array_bag.assert_list_b_list_bb", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1164, "end_line": 1200, "span_ids": ["test_persist_array_bag", "test_persist_delayedleaf", "test_persist_delayedattr"], "tokens": 256}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_delayedleaf():\n x = delayed(1)\n (xx,) = persist(x)\n assert isinstance(xx, Delayed)\n assert xx.compute() == 1\n\n\ndef test_persist_delayedattr():\n class C:\n x = 1\n\n x = delayed(C).x\n (xx,) = persist(x)\n assert isinstance(xx, Delayed)\n assert xx.compute() == 1\n\n\n@pytest.mark.skipif(\"not da\")\ndef test_persist_array_bag():\n x = da.arange(5, chunks=2) + 1\n b = db.from_sequence([1, 2, 3]).map(inc)\n\n with pytest.raises(ValueError):\n persist(x, b)\n\n xx, bb = persist(x, b, scheduler=\"single-threaded\")\n\n assert isinstance(xx, da.Array)\n assert isinstance(bb, db.Bag)\n\n assert xx.name == x.name\n assert bb.name == b.name\n assert len(xx.dask) == xx.npartitions < len(x.dask)\n assert len(bb.dask) == bb.npartitions < len(b.dask)\n\n assert np.allclose(x, xx)\n assert list(b) == list(bb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_bag_test_persist_item.db_utils_assert_eq_a_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_bag_test_persist_item.db_utils_assert_eq_a_b_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1203, "end_line": 1218, "span_ids": ["test_persist_bag", "test_persist_item"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_bag():\n a = db.from_sequence([1, 2, 3], npartitions=2).map(lambda x: x * 2)\n assert len(a.__dask_graph__()) == 4\n b = a.persist(scheduler=\"sync\")\n assert isinstance(b, db.Bag)\n assert len(b.__dask_graph__()) == 2\n db.utils.assert_eq(a, b)\n\n\ndef test_persist_item():\n a = db.from_sequence([1, 2, 3], npartitions=2).map(lambda x: x * 2).min()\n assert len(a.__dask_graph__()) == 7\n b = a.persist(scheduler=\"sync\")\n assert isinstance(b, db.Item)\n assert len(b.__dask_graph__()) == 1\n db.utils.assert_eq(a, b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_bag_rename_test_persist_bag_rename.db_utils_assert_eq_b_4_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_bag_rename_test_persist_bag_rename.db_utils_assert_eq_b_4_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", 
"file_type": "text/x-python", "category": "test", "start_line": 1221, "end_line": 1229, "span_ids": ["test_persist_bag_rename"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_bag_rename():\n a = db.from_sequence([1, 2, 3], npartitions=2)\n rebuild, args = a.__dask_postpersist__()\n dsk = {(\"b\", 0): [4], (\"b\", 1): [5, 6]}\n b = rebuild(dsk, *args, rename={a.name: \"b\"})\n assert isinstance(b, db.Bag)\n assert b.name == \"b\"\n assert b.__dask_keys__() == [(\"b\", 0), (\"b\", 1)]\n db.utils.assert_eq(b, [4, 5, 6])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_globals_test_optimize_globals.None_1.assert_eq_xx_np_ones_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_optimize_globals_test_optimize_globals.None_1.assert_eq_xx_np_ones_10", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1248, "end_line": 1269, "span_ids": ["test_optimize_globals"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize_globals():\n da = pytest.importorskip(\"dask.array\")\n\n x = da.ones(10, chunks=(5,))\n\n def optimize_double(dsk, keys):\n return {k: (mul, 2, v) for k, v in dsk.items()}\n\n from dask.array.utils import assert_eq\n\n assert_eq(x + 1, np.ones(10) + 1)\n\n with dask.config.set(array_optimize=optimize_double):\n assert_eq(x + 1, (np.ones(10) * 2 + 1) * 2, check_chunks=False)\n\n assert_eq(x + 1, np.ones(10) + 1)\n\n b = db.range(10, npartitions=2)\n\n with dask.config.set(array_optimize=optimize_double):\n xx, bb = dask.compute(x + 1, b.map(inc), scheduler=\"single-threaded\")\n assert_eq(xx, (np.ones(10) * 2 + 1) * 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_attribute_of_attribute_test_cloudpickle.assert_d_compute_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_attribute_of_attribute_test_cloudpickle.assert_d_compute_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 634, "end_line": 688, "span_ids": ["test_cloudpickle", "modlevel_delayed1", "test_pickle", "modlevel_delayed2", "test_check_meta_flag", 
"test_attribute_of_attribute", "modlevel_eager"], "tokens": 373}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_attribute_of_attribute():\n x = delayed(123)\n assert isinstance(x.a, Delayed)\n assert isinstance(x.a.b, Delayed)\n assert isinstance(x.a.b.c, Delayed)\n\n\ndef test_check_meta_flag():\n dd = pytest.importorskip(\"dask.dataframe\")\n from pandas import Series\n\n a = Series([\"a\", \"b\", \"a\"], dtype=\"category\")\n b = Series([\"a\", \"c\", \"a\"], dtype=\"category\")\n da = delayed(lambda x: x)(a)\n db = delayed(lambda x: x)(b)\n\n c = dd.from_delayed([da, db], verify_meta=False)\n dd.utils.assert_eq(c, c)\n\n\ndef modlevel_eager(x):\n return x + 1\n\n\n@delayed\ndef modlevel_delayed1(x):\n return x + 1\n\n\n@delayed(pure=False)\ndef modlevel_delayed2(x):\n return x + 1\n\n\n@pytest.mark.parametrize(\n \"f\",\n [\n delayed(modlevel_eager),\n pytest.param(modlevel_delayed1, marks=pytest.mark.xfail(reason=\"#3369\")),\n pytest.param(modlevel_delayed2, marks=pytest.mark.xfail(reason=\"#3369\")),\n ],\n)\ndef test_pickle(f):\n d = f(2)\n d = pickle.loads(pickle.dumps(d, protocol=pickle.HIGHEST_PROTOCOL))\n assert d.compute() == 3\n\n\n@pytest.mark.parametrize(\n \"f\", [delayed(modlevel_eager), modlevel_delayed1, modlevel_delayed2]\n)\ndef test_cloudpickle(f):\n d = f(2)\n d = cloudpickle.loads(cloudpickle.dumps(d, protocol=pickle.HIGHEST_PROTOCOL))\n assert d.compute() == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_fused_blockwise_dataframe_merge_test_fused_blockwise_dataframe_merge.dd_utils_assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_fused_blockwise_dataframe_merge_test_fused_blockwise_dataframe_merge.dd_utils_assert_eq_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 111, "span_ids": ["test_fused_blockwise_dataframe_merge"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"fuse\", [True, False])\ndef test_fused_blockwise_dataframe_merge(c, fuse):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n # Generate two DataFrames with more partitions than\n # the `max_branch` default used for shuffling (32).\n # We need a multi-stage shuffle to cover #7178 fix.\n size = 35\n df1 = pd.DataFrame({\"x\": range(size), \"y\": range(size)})\n df2 = pd.DataFrame({\"x\": range(size), \"z\": range(size)})\n ddf1 = dd.from_pandas(df1, npartitions=size) + 10\n ddf2 = dd.from_pandas(df2, npartitions=5) + 10\n df1 += 
10\n df2 += 10\n\n with dask.config.set({\"optimization.fuse.active\": fuse}):\n ddfm = ddf1.merge(ddf2, on=[\"x\"], how=\"left\")\n ddfm.head() # https://github.com/dask/dask/issues/7178\n dfm = ddfm.compute().sort_values(\"x\")\n # We call compute above since `sort_values` is not\n # supported in `dask.dataframe`\n dd.utils.assert_eq(\n dfm, df1.merge(df2, on=[\"x\"], how=\"left\").sort_values(\"x\"), check_index=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_annotations_blockwise_unpack_test_annotations_blockwise_unpack.assert_eq_z_np_ones_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_annotations_blockwise_unpack_test_annotations_blockwise_unpack.assert_eq_z_np_ones_10_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 249, "end_line": 277, "span_ids": ["test_annotations_blockwise_unpack"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_annotations_blockwise_unpack(c, s, a, b):\n da = pytest.importorskip(\"dask.array\")\n np = pytest.importorskip(\"numpy\")\n from dask.array.utils import assert_eq\n\n # A flaky doubling function -- need extra args because it is called before\n # application to establish dtype/meta.\n scale = varying([ZeroDivisionError(\"one\"), ZeroDivisionError(\"two\"), 2, 2])\n\n def flaky_double(x):\n return scale() * x\n\n # A reliable double function.\n def reliable_double(x):\n return 2 * x\n\n x = da.ones(10, chunks=(5,))\n\n # The later annotations should not override the earlier annotations\n with dask.annotate(retries=2):\n y = x.map_blocks(flaky_double, meta=np.array((), dtype=np.float_))\n with dask.annotate(retries=0):\n z = y.map_blocks(reliable_double, meta=np.array((), dtype=np.float_))\n\n with dask.config.set(optimization__fuse__active=False):\n z = await c.compute(z)\n\n assert_eq(z, np.ones(10) * 4.0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_combo_of_layer_types_test_combo_of_layer_types.assert_res_21": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_combo_of_layer_types_test_combo_of_layer_types.assert_res_21", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 280, "end_line": 314, "span_ids": ["test_combo_of_layer_types"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_combo_of_layer_types(c, s, a, b):\n \"\"\"Check pack/unpack of a HLG that has every type of Layers!\"\"\"\n\n da = pytest.importorskip(\"dask.array\")\n dd = pytest.importorskip(\"dask.dataframe\")\n np = pytest.importorskip(\"numpy\")\n pd = pytest.importorskip(\"pandas\")\n\n def add(x, y, z, extra_arg):\n return x + y + z + extra_arg\n\n y = c.submit(lambda x: x, 2)\n z = c.submit(lambda x: x, 3)\n x = da.blockwise(\n add,\n \"x\",\n da.zeros((3,), chunks=(1,)),\n \"x\",\n da.ones((3,), chunks=(1,)),\n \"x\",\n y,\n None,\n concatenate=False,\n dtype=int,\n extra_arg=z,\n )\n\n df = dd.from_pandas(pd.DataFrame({\"a\": np.arange(3)}), npartitions=3)\n df = df.shuffle(\"a\", shuffle=\"tasks\")\n df = df[\"a\"].to_dask_array()\n\n res = x.sum() + df.sum()\n res = await c.compute(res, optimize_graph=False)\n assert res == 21", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_random_NodeCounter.f.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_random_NodeCounter.f.return.x", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 29, "span_ids": ["NodeCounter.f", "imports", "NodeCounter.__init__", "NodeCounter"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\nimport time\nfrom operator import add\n\nimport pytest\n\nimport dask\nimport dask.bag as db\nfrom dask import delayed\nfrom dask.base import clone_key\nfrom dask.blockwise import Blockwise\nfrom dask.graph_manipulation import bind, checkpoint, chunks, clone, wait_on\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.tests.test_base import Tuple\nfrom dask.utils_test import import_or_none\n\nda = import_or_none(\"dask.array\")\ndd = import_or_none(\"dask.dataframe\")\npd = import_or_none(\"pandas\")\n\n\nclass NodeCounter:\n def __init__(self):\n self.n = 0\n\n def f(self, x):\n time.sleep(random.random() / 100)\n self.n += 1\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_collections_with_node_counters_collections_with_node_counters.return.colls_cnt": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_collections_with_node_counters_collections_with_node_counters.return.colls_cnt", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 63, "end_line": 86, "span_ids": ["collections_with_node_counters"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def collections_with_node_counters():\n cnt = NodeCounter()\n df = pd.DataFrame({\"x\": list(range(10))})\n\n # Test two samples of all collections where applicable, one with multiple chunks\n # and one with a single chunk\n colls = [\n # dask.delayed\n delayed(cnt.f)(\"Hello 1\"), # 1 chunk\n # dask.array\n da.ones((10, 10), chunks=5).map_blocks(cnt.f), # 4 chunks\n da.ones((1,), chunks=-1).map_blocks(cnt.f), # 1 chunk\n # dask.bag\n db.from_sequence([1, 2], npartitions=2).map(cnt.f), # 2 chunks\n db.from_sequence([1], npartitions=1).map(cnt.f), # 1 chunk\n db.Item.from_delayed(delayed(cnt.f)(\"Hello 2\")), # 1 chunk\n # dask.dataframe\n dd.from_pandas(df, npartitions=2).map_partitions(cnt.f), # 2 chunks\n dd.from_pandas(df, npartitions=1).map_partitions(cnt.f), # 1 chunk\n dd.from_pandas(df[\"x\"], npartitions=2).map_partitions(cnt.f), # 2 chunks\n dd.from_pandas(df[\"x\"], npartitions=1).map_partitions(cnt.f), # 1 chunk\n ]\n cnt.n = 0\n return colls, cnt", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_demo_tuples_demo_tuples.return.Tuple_dsk1_list_dsk1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_demo_tuples_demo_tuples.return.Tuple_dsk1_list_dsk1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 121, "span_ids": ["demo_tuples"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def demo_tuples(layers: bool) -> \"tuple[Tuple, Tuple, NodeCounter]\":\n cnt = NodeCounter()\n # Collections have multiple names\n dsk1 = HighLevelGraph(\n {\"a\": {(\"a\", h1): (cnt.f, 1), (\"a\", h2): (cnt.f, 2)}, \"b\": {\"b\": (cnt.f, 3)}},\n {\"a\": set(), \"b\": set()},\n )\n dsk2 = HighLevelGraph(\n {\"c\": {\"c\": (cnt.f, 4)}, \"d\": {\"d\": (cnt.f, 5)}},\n {\"c\": set(), \"d\": set()},\n )\n if not layers:\n dsk1 = dsk1.to_dict() # type: ignore\n dsk2 = dsk2.to_dict() # type: ignore\n\n return Tuple(dsk1, list(dsk1)), Tuple(dsk2, list(dsk2)), cnt", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_checkpoint_test_wait_on_many.assert_cnt_n_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_checkpoint_test_wait_on_many.assert_cnt_n_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 121, "end_line": 150, "span_ids": ["test_wait_on_many", "test_checkpoint_collections", "test_wait_on_one", "test_checkpoint"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"layers\", [False, True])\ndef test_checkpoint(layers):\n t1, t2, cnt = demo_tuples(layers)\n cp = checkpoint(t1, {\"x\": [t2]})\n assert cp.compute(scheduler=\"sync\") is None\n assert cnt.n == 5\n\n\n@pytest.mark.skipif(\"not da or not dd\")\ndef test_checkpoint_collections():\n colls, cnt = collections_with_node_counters()\n cp = checkpoint(*colls)\n cp.compute(scheduler=\"sync\")\n assert cnt.n == 16\n\n\n@pytest.mark.parametrize(\"layers\", [False, True])\ndef test_wait_on_one(layers):\n t1, _, cnt = demo_tuples(layers)\n t1w = wait_on(t1)\n assert t1w.compute(scheduler=\"sync\") == (1, 2, 3)\n assert cnt.n == 3\n\n\n@pytest.mark.parametrize(\"layers\", [False, True])\ndef test_wait_on_many(layers):\n t1, t2, cnt = demo_tuples(layers)\n out = wait_on(t1, {\"x\": [t2]})\n assert dask.compute(*out, scheduler=\"sync\") == ((1, 2, 3), {\"x\": [(4, 5)]})\n assert cnt.n == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_wait_on_collections_test_wait_on_collections.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_wait_on_collections_test_wait_on_collections.None_9", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 179, "span_ids": ["test_wait_on_collections"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da or not dd\")\ndef test_wait_on_collections():\n colls, cnt = collections_with_node_counters()\n\n # Create a delayed that depends on a single one among all collections\n @delayed\n def f(x):\n pass\n\n colls2 = 
wait_on(*colls)\n f(colls2[0]).compute()\n assert cnt.n == 16\n\n # dask.delayed\n assert colls2[0].compute() == colls[0].compute()\n # dask.array\n da.utils.assert_eq(colls2[1], colls[1])\n da.utils.assert_eq(colls2[2], colls[2])\n # dask.bag\n db.utils.assert_eq(colls2[3], colls[3])\n db.utils.assert_eq(colls2[4], colls[4])\n db.utils.assert_eq(colls2[5], colls[5])\n # dask.dataframe\n dd.utils.assert_eq(colls2[6], colls[6])\n dd.utils.assert_eq(colls2[7], colls[7])\n dd.utils.assert_eq(colls2[8], colls[8])\n dd.utils.assert_eq(colls2[9], colls[9])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_clone_test_clone.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_clone_test_clone.None_7", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 165, "end_line": 205, "span_ids": ["test_clone"], "tokens": 605}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"layers\", [False, True])\ndef test_clone(layers):\n dsk1 = {(\"a\", h1): 1, (\"a\", h2): 2}\n dsk2 = {\"b\": (add, (\"a\", h1), (\"a\", h2))}\n dsk3 = {\"c\": 1, \"d\": 1} # Multiple names\n if layers:\n dsk1 = HighLevelGraph.from_collections(\"a\", dsk1)\n dsk2 = HighLevelGraph(\n {\"a\": dsk1, \"b\": dsk2}, dependencies={\"a\": set(), \"b\": {\"a\"}}\n )\n dsk3 = HighLevelGraph.from_collections(\"c\", dsk3)\n else:\n dsk2.update(dsk1)\n\n t1 = Tuple(dsk1, [(\"a\", h1), (\"a\", h2)])\n t2 = Tuple(dsk2, [\"b\"])\n t3 = Tuple(dsk3, [\"c\"])\n\n c1 = clone(t2, seed=1, assume_layers=layers)\n c2 = clone(t2, seed=1, assume_layers=layers)\n c3 = clone(t2, seed=2, assume_layers=layers)\n c4 = clone(c1, seed=1, assume_layers=layers) # Clone of a clone has different keys\n c5 = clone(t2, assume_layers=layers) # Random seed\n c6 = clone(t2, assume_layers=layers) # Random seed\n c7 = clone(t2, omit=t1, seed=1, assume_layers=layers)\n\n assert c1.__dask_graph__() == c2.__dask_graph__()\n assert_no_common_keys(c1, t2, layers=layers)\n assert_no_common_keys(c1, c3, layers=layers)\n assert_no_common_keys(c1, c4, layers=layers)\n assert_no_common_keys(c1, c5, layers=layers)\n assert_no_common_keys(c5, c6, layers=layers)\n assert_no_common_keys(c7, t2, omit=t1, layers=layers)\n assert dask.compute(t2, c1, c2, c3, c4, c5, c6, c7) == ((3,),) * 8\n\n # Clone nested; some of the collections in omit are unrelated\n out = clone({\"x\": [t2]}, omit={\"y\": [t1, t3]}, assume_layers=layers)\n assert dask.compute(out) == ({\"x\": [(3,)]},)\n c8 = out[\"x\"][0]\n assert_no_common_keys(c8, t2, omit=t1, layers=layers)\n assert_no_common_keys(c8, t3, layers=layers)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_keys_values_items_to_dict_methods_test_keys_values_items_to_dict_methods.assert_hg_to_dict_di": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_keys_values_items_to_dict_methods_test_keys_values_items_to_dict_methods.assert_hg_to_dict_di", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 34, "end_line": 47, "span_ids": ["test_keys_values_items_to_dict_methods"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keys_values_items_to_dict_methods():\n da = pytest.importorskip(\"dask.array\")\n a = da.ones(10, chunks=(5,))\n b = a + 1\n c = a + 2\n d = b + c\n hg = d.dask\n\n keys, values, items = hg.keys(), hg.values(), hg.items()\n assert isinstance(keys, Set)\n assert list(keys) == list(hg)\n assert list(values) == [hg[i] for i in hg]\n assert list(items) == list(zip(keys, values))\n assert hg.to_dict() == dict(hg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_getitem_test_getitem.for_k_in_Unhashable_.with_pytest_raises_TypeEr.hg_k_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_getitem_test_getitem.for_k_in_Unhashable_.with_pytest_raises_TypeEr.hg_k_", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 50, "end_line": 72, "span_ids": ["test_getitem"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem():\n hg = HighLevelGraph(\n {\"a\": {\"a\": 1, (\"a\", 0): 2, \"b\": 3}, \"b\": {\"c\": 4}}, {\"a\": set(), \"b\": set()}\n )\n # Key is a string and it exists in a layer with the same name\n assert hg[\"a\"] == 1\n # Key is a tuple and the name exists in a layer with the same name\n assert hg[\"a\", 0] == 2\n # Key is in the wrong layer, while the right layer does not contain it\n assert hg[\"b\"] == 3\n # Key is in the wrong layer, while the right layer does not exist\n assert hg[\"c\"] == 4\n\n for k in (\"d\", \"\", 1, ()):\n with pytest.raises(KeyError):\n hg[k]\n\n class Unhashable:\n __hash__ = None\n\n for k in (Unhashable(), (Unhashable(),)):\n with pytest.raises(TypeError):\n hg[k]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_copy_test_cull.assert_dict_culled_by_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_copy_test_cull.assert_dict_culled_by_y_", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 99, "span_ids": ["test_copy", "test_cull"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_copy():\n h1 = HighLevelGraph(\n {\"a\": {\"a\": \"b\"}, \"b\": {\"b\": 1}},\n {\"a\": {\"b\"}, \"b\": set()},\n )\n h1.get_all_dependencies()\n assert h1.key_dependencies\n h2 = h1.copy()\n for k in (\"layers\", \"dependencies\", \"key_dependencies\"):\n v1 = getattr(h1, k)\n v2 = getattr(h2, k)\n assert v1 is not v2\n assert v1 == v2\n\n\ndef test_cull():\n a = {\"x\": 1, \"y\": (inc, \"x\")}\n hg = HighLevelGraph({\"a\": a}, {\"a\": set()})\n\n culled_by_x = hg.cull({\"x\"})\n assert dict(culled_by_x) == {\"x\": 1}\n\n # parameter is the raw output of __dask_keys__()\n culled_by_y = hg.cull([[[\"y\"]]])\n assert dict(culled_by_y) == a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_cull_layers_test_cull_layers.for_k_in_culled_layers_.assert_culled_dependencie": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_cull_layers_test_cull_layers.for_k_in_culled_layers_.assert_culled_dependencie", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 102, "end_line": 126, "span_ids": ["test_cull_layers"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cull_layers():\n hg = HighLevelGraph(\n {\n \"a\": {\"a1\": \"d1\", \"a2\": \"e1\"},\n \"b\": {\"b\": \"d\", \"dontcull_b\": 1},\n \"c\": {\"dontcull_c\": 1},\n \"d\": {\"d\": 1, \"dontcull_d\": 1},\n \"e\": {\"e\": 1, \"dontcull_e\": 1},\n },\n {\"a\": {\"d\", \"e\"}, \"b\": {\"d\"}, \"c\": set(), \"d\": set(), \"e\": set()},\n )\n\n # Deep-copy layers before calling method to test they aren't modified in place\n expect = HighLevelGraph(\n {k: dict(v) for k, v in hg.layers.items() if k != \"c\"},\n {k: set(v) for k, v in hg.dependencies.items() if k != \"c\"},\n )\n\n culled = hg.cull_layers([\"a\", \"b\"])\n\n assert culled.layers == expect.layers\n assert culled.dependencies == expect.dependencies\n for k in 
culled.layers:\n assert culled.layers[k] is hg.layers[k]\n assert culled.dependencies[k] is hg.dependencies[k]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_annot_map_fn_test_single_annotation.assert_dask_config_get_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_annot_map_fn_test_single_annotation.assert_dask_config_get_a", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 129, "end_line": 147, "span_ids": ["annot_map_fn", "test_single_annotation"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def annot_map_fn(key):\n return key[1:]\n\n\n@pytest.mark.parametrize(\n \"annotation\",\n [\n {\"worker\": \"alice\"},\n {\"block_id\": annot_map_fn},\n ],\n)\ndef test_single_annotation(annotation):\n da = pytest.importorskip(\"dask.array\")\n with dask.annotate(**annotation):\n A = da.ones((10, 10), chunks=(5, 5))\n\n alayer = A.__dask_graph__().layers[A.name]\n assert alayer.annotations == annotation\n assert dask.config.get(\"annotations\", None) is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_works_with_highlevel_graph_test_works_with_highlevel_graph.assert_res_x_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_test_works_with_highlevel_graph_test_works_with_highlevel_graph.assert_res_x_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 161, "end_line": 180, "span_ids": ["test_works_with_highlevel_graph"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_works_with_highlevel_graph():\n \"\"\"Previously `dask.multiprocessing.get` would accidentally forward\n `HighLevelGraph` graphs through the dask optimization/scheduling routines,\n resulting in odd errors. One way to trigger this was to have a\n non-indexable object in a task. This is just a smoketest to ensure that\n things work properly even if `HighLevelGraph` objects get passed to\n `dask.multiprocessing.get`. 
See https://github.com/dask/dask/issues/7190.\n \"\"\"\n\n class NoIndex:\n def __init__(self, x):\n self.x = x\n\n def __getitem__(self, key):\n raise Exception(\"Oh no!\")\n\n x = delayed(lambda x: x)(NoIndex(1))\n (res,) = get(x.dask, x.__dask_keys__())\n assert isinstance(res, NoIndex)\n assert res.x == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_scheduler_lock_ensure_dict.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_get_scheduler_lock_ensure_dict.return.result", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1134, "end_line": 1169, "span_ids": ["get_scheduler_lock", "ensure_dict"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_scheduler_lock(collection=None, scheduler=None):\n \"\"\"Get an instance of the appropriate lock for a certain situation based on\n scheduler used.\"\"\"\n from . import multiprocessing\n from .base import get_scheduler\n\n actual_get = get_scheduler(collections=[collection], scheduler=scheduler)\n\n if actual_get == multiprocessing.get:\n return multiprocessing.get_context().Manager().Lock()\n\n return SerializableLock()\n\n\ndef ensure_dict(d: Mapping[K, V], *, copy: bool = False) -> dict[K, V]:\n \"\"\"Convert a generic Mapping into a dict.\n Optimize use case of :class:`~dask.highlevelgraph.HighLevelGraph`.\n\n Parameters\n ----------\n d : Mapping\n copy : bool\n If True, guarantee that the return value is always a shallow copy of d;\n otherwise it may be the input itself.\n \"\"\"\n if type(d) is dict:\n return d.copy() if copy else d # type: ignore\n try:\n layers = d.layers # type: ignore\n except AttributeError:\n return dict(d)\n\n result = {}\n for layer in toolz.unique(layers.values(), key=id):\n result.update(layer)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_fuse_slice_implements.return.decorator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_fuse_slice_implements.return.decorator", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 157, "end_line": 183, "span_ids": ["implements", "impl:5"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "from .optimization import fuse_slice, optimize\n\n# __array_function__ dict for mapping aliases and mismatching names\n_HANDLED_FUNCTIONS = {}\n\n\ndef implements(*numpy_functions):\n \"\"\"Register an __array_function__ implementation for dask.array.Array\n\n Register that a function implements the API of a NumPy function (or several\n NumPy functions in case of aliases) which is handled with\n ``__array_function__``.\n\n Parameters\n ----------\n \\\\*numpy_functions : callables\n One or more NumPy functions that are handled by ``__array_function__``\n and will be mapped by `implements` to a `dask.array` function.\n \"\"\"\n\n def decorator(dask_func):\n for numpy_function in numpy_functions:\n _HANDLED_FUNCTIONS[numpy_function] = dask_func\n\n return dask_func\n\n return decorator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array_Array.__slots__._dask___name__cache": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array_Array.__slots__._dask___name__cache", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1090, "end_line": 1119, "span_ids": ["Array"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n \"\"\"Parallel Dask Array\n\n A parallel nd-array comprised of many numpy arrays arranged in a grid.\n\n This constructor is for advanced uses only. 
For normal use see the\n :func:`dask.array.from_array` function.\n\n Parameters\n ----------\n dask : dict\n Task dependency graph\n name : string\n Name of array in dask\n shape : tuple of ints\n Shape of the entire array\n chunks: iterable of tuples\n block sizes along each dimension\n dtype : str or dtype\n Typecode or data-type for the new Dask Array\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n See Also\n --------\n dask.array.from_array\n \"\"\"\n\n __slots__ = \"dask\", \"__name\", \"_cached_keys\", \"__chunks\", \"_meta\", \"__dict__\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__bool___Array.__index__.return.self__scalarfunc_operator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__bool___Array.__index__.return.self__scalarfunc_operator", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1719, "end_line": 1748, "span_ids": ["Array.__index__", "Array.__int__", "Array._scalarfunc", "Array.__float__", "Array:13", "Array.__bool__", "Array.__complex__", "Array:11"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __bool__(self):\n if self.size > 1:\n raise ValueError(\n f\"The truth value of a {self.__class__.__name__} is ambiguous. 
\"\n \"Use a.any() or a.all().\"\n )\n else:\n return bool(self.compute())\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted to Python scalars\")\n else:\n return cast_type(self.compute())\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n def __index__(self):\n return self._scalarfunc(operator.index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__setitem___Array.__setitem__.self._chunks.y_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__setitem___Array.__setitem__.self._chunks.y_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1750, "end_line": 1795, "span_ids": ["Array.__setitem__"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __setitem__(self, key, value):\n if value is np.ma.masked:\n value = np.ma.masked_all(())\n\n ## Use the \"where\" method for cases when key is an Array\n if isinstance(key, Array):\n from .routines import where\n\n if isinstance(value, Array) and value.ndim > 1:\n raise ValueError(\"boolean index array should have 1 dimension\")\n try:\n y = where(key, value, self)\n except ValueError as e:\n raise ValueError(\n \"Boolean index assignment in Dask \"\n \"expects equally shaped arrays.\\nExample: da1[da2] = da3 \"\n \"where da1.shape == (4,), da2.shape == (4,) \"\n \"and da3.shape == (4,).\"\n ) from e\n self._meta = y._meta\n self.dask = y.dask\n self._name = y.name\n self._chunks = y.chunks\n return\n\n if np.isnan(self.shape).any():\n raise ValueError(f\"Arrays chunk sizes are unknown. {unknown_chunk_message}\")\n\n # Still here? 
Then apply the assignment to other type of\n # indices via the `setitem_array` function.\n value = asanyarray(value)\n\n out = \"setitem-\" + tokenize(self, key, value)\n dsk = setitem_array(out, self, key, value)\n\n meta = meta_from_array(self._meta)\n if np.isscalar(meta):\n meta = np.array(meta)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n y = Array(graph, out, chunks=self.chunks, dtype=self.dtype, meta=meta)\n\n self._meta = y._meta\n self.dask = y.dask\n self._name = y.name\n self._chunks = y.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/image.py_os_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/image.py_os_", "embedding": null, "metadata": {"file_path": "dask/array/image.py", "file_name": "image.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 70, "span_ids": ["add_leading_dimension", "imports", "imread"], "tokens": 468}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nfrom glob import glob\n\ntry:\n from skimage.io import imread as sk_imread\nexcept (AttributeError, ImportError):\n pass\n\nfrom ..base import tokenize\nfrom .core import Array\n\n\ndef add_leading_dimension(x):\n return x[None, ...]\n\n\ndef imread(filename, imread=None, preprocess=None):\n \"\"\"Read a stack of images into a dask array\n\n Parameters\n ----------\n\n filename: string\n A globstring like 'myfile.*.png'\n imread: function (optional)\n Optionally provide custom imread function.\n Function should expect a filename and produce a numpy array.\n Defaults to ``skimage.io.imread``.\n preprocess: function (optional)\n Optionally provide custom function to preprocess the image.\n Function should expect a numpy array for a single image.\n\n Examples\n --------\n\n >>> from dask.array.image import imread\n >>> im = imread('2015-*-*.png') # doctest: +SKIP\n >>> im.shape # doctest: +SKIP\n (365, 1000, 1000, 3)\n\n Returns\n -------\n\n Dask array of all images stacked along the first dimension. 
All images\n will be treated as individual chunks\n \"\"\"\n imread = imread or sk_imread\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n\n name = \"imread-%s\" % tokenize(filenames, map(os.path.getmtime, filenames))\n\n sample = imread(filenames[0])\n if preprocess:\n sample = preprocess(sample)\n\n keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]\n if preprocess:\n values = [\n (add_leading_dimension, (preprocess, (imread, fn))) for fn in filenames\n ]\n else:\n values = [(add_leading_dimension, (imread, fn)) for fn in filenames]\n dsk = dict(zip(keys, values))\n\n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n\n return Array(dsk, name, chunks, sample.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/lib/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/lib/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/array/lib/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["imports"], "tokens": 6}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from . 
import stride_tricks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/lib/stride_tricks.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/lib/stride_tricks.py__", "embedding": null, "metadata": {"file_path": "dask/array/lib/stride_tricks.py", "file_name": "stride_tricks.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["imports"], "tokens": 13}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from ..overlap import sliding_window_view # noqa: F401", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd.if_nb_0_nb_1_1___solve_triangular_lower.return.solve_triangular_safe_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_svd.if_nb_0_nb_1_1___solve_triangular_lower.return.solve_triangular_safe_a_", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 929, "end_line": 962, "span_ids": ["svd", "_solve_triangular_lower"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd(a, coerce_signs=True):\n # ... other code\n if nb[0] == nb[1] == 1:\n m, n = a.shape\n k = min(a.shape)\n mu, ms, mv = np.linalg.svd(\n np.ones_like(a._meta, shape=(1, 1), dtype=a._meta.dtype)\n )\n u, s, v = delayed(np.linalg.svd, nout=3)(a, full_matrices=False)\n u = from_delayed(u, shape=(m, k), meta=mu)\n s = from_delayed(s, shape=(k,), meta=ms)\n v = from_delayed(v, shape=(k, n), meta=mv)\n # Multi-chunk cases\n else:\n # Tall-and-skinny case\n if nb[0] > nb[1]:\n u, s, v = tsqr(a, compute_svd=True)\n truncate = a.shape[0] < a.shape[1]\n # Short-and-fat case\n else:\n vt, s, ut = tsqr(a.T, compute_svd=True)\n u, s, v = ut.T, s, vt.T\n truncate = a.shape[0] > a.shape[1]\n # Only when necessary, remove extra singular vectors if array\n # has shape that contradicts chunking, e.g. 
the array is a\n # column of chunks but still has more columns than rows overall\n if truncate:\n k = min(a.shape)\n u, v = u[:, :k], v[:k, :]\n if coerce_signs:\n u, v = svd_flip(u, v)\n return u, s, v\n\n\ndef _solve_triangular_lower(a, b):\n return solve_triangular_safe(a, b, lower=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_inv_cholesky.if_lower_.else_.return.u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/linalg.py_inv_cholesky.if_lower_.else_.return.u", "embedding": null, "metadata": {"file_path": "dask/array/linalg.py", "file_name": "linalg.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1241, "end_line": 1286, "span_ids": ["_cholesky_lower", "cholesky", "inv"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def inv(a):\n \"\"\"\n Compute the inverse of a matrix with LU decomposition and\n forward / backward substitutions.\n\n Parameters\n ----------\n a : array_like\n Square matrix to be inverted.\n\n Returns\n -------\n ainv : Array\n Inverse of the matrix `a`.\n \"\"\"\n return solve(a, eye(a.shape[0], chunks=a.chunks[0][0]))\n\n\ndef _cholesky_lower(a):\n return np.linalg.cholesky(a)\n\n\ndef cholesky(a, lower=False):\n \"\"\"\n Returns the Cholesky decomposition, :math:`A = L L^*` or\n :math:`A = U^* U` of a Hermitian positive-definite matrix A.\n\n Parameters\n ----------\n a : (M, M) array_like\n Matrix to be decomposed\n lower : bool, optional\n Whether to compute the upper or lower triangular Cholesky\n factorization. 
Default is upper-triangular.\n\n Returns\n -------\n c : (M, M) Array\n Upper- or lower-triangular Cholesky factor of `a`.\n \"\"\"\n\n l, u = _cholesky(a)\n if lower:\n return l\n else:\n return u", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_warnings_try_.except_TypeError_.ma_divide.np_ma_core__DomainedBinar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_warnings_try_.except_TypeError_.ma_divide.np_ma_core__DomainedBinar", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 47, "span_ids": ["imports"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport numpy as np\nfrom packaging.version import parse as parse_version\n\nfrom ..utils import derived_from\n\n_np_version = parse_version(np.__version__)\n_numpy_120 = _np_version >= parse_version(\"1.20.0\")\n_numpy_121 = _np_version >= parse_version(\"1.21.0\")\n_numpy_122 = _np_version >= parse_version(\"1.22.0\")\n\n\n# Taken from scikit-learn:\n# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84\ntry:\n with warnings.catch_warnings():\n if (\n not np.allclose(\n np.divide(0.4, 1, casting=\"unsafe\"),\n np.divide(0.4, 1, casting=\"unsafe\", dtype=float),\n )\n or not np.allclose(np.divide(1, 0.5, dtype=\"i8\"), 2)\n or not np.allclose(np.divide(0.4, 1), 0.4)\n ):\n raise TypeError(\n \"Divide not working with dtype: \"\n \"https://github.com/numpy/numpy/issues/3484\"\n )\n divide = np.divide\n ma_divide = np.ma.divide\n\nexcept TypeError:\n # Divide with dtype doesn't work on Python 3\n def divide(x1, x2, out=None, dtype=None):\n \"\"\"Implementation of numpy.divide that works with dtype kwarg.\n\n Temporary compatibility fix for a bug in numpy's version. 
See\n https://github.com/numpy/numpy/issues/3484 for the relevant issue.\"\"\"\n x = np.divide(x1, x2, out)\n if dtype is not None:\n x = x.astype(dtype)\n return x\n\n ma_divide = np.ma.core._DomainedBinaryOperation(\n divide, np.ma.core._DomainSafeDivide(), 0, 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_adapted__rollaxis.return.a_transpose_axes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Implementation_adapted__rollaxis.return.a_transpose_axes_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 154, "end_line": 172, "span_ids": ["rollaxis", "moveaxis"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Implementation adapted directly from numpy:\n# https://github.com/numpy/numpy/blob/v1.17.0/numpy/core/numeric.py#L1107-L1204\ndef rollaxis(a, axis, start=0):\n n = a.ndim\n axis = np.core.numeric.normalize_axis_index(axis, n)\n if start < 0:\n start += n\n msg = \"'%s' arg requires %d <= %s < %d, but %d was passed in\"\n if not (0 <= start < n + 1):\n raise ValueError(msg % (\"start\", -n, \"start\", n + 1, start))\n if axis < start:\n # it's been removed\n start -= 1\n if axis == start:\n return a[...]\n axes = list(range(0, n))\n axes.remove(axis)\n axes.insert(start, axis)\n return a.transpose(axes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_if__numpy_120__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py_if__numpy_120__", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 175, "end_line": 274, "span_ids": ["impl:20", "percentile"], "tokens": 1011}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "if _numpy_120:\n sliding_window_view = np.lib.stride_tricks.sliding_window_view\nelse:\n # copied from numpy.lib.stride_tricks\n # https://github.com/numpy/numpy/blob/0721406ede8b983b8689d8b70556499fc2aea28a/numpy/lib/stride_tricks.py#L122-L336\n def sliding_window_view(\n x, window_shape, axis=None, *, subok=False, writeable=False\n ):\n \"\"\"\n Create a sliding window view into the array with the given window shape.\n Also known as 
rolling or moving window, the window slides across all\n dimensions of the array and extracts subsets of the array at all window\n positions.\n\n .. versionadded:: 1.20.0\n Parameters\n ----------\n x : array_like\n Array to create the sliding window view from.\n window_shape : int or tuple of int\n Size of window over each axis that takes part in the sliding window.\n If `axis` is not present, must have same length as the number of input\n array dimensions. Single integers `i` are treated as if they were the\n tuple `(i,)`.\n axis : int or tuple of int, optional\n Axis or axes along which the sliding window is applied.\n By default, the sliding window is applied to all axes and\n `window_shape[i]` will refer to axis `i` of `x`.\n If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to\n the axis `axis[i]` of `x`.\n Single integers `i` are treated as if they were the tuple `(i,)`.\n subok : bool, optional\n If True, sub-classes will be passed-through, otherwise the returned\n array will be forced to be a base-class array (default).\n writeable : bool, optional\n When true, allow writing to the returned view. The default is false,\n as this should be used with caution: the returned view contains the\n same memory location multiple times, so writing to one location will\n cause others to change.\n Returns\n -------\n view : ndarray\n Sliding window view of the array. The sliding window dimensions are\n inserted at the end, and the original dimensions are trimmed as\n required by the size of the sliding window.\n That is, ``view.shape = x_shape_trimmed + window_shape``, where\n ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less\n than the corresponding window size.\n \"\"\"\n from numpy.core.numeric import normalize_axis_tuple\n\n window_shape = (\n tuple(window_shape) if np.iterable(window_shape) else (window_shape,)\n )\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=False, subok=subok)\n\n window_shape_array = np.array(window_shape)\n if np.any(window_shape_array < 0):\n raise ValueError(\"`window_shape` cannot contain negative values\")\n\n if axis is None:\n axis = tuple(range(x.ndim))\n if len(window_shape) != len(axis):\n raise ValueError(\n f\"Since axis is `None`, must provide \"\n f\"window_shape for all dimensions of `x`; \"\n f\"got {len(window_shape)} window_shape elements \"\n f\"and `x.ndim` is {x.ndim}.\"\n )\n else:\n axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)\n if len(window_shape) != len(axis):\n raise ValueError(\n f\"Must provide matching length window_shape and \"\n f\"axis; got {len(window_shape)} window_shape \"\n f\"elements and {len(axis)} axes elements.\"\n )\n\n out_strides = x.strides + tuple(x.strides[ax] for ax in axis)\n\n # note: same axis can be windowed repeatedly\n x_shape_trimmed = list(x.shape)\n for ax, dim in zip(axis, window_shape):\n if x_shape_trimmed[ax] < dim:\n raise ValueError(\"window shape cannot be larger than input array shape\")\n x_shape_trimmed[ax] -= dim - 1\n out_shape = tuple(x_shape_trimmed) + window_shape\n return np.lib.stride_tricks.as_strided(\n x, strides=out_strides, shape=out_shape, subok=subok, writeable=writeable\n )\n\n\n# kwarg is renamed in numpy 1.22.0\ndef percentile(a, q, method=\"linear\"):\n if _numpy_122:\n return np.percentile(a, q, method=method)\n else:\n return np.percentile(a, q, interpolation=method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
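The `percentile` wrapper at the end of this chunk exists only because numpy 1.22 renamed the `interpolation=` kwarg to `method=`. A small sketch of the same guard written with try/except instead of an explicit version check (assumed equivalent behaviour):

```python
import numpy as np

a = np.arange(10)
# numpy >= 1.22 spells the kwarg `method`; older releases use `interpolation`.
try:
    r = np.percentile(a, 50, method="linear")
except TypeError:
    r = np.percentile(a, 50, interpolation="linear")
print(r)  # 4.5
```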
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_coerce_depth_coerce_boundary.return.boundary": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_coerce_depth_coerce_boundary.return.boundary", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 718, "end_line": 750, "span_ids": ["coerce_depth_type", "coerce_boundary", "coerce_depth"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def coerce_depth(ndim, depth):\n default = 0\n if depth is None:\n depth = default\n if isinstance(depth, Integral):\n depth = (depth,) * ndim\n if isinstance(depth, tuple):\n depth = dict(zip(range(ndim), depth))\n if isinstance(depth, dict):\n depth = {ax: depth.get(ax, default) for ax in range(ndim)}\n return coerce_depth_type(ndim, depth)\n\n\ndef coerce_depth_type(ndim, depth):\n for i in range(ndim):\n if isinstance(depth[i], tuple):\n depth[i] = tuple(int(d) for d in depth[i])\n else:\n depth[i] = int(depth[i])\n return depth\n\n\ndef coerce_boundary(ndim, boundary):\n default = \"none\"\n if boundary is None:\n boundary = default\n if not isinstance(boundary, (tuple, dict)):\n boundary = (boundary,) * ndim\n if isinstance(boundary, tuple):\n boundary = dict(zip(range(ndim), boundary))\n if isinstance(boundary, dict):\n boundary = {ax: boundary.get(ax, default) for ax in range(ndim)}\n return boundary", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_sliding_window_view_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_sliding_window_view_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 852, "end_line": 912, "span_ids": ["sliding_window_view"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(numpy_compat)\ndef sliding_window_view(x, window_shape, axis=None):\n from numpy.core.numeric import normalize_axis_tuple\n\n window_shape = tuple(window_shape) if np.iterable(window_shape) else (window_shape,)\n\n window_shape_array = np.array(window_shape)\n if np.any(window_shape_array <= 0):\n raise ValueError(\"`window_shape` must contain values > 0\")\n\n if axis is None:\n axis = tuple(range(x.ndim))\n if len(window_shape) != len(axis):\n raise ValueError(\n f\"Since axis is `None`, must provide \"\n f\"window_shape for all 
dimensions of `x`; \"\n f\"got {len(window_shape)} window_shape elements \"\n f\"and `x.ndim` is {x.ndim}.\"\n )\n else:\n axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)\n if len(window_shape) != len(axis):\n raise ValueError(\n f\"Must provide matching length window_shape and \"\n f\"axis; got {len(window_shape)} window_shape \"\n f\"elements and {len(axis)} axes elements.\"\n )\n\n depths = [0] * x.ndim\n for ax, window in zip(axis, window_shape):\n depths[ax] += window - 1\n\n # Ensure that each chunk is big enough to leave at least a size-1 chunk\n # after windowing (this is only really necessary for the last chunk).\n safe_chunks = tuple(\n ensure_minimum_chunksize(d + 1, c) for d, c in zip(depths, x.chunks)\n )\n x = x.rechunk(safe_chunks)\n\n # result.shape = x_shape_trimmed + window_shape,\n # where x_shape_trimmed is x.shape with every entry\n # reduced by one less than the corresponding window size.\n # trim chunks to match x_shape_trimmed\n newchunks = tuple(c[:-1] + (c[-1] - d,) for d, c in zip(depths, x.chunks)) + tuple(\n (window,) for window in window_shape\n )\n\n return map_overlap(\n numpy_compat.sliding_window_view,\n x,\n depth=tuple((0, d) for d in depths), # Overlap on +ve side only\n boundary=\"none\",\n meta=x._meta,\n new_axis=range(x.ndim, x.ndim + len(axis)),\n chunks=newchunks,\n trim=False,\n align_arrays=False,\n window_shape=window_shape,\n axis=axis,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap_RandomState._wrap.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState._wrap_RandomState._wrap.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 190, "span_ids": ["RandomState._wrap"], "tokens": 892}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState:\n\n def _wrap(\n self, funcname, *args, size=None, chunks=\"auto\", extra_chunks=(), **kwargs\n ):\n \"\"\"Wrap numpy random function to produce dask.array random function\n\n extra_chunks should be a chunks tuple to append to the end of chunks\n \"\"\"\n if size is not None and not isinstance(size, (tuple, list)):\n size = (size,)\n\n shapes = list(\n {\n ar.shape\n for ar in chain(args, kwargs.values())\n if isinstance(ar, (Array, np.ndarray))\n }\n )\n if size is not None:\n shapes.append(size)\n # broadcast to the final size(shape)\n size = broadcast_shapes(*shapes)\n chunks = normalize_chunks(\n chunks,\n size, # ideally would use dtype here\n dtype=kwargs.get(\"dtype\", np.float64),\n )\n slices = slices_from_chunks(chunks)\n\n def _broadcast_any(ar, shape, chunks):\n if isinstance(ar, Array):\n return broadcast_to(ar, shape).rechunk(chunks)\n if isinstance(ar, np.ndarray):\n return np.ascontiguousarray(np.broadcast_to(ar, shape))\n\n # 
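The overlap-based `sliding_window_view` above follows the numpy semantics: window dimensions are appended at the end and the original dimensions are trimmed by one less than the window size. A quick usage sketch (assuming `dask.array.overlap.sliding_window_view` is importable as shown in this chunk):

```python
import numpy as np
import dask.array as da
from dask.array.overlap import sliding_window_view

x = da.arange(8, chunks=4)
w = sliding_window_view(x, 3)          # windows of length 3 along axis 0
print(w.shape)                         # (6, 3): trimmed shape + window shape
np.testing.assert_array_equal(w[0].compute(), [0, 1, 2])
```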
Broadcast all arguments, get tiny versions as well\n # Start adding the relevant bits to the graph\n dsk = {}\n lookup = {}\n small_args = []\n dependencies = []\n for i, ar in enumerate(args):\n if isinstance(ar, (np.ndarray, Array)):\n res = _broadcast_any(ar, size, chunks)\n if isinstance(res, Array):\n dependencies.append(res)\n lookup[i] = res.name\n elif isinstance(res, np.ndarray):\n name = f\"array-{tokenize(res)}\"\n lookup[i] = name\n dsk[name] = res\n small_args.append(ar[tuple(0 for _ in ar.shape)])\n else:\n small_args.append(ar)\n\n small_kwargs = {}\n for key, ar in kwargs.items():\n if isinstance(ar, (np.ndarray, Array)):\n res = _broadcast_any(ar, size, chunks)\n if isinstance(res, Array):\n dependencies.append(res)\n lookup[key] = res.name\n elif isinstance(res, np.ndarray):\n name = f\"array-{tokenize(res)}\"\n lookup[key] = name\n dsk[name] = res\n small_kwargs[key] = ar[tuple(0 for _ in ar.shape)]\n else:\n small_kwargs[key] = ar\n\n sizes = list(product(*chunks))\n seeds = random_state_data(len(sizes), self._numpy_state)\n token = tokenize(seeds, size, chunks, args, kwargs)\n name = f\"{funcname}-{token}\"\n\n keys = product(\n [name], *([range(len(bd)) for bd in chunks] + [[0]] * len(extra_chunks))\n )\n blocks = product(*[range(len(bd)) for bd in chunks])\n\n vals = []\n for seed, size, slc, block in zip(seeds, sizes, slices, blocks):\n arg = []\n for i, ar in enumerate(args):\n if i not in lookup:\n arg.append(ar)\n else:\n if isinstance(ar, Array):\n arg.append((lookup[i],) + block)\n else: # np.ndarray\n arg.append((getitem, lookup[i], slc))\n kwrg = {}\n for k, ar in kwargs.items():\n if k not in lookup:\n kwrg[k] = ar\n else:\n if isinstance(ar, Array):\n kwrg[k] = (lookup[k],) + block\n else: # np.ndarray\n kwrg[k] = (getitem, lookup[k], slc)\n vals.append(\n (_apply_random, self._RandomState, funcname, seed, size, arg, kwrg)\n )\n\n meta = _apply_random(\n self._RandomState,\n funcname,\n seed,\n (0,) * len(size),\n small_args,\n small_kwargs,\n )\n\n dsk.update(dict(zip(keys, vals)))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n return Array(graph, name, chunks + extra_chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py___empty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py___empty", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 28, "span_ids": ["docstring"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nThe rechunk module defines:\n intersect_chunks: a function for\n converting chunks to new dimensions\n rechunk: a function to convert the blocks\n of an existing dask array to new chunks or blockshape\n\"\"\"\nfrom __future__ import annotations\n\nimport heapq\nimport math\nfrom functools import reduce\nfrom itertools import 
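`RandomState._wrap` is what lets array-valued distribution parameters be broadcast against the requested output shape block by block, so each output chunk only depends on the matching parameter chunk. A hedged usage sketch of the public API that funnels through it:

```python
import dask.array as da

state = da.random.RandomState(0)
mu = da.arange(4, chunks=2)            # a per-column mean, itself a dask array
# `mu` is broadcast to size (6, 4) and rechunked to match the output blocks.
x = state.normal(loc=mu, scale=1.0, size=(6, 4), chunks=(3, 2))
print(x.chunks)                        # ((3, 3), (2, 2))
```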
chain, count, product\nfrom operator import add, itemgetter, mul\nfrom warnings import warn\n\nimport numpy as np\nimport tlz as toolz\nfrom tlz import accumulate\n\nfrom .. import config\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import parse_bytes\nfrom .chunk import getitem\nfrom .core import Array, concatenate3, normalize_chunks\nfrom .utils import validate_axis\nfrom .wrap import empty", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanprod_nanprod.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanprod_nanprod.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 454, "end_line": 469, "span_ids": ["nanprod"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n chunk.nanprod,\n chunk.prod,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nancumsum_nancumsum.return.cumreduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nancumsum_nancumsum.return.cumreduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 472, "end_line": 495, "span_ids": ["nancumsum"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nancumsum(x, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumsum. Default is 'sequential'.\n\n * 'sequential' performs the cumsum of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumsum. 
It exposes parallelism by\n first taking the sum of each block and combining the sums via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n chunk.nancumsum,\n operator.add,\n 0,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.nansum,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nancumprod_nancumprod.return.cumreduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nancumprod_nancumprod.return.cumreduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 498, "end_line": 521, "span_ids": ["nancumprod"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nancumprod(x, axis, dtype=None, out=None, *, method=\"sequential\"):\n \"\"\"Dask added an additional keyword-only argument ``method``.\n\n method : {'sequential', 'blelloch'}, optional\n Choose which method to use to perform the cumprod. Default is 'sequential'.\n\n * 'sequential' performs the cumprod of each prior block before the current block.\n * 'blelloch' is a work-efficient parallel cumprod. It exposes parallelism by first\n taking the product of each block and combining the products via a binary tree.\n This method may be faster or more memory efficient depending on workload,\n scheduler, and hardware. 
More benchmarking is necessary.\n \"\"\"\n return cumreduction(\n chunk.nancumprod,\n operator.mul,\n 1,\n x,\n axis,\n dtype,\n out=out,\n method=method,\n preop=np.nanprod,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmean_nanmean.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmean_nanmean.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 661, "end_line": 678, "span_ids": ["nanmean"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanmean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n if dtype is not None:\n dt = dtype\n else:\n dt = getattr(np.mean(np.empty(shape=(1,), dtype=a.dtype)), \"dtype\", object)\n return reduction(\n a,\n partial(mean_chunk, sum=chunk.nansum, numel=nannumel),\n mean_agg,\n axis=axis,\n keepdims=keepdims,\n dtype=dt,\n split_every=split_every,\n out=out,\n concatenate=False,\n combine=partial(mean_combine, sum=chunk.nansum, numel=nannumel),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__sqrt_safe_sqrt.return._sqrt_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__sqrt_safe_sqrt.return._sqrt_a_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 874, "end_line": 892, "span_ids": ["_sqrt", "safe_sqrt"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sqrt(a):\n o = np.sqrt(a)\n if isinstance(o, np.ma.masked_array) and not o.shape and o.mask.all():\n return np.ma.masked\n return o\n\n\ndef safe_sqrt(a):\n \"\"\"A version of sqrt that properly handles scalar masked arrays.\n\n To mimic ``np.ma`` reductions, we need to convert scalar masked arrays that\n have an active mask to the ``np.ma.masked`` singleton. This is properly\n handled automatically for reduction code, but not for ufuncs. 
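Both `nancumsum` and `nancumprod` expose the same `method` switch. A small check that the two methods agree on a NaN-containing input (a sketch; relative performance depends on workload and scheduler, as the docstrings note):

```python
import numpy as np
import dask.array as da

x = da.from_array(np.array([1.0, np.nan, 2.0, 3.0]), chunks=2)
seq = da.nancumsum(x, 0).compute()                      # [1. 1. 3. 6.]
par = da.nancumsum(x, 0, method="blelloch").compute()   # same values
np.testing.assert_allclose(seq, par)
```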
We implement\n a simple version here, since calling `np.ma.sqrt` everywhere is\n significantly more expensive.\n \"\"\"\n if hasattr(a, \"_elemwise\"):\n return a._elemwise(_sqrt, a)\n return _sqrt(a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__arg_combine__arg_combine.return.arg_vals": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__arg_combine__arg_combine.return.arg_vals", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 956, "end_line": 986, "span_ids": ["_arg_combine"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _arg_combine(data, axis, argfunc, keepdims=False):\n \"\"\"Merge intermediate results from ``arg_*`` functions\"\"\"\n if isinstance(data, dict):\n # Array type doesn't support structured arrays (e.g., CuPy),\n # therefore `data` is stored in a `dict`.\n assert data[\"vals\"].ndim == data[\"arg\"].ndim\n axis = (\n None\n if len(axis) == data[\"vals\"].ndim or data[\"vals\"].ndim == 1\n else axis[0]\n )\n else:\n axis = None if len(axis) == data.ndim or data.ndim == 1 else axis[0]\n\n vals = data[\"vals\"]\n arg = data[\"arg\"]\n if axis is None:\n local_args = argfunc(vals, axis=axis, keepdims=keepdims)\n vals = vals.ravel()[local_args]\n arg = arg.ravel()[local_args]\n else:\n local_args = argfunc(vals, axis=axis)\n inds = np.ogrid[tuple(map(slice, local_args.shape))]\n inds.insert(axis, local_args)\n inds = tuple(inds)\n vals = vals[inds]\n arg = arg[inds]\n if keepdims:\n vals = np.expand_dims(vals, axis)\n arg = np.expand_dims(arg, axis)\n return arg, vals", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_flip_fliplr.return.flip_m_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_flip_fliplr.return.flip_m_1_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 227, "span_ids": ["flip", "fliplr", "flipud"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def flip(m, axis=None):\n \"\"\"\n Reverse element order along axis.\n\n Parameters\n ----------\n m : array_like\n Input array.\n axis : None or int or tuple of ints, optional\n Axis 
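The point of `_sqrt`/`safe_sqrt` is the 0-d masked corner case. A self-contained illustration of the helper (copied logic, run on plain numpy inputs):

```python
import numpy as np

def _sqrt(a):
    o = np.sqrt(a)
    # Collapse a fully-masked 0-d result to the np.ma.masked singleton,
    # matching what np.ma reductions produce.
    if isinstance(o, np.ma.masked_array) and not o.shape and o.mask.all():
        return np.ma.masked
    return o

print(_sqrt(np.ma.masked_array(4.0, mask=True)) is np.ma.masked)  # True
print(_sqrt(np.ma.masked_array(4.0, mask=False)))                 # 2.0
```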
or axes to reverse element order of. None will reverse all axes.\n\n Returns\n -------\n dask.array.Array\n The flipped array.\n \"\"\"\n\n m = asanyarray(m)\n\n sl = m.ndim * [slice(None)]\n if axis is None:\n axis = range(m.ndim)\n if not isinstance(axis, Iterable):\n axis = (axis,)\n try:\n for ax in axis:\n sl[ax] = slice(None, None, -1)\n except IndexError as e:\n raise ValueError(\n f\"`axis` of {str(axis)} invalid for {str(m.ndim)}-D array\"\n ) from e\n sl = tuple(sl)\n\n return m[sl]\n\n\n@derived_from(np)\ndef flipud(m):\n return flip(m, 0)\n\n\n@derived_from(np)\ndef fliplr(m):\n return flip(m, 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_rot90_rot90.if_k_1_.else_.return.flip_transpose_m_axes_li": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_rot90_rot90.if_k_1_.else_.return.flip_transpose_m_axes_li", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 230, "end_line": 258, "span_ids": ["rot90"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef rot90(m, k=1, axes=(0, 1)):\n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError(\"len(axes) must be 2.\")\n\n m = asanyarray(m)\n\n if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:\n raise ValueError(\"Axes must be different.\")\n\n if axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim:\n raise ValueError(f\"Axes={axes} out of range for array of ndim={m.ndim}.\")\n\n k %= 4\n\n if k == 0:\n return m[:]\n if k == 2:\n return flip(flip(m, axes[0]), axes[1])\n\n axes_list = list(range(0, m.ndim))\n (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]])\n\n if k == 1:\n return transpose(flip(m, axes[1]), axes_list)\n else:\n # k == 3\n return flip(transpose(m, axes_list), axes[1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd._Blocked_variant_of_fu_histogramdd._Blocked_variant_of_fu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd._Blocked_variant_of_fu_histogramdd._Blocked_variant_of_fu", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1151, "end_line": 1316, "span_ids": ["histogramdd"], "tokens": 2054}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
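`flip` is implemented purely with negative-step slicing, so it stays lazy and cheap. A usage sketch covering the single-axis and multi-axis cases:

```python
import numpy as np
import dask.array as da

x = da.from_array(np.arange(6).reshape(2, 3), chunks=(1, 3))
print(da.flip(x, 0).compute())       # rows reversed, same as da.flipud(x)
print(da.flip(x, (0, 1)).compute())  # both axes reversed
```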
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):\n \"\"\"Blocked variant of :func:`numpy.histogramdd`.\n\n Chunking of the input data (``sample``) is only allowed along the\n 0th (row) axis (the axis corresponding to the total number of\n samples). Data chunked along the 1st axis (column) axis is not\n compatible with this function. If weights are used, they must be\n chunked along the 0th axis identically to the input sample.\n\n An example setup for a three dimensional histogram, where the\n sample shape is ``(8, 3)`` and weights are shape ``(8,)``, sample\n chunks would be ``((4, 4), (3,))`` and the weights chunks would be\n ``((4, 4),)`` a table of the structure:\n\n +-------+-----------------------+-----------+\n | | sample (8 x 3) | weights |\n +=======+=====+=====+=====+=====+=====+=====+\n | chunk | row | `x` | `y` | `z` | row | `w` |\n +-------+-----+-----+-----+-----+-----+-----+\n | | 0 | 5 | 6 | 6 | 0 | 0.5 |\n | +-----+-----+-----+-----+-----+-----+\n | | 1 | 8 | 9 | 2 | 1 | 0.8 |\n | 0 +-----+-----+-----+-----+-----+-----+\n | | 2 | 3 | 3 | 1 | 2 | 0.3 |\n | +-----+-----+-----+-----+-----+-----+\n | | 3 | 2 | 5 | 6 | 3 | 0.7 |\n +-------+-----+-----+-----+-----+-----+-----+\n | | 4 | 3 | 1 | 1 | 4 | 0.3 |\n | +-----+-----+-----+-----+-----+-----+\n | | 5 | 3 | 2 | 9 | 5 | 1.3 |\n | 1 +-----+-----+-----+-----+-----+-----+\n | | 6 | 8 | 1 | 5 | 6 | 0.8 |\n | +-----+-----+-----+-----+-----+-----+\n | | 7 | 3 | 5 | 3 | 7 | 0.7 |\n +-------+-----+-----+-----+-----+-----+-----+\n\n If the sample 0th dimension and weight 0th (row) dimension are\n chunked differently, a ``ValueError`` will be raised. If\n coordinate groupings ((x, y, z) trios) are separated by a chunk\n boundry, then a ``ValueError`` will be raised. We suggest that you\n rechunk your data if it is of that form.\n\n The chunks property of the data (and optional weights) are used to\n check for compatibility with the blocked algorithm (as described\n above); therefore, you must call `to_dask_array` on a collection\n from ``dask.dataframe``, i.e. :class:`dask.dataframe.Series` or\n :class:`dask.dataframe.DataFrame`.\n\n The function is also compatible with `x`, `y`, and `z` being\n individual 1D arrays with equal chunking. 
In that case, the data\n should be passed as a tuple: ``histogramdd((x, y, z), ...)``\n\n Parameters\n ----------\n sample : dask.array.Array (N, D) or sequence of dask.array.Array\n Multidimensional data to be histogrammed.\n\n Note the unusual interpretation of a sample when it is a\n sequence of dask Arrays:\n\n * When a (N, D) dask Array, each row is an entry in the sample\n (coordinate in D dimensional space).\n * When a sequence of dask Arrays, each element in the sequence\n is the array of values for a single coordinate.\n bins : sequence of arrays describing bin edges, int, or sequence of ints\n The bin specification.\n\n The possible binning configurations are:\n\n * A sequence of arrays describing the monotonically increasing\n bin edges along each dimension.\n * A single int describing the total number of bins that will\n be used in each dimension (this requires the ``range``\n argument to be defined).\n * A sequence of ints describing the total number of bins to be\n used in each dimension (this requires the ``range`` argument\n to be defined).\n\n When bins are described by arrays, the rightmost edge is\n included. Bins described by arrays also allow for non-uniform\n bin widths.\n range : sequence of pairs, optional\n A sequence of length D, each a (min, max) tuple giving the\n outer bin edges to be used if the edges are not given\n explicitly in `bins`. If defined, this argument is required to\n have an entry for each dimension. Unlike\n :func:`numpy.histogramdd`, if `bins` does not define bin\n edges, this argument is required (this function will not\n automatically use the min and max of the values in a given\n dimension because the input data may be lazy in dask).\n normed : bool, optional\n An alias for the density argument that behaves identically. To\n avoid confusion with the broken ``normed`` argument to `histogram`,\n `density` should be preferred.\n weights : dask.array.Array, optional\n An array of values weighing each sample in the input data. The\n chunks of the weights must be identical to the chunking along\n the 0th (row) axis of the data sample.\n density : bool, optional\n If ``False`` (default), the returned array represents the\n number of samples in each bin. If ``True``, the returned array\n represents the probability density function at each bin.\n\n See Also\n --------\n histogram\n\n Returns\n -------\n dask.array.Array\n The values of the histogram.\n list(dask.array.Array)\n Sequence of arrays representing the bin edges along each\n dimension.\n\n Examples\n --------\n Computing the histogram in 5 blocks using different bin edges\n along each dimension:\n\n >>> import dask.array as da\n >>> x = da.random.uniform(0, 1, size=(1000, 3), chunks=(200, 3))\n >>> edges = [\n ... np.linspace(0, 1, 5), # 4 bins in 1st dim\n ... np.linspace(0, 1, 6), # 5 in the 2nd\n ... np.linspace(0, 1, 4), # 3 in the 3rd\n ... 
]\n >>> h, edges = da.histogramdd(x, bins=edges)\n >>> result = h.compute()\n >>> result.shape\n (4, 5, 3)\n\n Defining the bins by total number and their ranges, along with\n using weights:\n\n >>> bins = (4, 5, 3)\n >>> ranges = ((0, 1),) * 3 # expands to ((0, 1), (0, 1), (0, 1))\n >>> w = da.random.uniform(0, 1, size=(1000,), chunks=x.chunksize[0])\n >>> h, edges = da.histogramdd(x, bins=bins, range=ranges, weights=w)\n >>> np.isclose(h.sum().compute(), w.sum().compute())\n True\n\n Using a sequence of 1D arrays as the input:\n\n >>> x = da.array([2, 4, 2, 4, 2, 4])\n >>> y = da.array([2, 2, 4, 4, 2, 4])\n >>> z = da.array([4, 2, 4, 2, 4, 2])\n >>> bins = ([0, 3, 6],) * 3\n >>> h, edges = da.histogramdd((x, y, z), bins)\n >>> h\n dask.array\n >>> edges[0]\n dask.array\n >>> h.compute()\n array([[[0., 2.],\n [0., 1.]],\n \n [[1., 0.],\n [2., 0.]]])\n >>> edges[0].compute()\n array([0, 3, 6])\n >>> edges[1].compute()\n array([0, 3, 6])\n >>> edges[2].compute()\n array([0, 3, 6])\n\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_bisect__sanitize_index_element.if_isinstance_ind_Number.else_.raise_TypeError_Invalid_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_bisect__sanitize_index_element.if_isinstance_ind_Number.else_.raise_TypeError_Invalid_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 36, "span_ids": ["imports", "_sanitize_index_element"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import bisect\nimport functools\nimport math\nimport warnings\nfrom itertools import product\nfrom numbers import Integral, Number\nfrom operator import itemgetter\n\nimport numpy as np\nfrom tlz import concat, memoize, merge, pluck\n\nfrom .. import config, core, utils\nfrom ..base import is_dask_collection, tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import cached_cumsum, is_arraylike\nfrom .chunk import getitem\n\ncolon = slice(None, None, None)\n\n\ndef _sanitize_index_element(ind):\n \"\"\"Sanitize a one-element index.\"\"\"\n if isinstance(ind, Number):\n ind2 = int(ind)\n if ind2 != ind:\n raise IndexError(\"Bad index. Must be integer-like: %s\" % ind)\n else:\n return ind2\n elif ind is None:\n return None\n elif is_dask_collection(ind):\n if ind.dtype.kind != \"i\" or ind.size != 1:\n raise IndexError(f\"Bad index. 
Must be integer-like: {ind}\")\n return ind\n else:\n raise TypeError(\"Invalid index type\", type(ind), ind)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_concatenate_array_chunks_concatenate_array_chunks.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_concatenate_array_chunks_concatenate_array_chunks.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1515, "end_line": 1542, "span_ids": ["concatenate_array_chunks"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate_array_chunks(x):\n \"\"\"Concatenate the multidimensional chunks of an array.\n\n Can be used on chunks with unknown sizes.\n\n Parameters\n ----------\n x : dask array\n\n Returns\n -------\n dask array\n The concatenated dask array with one chunk.\n\n \"\"\"\n from .core import Array, concatenate3\n\n if x.npartitions == 1:\n return x\n\n name = \"concatenate3-\" + tokenize(x)\n d = {(name, 0): (concatenate3, x.__dask_keys__())}\n graph = HighLevelGraph.from_collections(name, d, dependencies=[x])\n chunks = x.shape\n if not chunks:\n chunks = (1,)\n\n return Array(graph, name, chunks=(chunks,), dtype=x.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array_setitem_array._Master_function_for_ar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array_setitem_array._Master_function_for_ar", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1545, "end_line": 1607, "span_ids": ["setitem_array"], "tokens": 501}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n \"\"\"Master function for array assignment.\n\n This function, which is intended to be called by\n `Array.__setitem__`, creates a new dask that assigns values to\n each block that is touched by the indices, leaving other blocks\n unchanged.\n\n Each block that overlaps the indices is assigned from the\n appropriate part of the assignment value. 
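`_sanitize_index_element` coerces scalar-like indices to plain ints and rejects non-integral ones. A sketch of its behaviour (it is a private helper, imported here only for illustration):

```python
import numpy as np
from dask.array.slicing import _sanitize_index_element

print(_sanitize_index_element(np.int32(3)))  # 3, coerced to a plain int
print(_sanitize_index_element(None))         # None passes through unchanged
try:
    _sanitize_index_element(3.5)             # non-integral scalar
except IndexError as exc:
    print(exc)                               # Bad index. Must be integer-like: 3.5
```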
The dasks of these value\n parts are included in the output dask dictionary, as are the dasks\n of any 1-d dask array indices. This ensures that the dask array\n assignment value and any dask array indices are not computed until\n the `Array.__setitem__` operation is computed.\n\n The part of the assignment value that applies to each block is\n created as a \"getitem\" slice of the full assignment value.\n\n Parameters\n ----------\n out_name : `str`\n The dask variable output name.\n array : dask array\n The dask array that is being assigned to.\n indices : numpy-style indices\n Indices to array defining the elements to be assigned.\n value : dask array\n The assignment value, i.e. the values which will be assigned\n to elements of array.\n\n Returns\n -------\n dsk : `dict`\n A dictionary where the keys are new unique tokens for each\n block of the form\n\n (out_name, dim_index[, dim_index[, ...]])\n\n and the values are either\n\n (key,)\n\n or\n\n (setitem, key, v_key, block_indices)\n\n where key is an existing top-level dask key of array.\n\n The first case occurs when the block represented by key does\n not overlap the indices.\n\n The second case occurs when the block represented by key does\n overlap the indices. setitem is the chunk assignment function;\n v_key is the dask key of the part of the assignment value\n that corresponds to the block; and block_indices are the\n assignment indices that apply to the block.\n\n The dictionary also includes any additional key/value pairs\n needed to define v_key, as well as any additional\n key/value pairs needed to define dask keys contained in the\n block_indices list as references to dask array indices.\n\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.block_index_from_1d_index_setitem_array.block_index_from_1d_index.return.i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.block_index_from_1d_index_setitem_array.block_index_from_1d_index.return.i", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1577, "end_line": 1640, "span_ids": ["setitem_array"], "tokens": 598}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n\n @functools.lru_cache\n def block_index_from_1d_index(dim, loc0, loc1, is_bool):\n \"\"\"The positions of index elements in the range [loc0, loc1).\n\n The index is the input assignment index that is defined in the\n namespace of the caller. 
It is assumed that negative elements\n of an integer array have already been posified.\n\n The non-hashable dsk is the output dask dictionary that is\n defined in the namespace of the caller.\n\n Parameters\n ----------\n dim : `int`\n The dimension position of the index that is used as a proxy\n for the non-hashable index to define the LRU cache key.\n loc0 : `int`\n The start index of the block along the dimension.\n loc1 : `int`\n The stop index of the block along the dimension.\n is_bool : `bool`\n Whether or not the index is of boolean data type.\n\n Returns\n -------\n numpy array or `str`\n If index is a numpy array then a numpy array is\n returned.\n\n If index is a dask array then the dask of the block index\n is inserted into the output dask dictionary, and its\n unique top-layer key is returned.\n\n \"\"\"\n if is_bool:\n # Boolean array (dask or numpy)\n i = index[loc0:loc1]\n elif is_dask_collection(index):\n # Integer dask array\n #\n # Check for values in [loc0,loc1).\n #\n # Use the 3-argument \"where\" to insert place-holder\n # elements that will be searched for and removed in the\n # `setitem` function at compute time. The place-holder\n # value must be the size of the block, i.e. loc1-loc0. We\n # can't use a 1-argument \"where\" here because that won't\n # work if index has unknown chunk sizes.\n i = np.where((loc0 <= index) & (index < loc1), index, loc1)\n i -= loc0\n else:\n # Integer numpy array\n #\n # Check for positive values in [loc0,loc1).\n i = np.where((loc0 <= index) & (index < loc1))[0]\n i = index[i] - loc0\n\n if is_dask_collection(i):\n # Return dask key instead of dask array\n i = concatenate_array_chunks(i)\n dsk.update(dict(i.dask))\n i = next(flatten(i.__dask_keys__()))\n\n return i\n # ... other code\n\n block_index_from_1d_index.cache_clear()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.block_index_shape_from_1d_bool_index_setitem_array.block_index_shape_from_1d_bool_index.return.np_sum_index_loc0_loc1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.block_index_shape_from_1d_bool_index_setitem_array.block_index_shape_from_1d_bool_index.return.np_sum_index_loc0_loc1_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1642, "end_line": 1668, "span_ids": ["setitem_array"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n # ... 
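The 3-argument `where` trick used for integer dask-array indices can be demonstrated in plain numpy: values outside the block become the sentinel `loc1`, so after subtracting `loc0` they equal the block size and can be recognised and dropped by `setitem` at compute time:

```python
import numpy as np

index = np.array([1, 4, 5, 9])   # assignment index along one dimension
loc0, loc1 = 4, 8                # the block covers positions [4, 8)
i = np.where((loc0 <= index) & (index < loc1), index, loc1) - loc0
print(i)  # [4 0 1 4]: the 4s are placeholders (== loc1 - loc0); 0 and 1 select within the block
```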
other code\n\n @functools.lru_cache\n def block_index_shape_from_1d_bool_index(dim, loc0, loc1):\n \"\"\"Number of True index elements between positions loc0 and loc1.\n\n The index is the input assignment index that is defined in the\n namespace of the caller.\n\n Parameters\n ----------\n dim : `int`\n The dimension position of the index that is used as a proxy\n for the non-hashable index to define the LRU cache key.\n loc0 : `int`\n The start index of the block along the dimension.\n loc1 : `int`\n The stop index of the block along the dimension.\n\n Returns\n -------\n numpy array or dask array\n If index is a numpy array then a numpy array is\n returned.\n\n If index is dask array then a dask array is returned.\n\n \"\"\"\n return np.sum(index[loc0:loc1])\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.n_preceeding_from_1d_bool_index_setitem_array.n_preceeding_from_1d_bool_index.return.np_sum_index_loc0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.n_preceeding_from_1d_bool_index_setitem_array.n_preceeding_from_1d_bool_index.return.np_sum_index_loc0_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1670, "end_line": 1694, "span_ids": ["setitem_array"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n # ... other code\n\n @functools.lru_cache\n def n_preceeding_from_1d_bool_index(dim, loc0):\n \"\"\"Number of True index elements preceeding position loc0.\n\n The index is the input assignment index that is defined in the\n namespace of the caller.\n\n Parameters\n ----------\n dim : `int`\n The dimension position of the index that is used as a proxy\n for the non-hashable index to define the LRU cache key.\n loc0 : `int`\n The start index of the block along the dimension.\n\n Returns\n -------\n numpy array or dask array\n If index is a numpy array then a numpy array is\n returned.\n\n If index is dask array then a dask array is returned.\n\n \"\"\"\n return np.sum(index[:loc0])\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.value_indices_from_1d_int_index_setitem_array.value_indices_from_1d_int_index.return.i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.value_indices_from_1d_int_index_setitem_array.value_indices_from_1d_int_index.return.i", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1696, "end_line": 1748, "span_ids": ["setitem_array"], "tokens": 491}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n # ... other code\n\n @functools.lru_cache\n def value_indices_from_1d_int_index(dim, vsize, loc0, loc1):\n \"\"\"Value indices for index elements between loc0 and loc1.\n\n The index is the input assignment index that is defined in the\n namespace of the caller. It is assumed that negative elements\n have already been posified.\n\n Parameters\n ----------\n dim : `int`\n The dimension position of the index that is used as a proxy\n for the non-hashable index to define the LRU cache key.\n vsize : `int`\n The full size of the dimension of the assignment value.\n loc0 : `int`\n The start index of the block along the dimension.\n loc1 : `int`\n The stop index of the block along the dimension.\n\n Returns\n -------\n numpy array or dask array\n If index is a numpy array then a numpy array is\n returned.\n\n If index is dask array then a dask array is returned.\n\n \"\"\"\n # Check for values in [loc0,loc1)\n if is_dask_collection(index):\n if np.isnan(index.size):\n # Integer dask array with unknown size.\n #\n # The 1-argument \"where\" won't work, so use the\n # 3-argument \"where\" and convert to a boolean\n # array. We concatenate the resulting boolean index\n # and set the chunk size (which must be the full size\n # of the dimension of the assignment value) which\n # allows the returned array to be used as a\n # __getitem__ index to the assignment value.\n i = np.where((loc0 <= index) & (index < loc1), True, False)\n i = concatenate_array_chunks(i)\n i._chunks = ((vsize,),)\n else:\n # Integer dask array with known size\n i = np.where((loc0 <= index) & (index < loc1))[0]\n i = concatenate_array_chunks(i)\n else:\n # Integer numpy array.\n i = np.where((loc0 <= index) & (index < loc1))[0]\n\n return i\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.flatten_setitem_array.non_broadcast_dimensions._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.flatten_setitem_array.non_broadcast_dimensions._", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1798, "end_line": 1888, "span_ids": ["setitem_array"], "tokens": 864}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n # ... other code\n\n from ..core import flatten\n\n array_shape = array.shape\n value_shape = value.shape\n value_ndim = len(value_shape)\n\n # Reformat input indices\n indices, implied_shape, reverse, implied_shape_positions = parse_assignment_indices(\n indices, array_shape\n )\n\n # Empty slices can only be assigned size 1 values\n if 0 in implied_shape and value_shape and max(value_shape) > 1:\n raise ValueError(\n f\"shape mismatch: value array of shape {value_shape} \"\n \"could not be broadcast to indexing result \"\n f\"of shape {tuple(implied_shape)}\"\n )\n\n # Set variables needed when creating the part of the assignment\n # value that applies to each block.\n #\n # offset: The additive offset to the assignment value dimension\n # positions that results in the positions of the\n # corresponding dimensions in the array. offset is a\n # non-negative integer, and a positive value means that\n # the array has more dimensions than the assignment\n # value.\n #\n # value_offset: The additive offset to the array dimension\n # positions that results in the positions of the\n # corresponding dimensions in the assignment\n # value. value_offset is a non-negative integer,\n # and a positive value means that the assignment\n # value has more dimensions than the array.\n #\n # For example:\n #\n # array.shape value.shape offset value_offset\n # ------------ ------------ ------ ------------\n # (3, 4) (3, 4) 0 0\n # (1, 1, 3, 4) (3, 4) 2 0\n # (3, 4) (1, 1, 3, 4) 0 2\n # ------------ ------------ ------ ------------\n #\n # array_common_shape: The shape of those dimensions of array\n # which correspond to dimensions of the\n # assignment value.\n #\n # value_common_shape: The shape of those dimensions of the\n # assignment value which correspond to\n # dimensions of the array.\n #\n # base_value_indices: The indices used for initialising the\n # selection of the part of the assignment\n # value that applies to each block of\n # array. 
An element of `None` will end up\n # being replaced by an appropriate slice on a\n # block-by-block basis.\n #\n # non_broadcast_dimensions: The integer positions of\n # array_common_shape which do not\n # correspond to broadcast dimensions in\n # the assignment value.\n #\n # Note that array_common_shape and value_common_shape may be\n # different if there are any size 1 dimensions being broadcast.\n offset = len(implied_shape) - value_ndim\n if offset >= 0:\n # The array has the same number or more dimensions than the\n # assignment value\n array_common_shape = implied_shape[offset:]\n value_common_shape = value_shape\n value_offset = 0\n reverse = [i - offset for i in reverse if i >= offset]\n else:\n # The assignment value has more dimensions than the array\n value_offset = -offset\n array_common_shape = implied_shape\n value_common_shape = value_shape[value_offset:]\n offset = 0\n\n # All of the extra leading dimensions must have size 1\n if value_shape[:value_offset] != (1,) * value_offset:\n raise ValueError(\n \"could not broadcast input array from shape \"\n f\"{value_shape} into shape {tuple(implied_shape)}\"\n )\n\n base_value_indices = []\n non_broadcast_dimensions = []\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.for_in_key_locations_in__setitem_array.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.for_in_key_locations_in__setitem_array.return.dsk", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1891, "end_line": 2076, "span_ids": ["setitem_array"], "tokens": 1588}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n\n @functools.lru_cache\n def block_index_from_1d_index(dim, loc0, loc1, is_bool):\n # ... other code\n # ... other code\n for in_key, locations in zip(in_keys, array_locations):\n\n # Now loop round each block dimension.\n #\n # If the block overlaps the indices then set the following\n # (which will be used to define a new dask entry):\n #\n # block_indices: The indices that will be used to assign to\n # this block.\n #\n # block_indices_shape: The shape implied by block_indices.\n #\n # block_preceeding_sizes: How many assigned elements precede\n # this block along each dimension that\n # doesn't have an integer. It is\n # assumed that a slice will have a\n # positive step, as will be the case\n # for reformatted indices. 
`None` is\n # used for dimensions with 1-d integer\n # arrays.\n block_indices = []\n block_indices_shape = []\n block_preceeding_sizes = []\n\n local_offset = offset\n\n # Assume, until demonstrated otherwise, that this block\n # overlaps the assignment indices.\n overlaps = True\n\n # Note which dimension, if any, has a 1-d integer array index\n dim_1d_int_index = None\n\n for dim, (index, full_size, (loc0, loc1)) in enumerate(\n zip(indices, array_shape, locations)\n ):\n\n integer_index = isinstance(index, int)\n if isinstance(index, slice):\n # Index is a slice\n stop = loc1 - loc0\n if index.stop < loc1:\n stop -= loc1 - index.stop\n\n start = index.start - loc0\n if start < 0:\n # Make start positive\n start %= index.step\n\n if start >= stop:\n # This block does not overlap the slice index\n overlaps = False\n break\n\n step = index.step\n block_index = slice(start, stop, step)\n block_index_size, rem = divmod(stop - start, step)\n if rem:\n block_index_size += 1\n\n pre = index.indices(loc0)\n n_preceeding, rem = divmod(pre[1] - pre[0], step)\n if rem:\n n_preceeding += 1\n\n elif integer_index:\n # Index is an integer\n local_offset += 1\n if not loc0 <= index < loc1:\n # This block does not overlap the integer index\n overlaps = False\n break\n\n block_index = index - loc0\n\n else:\n # Index is a 1-d array\n is_bool = index.dtype == bool\n block_index = block_index_from_1d_index(dim, loc0, loc1, is_bool)\n if is_bool:\n block_index_size = block_index_shape_from_1d_bool_index(\n dim, loc0, loc1\n )\n n_preceeding = n_preceeding_from_1d_bool_index(dim, loc0)\n else:\n block_index_size = None\n n_preceeding = None\n dim_1d_int_index = dim\n loc0_loc1 = loc0, loc1\n\n if not is_dask_collection(index) and not block_index.size:\n # This block does not overlap the 1-d numpy array\n # index\n overlaps = False\n break\n\n # Note: When the 1-d array index is a dask array then\n # we can't tell if this block overlaps it, so we\n # assume that it does. If it in fact doesn't\n # overlap then the part of the assignment value\n # that corresponds to this block will have zero\n # size which, at compute time, will indicate to\n # the `setitem` function to pass the block\n # through unchanged.\n\n # Still here? This block overlaps the index for this\n # dimension.\n block_indices.append(block_index)\n if not integer_index:\n block_indices_shape.append(block_index_size)\n block_preceeding_sizes.append(n_preceeding)\n\n # The new dask key\n out_key = out_name + in_key[1:]\n\n if not overlaps:\n # This block does not overlap the indices for all\n # dimensions, so pass the block through unchanged.\n dsk[out_key] = in_key\n continue\n\n # Still here? 
Then this block overlaps the indices for all\n # dimensions and so needs to have some of its elements\n # assigned.\n\n # Initialise the indices of the assignment value that define\n # the parts of it which are to be assigned to this block\n value_indices = base_value_indices[:]\n for i in non_broadcast_dimensions:\n j = i + offset\n if j == dim_1d_int_index:\n # Index is a 1-d integer array\n #\n # Define index in the current namespace for use in\n # `value_indices_from_1d_int_index`\n index = indices[j]\n\n value_indices[i] = value_indices_from_1d_int_index(\n dim_1d_int_index, value_shape[i + value_offset], *loc0_loc1\n )\n else:\n # Index is a slice or 1-d boolean array\n start = block_preceeding_sizes[j]\n value_indices[i] = slice(start, start + block_indices_shape[j])\n\n # If required as a consequence of reformatting any slice\n # objects of the original indices to have positive steps,\n # reverse the indices to the assignment value.\n for i in reverse:\n size = value_common_shape[i]\n start, stop, step = value_indices[i].indices(size)\n size -= 1\n start = size - start\n stop = size - stop\n if stop < 0:\n stop = None\n\n value_indices[i] = slice(start, stop, -1)\n\n if value_ndim > len(indices):\n # The assignment value has more dimensions than the array, so\n # add a leading Ellipsis to the indices of value.\n value_indices.insert(0, Ellipsis)\n\n # Create the part of the full assignment value that is to be\n # assigned to elements of this block and make sure that it has\n # just one chunk (so we can represent it with a single key in\n # the argument list of setitem).\n v = value[tuple(value_indices)]\n v = concatenate_array_chunks(v)\n v_key = next(flatten(v.__dask_keys__()))\n\n # Insert into the output dask dictionary the dask of the part\n # of the assignment value for this block (not minding when we\n # overwrite any existing keys as the values will be the same).\n dsk = merge(dict(v.dask), dsk)\n\n # Define the assignment function for this block.\n dsk[out_key] = (setitem, in_key, v_key, block_indices)\n\n block_index_from_1d_index.cache_clear()\n block_index_shape_from_1d_bool_index.cache_clear()\n n_preceeding_from_1d_bool_index.cache_clear()\n value_indices_from_1d_int_index.cache_clear()\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2098, "end_line": 2183, "span_ids": ["setitem"], "tokens": 780}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem(x, v, indices):\n \"\"\"Chunk function of `setitem_array`.\n\n Assign v to indices of x.\n\n Parameters\n ----------\n x : numpy array\n The array to be assigned to.\n v : numpy array\n The values which will be assigned.\n indices : list of `slice`, `int`, or numpy 
array\n The indices describing the elements of x to be assigned from\n v. One index per axis.\n\n Note that an individual index cannot be a `list`; use a 1-d\n numpy array instead.\n\n If a 1-d numpy array index contains the non-valid value equal to the\n size of the corresponding dimension of x, then those index\n elements will be removed prior to the assignment (see\n `block_index_from_1d_index` function).\n\n Returns\n -------\n numpy array\n A new independent array with assigned elements, unless v is\n empty (i.e. has zero size) in which case the input array\n is returned and the indices are ignored.\n\n Examples\n --------\n >>> x = np.arange(8).reshape(2, 4)\n >>> setitem(x, np.array(-99), [np.array([False, True])])\n array([[ 0, 1, 2, 3],\n [-99, -99, -99, -99]])\n >>> x\n array([[0, 1, 2, 3],\n [4, 5, 6, 7]])\n >>> setitem(x, np.array([-88, -99]), [slice(None), np.array([1, 3])])\n array([[ 0, -88, 2, -99],\n [ 4, -88, 6, -99]])\n >>> setitem(x, -x, [slice(None)])\n array([[ 0, -1, -2, -3],\n [-4, -5, -6, -7]])\n >>> x\n array([[0, 1, 2, 3],\n [4, 5, 6, 7]])\n >>> setitem(x, np.array([-88, -99]), [slice(None), np.array([4, 4, 3, 4, 1, 4])])\n array([[ 0, -99, 2, -88],\n [ 4, -99, 6, -88]])\n >>> value = np.where(x < 0)[0]\n >>> value.size\n 0\n >>> y = setitem(x, value, [Ellipsis])\n >>> y is x\n True\n \"\"\"\n if not v.size:\n return x\n\n # Normalize integer array indices\n for i, (index, block_size) in enumerate(zip(indices, x.shape)):\n if isinstance(index, np.ndarray) and index.dtype != bool:\n # Strip out any non-valid place-holder values\n index = index[np.where(index < block_size)[0]]\n indices[i] = index\n\n # If x is not masked but v is, then turn x into a masked\n # array.\n if not np.ma.isMA(x) and np.ma.isMA(v):\n x = x.view(np.ma.MaskedArray)\n\n # Copy the array to guarantee no other objects are corrupted\n x = x.copy()\n\n # Do the assignment\n try:\n x[tuple(indices)] = v\n except ValueError as e:\n raise ValueError(\n \"shape mismatch: value array could not be broadcast to indexing result\"\n ) from e\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosis_kurtosis.if_fisher_.else_.return.vals": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_kurtosis_kurtosis.if_fisher_.else_.return.vals", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 254, "end_line": 281, "span_ids": ["kurtosis"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(scipy.stats)\ndef kurtosis(a, axis=0, fisher=True, bias=True, nan_policy=\"propagate\"):\n if nan_policy != \"propagate\":\n raise NotImplementedError(\n \"`nan_policy` other than 'propagate' have not been implemented.\"\n )\n n = a.shape[axis] # noqa; for bias\n m2 = moment(a, 2, axis)\n m4 = moment(a, 4, axis)\n zero = m2 == 0\n olderr = 
np.seterr(all=\"ignore\")\n try:\n vals = da.where(zero, 0, m4 / m2**2.0)\n finally:\n np.seterr(**olderr)\n\n if not bias:\n # need a version of np.place\n raise NotImplementedError(\"bias=False is not implemented.\")\n\n if fisher:\n return vals - 3\n else:\n if vals.ndim == 0:\n # TODO: scalar, min is a workaround\n return vals.min()\n\n return vals", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_with_full_slice_test_h5py_tokenize.with_tmpfile_hdf5_as_f.with_tmpfile_hdf5_as_f.assert_tokenize_x1_to": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_point_slicing_with_full_slice_test_h5py_tokenize.with_tmpfile_hdf5_as_f.with_tmpfile_hdf5_as_f.assert_tokenize_x1_to", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2809, "end_line": 3030, "span_ids": ["test_index_with_integer_types", "test_empty_array", "test_point_slicing_with_full_slice", "test_view_fortran", "test_slice_with_floats", "test_slice_with_integer_types", "test_memmap", "test_to_npy_stack", "test_vindex_negative", "test_vindex_basic", "test_vindex_merge", "test_vindex_identity", "test_vindex_nd", "test_view", "test_h5py_tokenize", "test_vindex_errors"], "tokens": 2129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_point_slicing_with_full_slice():\n from dask.array.core import _get_axis, _vindex_transpose\n\n x = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7))\n d = da.from_array(x, chunks=(2, 3, 3, 4))\n\n inds = [\n [[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],\n [[1, 2, 3], None, [4, 3, 2], None],\n [[1, 2, 3], [3, 2, 1]],\n [[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],\n [[], [], [], None],\n [np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],\n [None, None, [1, 2, 3], [4, 3, 2]],\n [None, [0, 2, 3], None, [0, 3, 2]],\n ]\n\n for ind in inds:\n slc = [\n i if isinstance(i, (np.ndarray, list)) else slice(None, None) for i in ind\n ]\n result = d.vindex[tuple(slc)]\n\n # Rotate the expected result accordingly\n axis = _get_axis(ind)\n expected = _vindex_transpose(x[tuple(slc)], axis)\n\n assert_eq(result, expected)\n\n # Always have the first axis be the length of the points\n k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))\n assert result.shape[0] == k\n\n\ndef test_slice_with_floats():\n d = da.ones((5,), chunks=(3,))\n with pytest.raises(IndexError):\n d[1.5]\n with pytest.raises(IndexError):\n d[0:1.5]\n with pytest.raises(IndexError):\n d[[1, 1.5]]\n\n\ndef test_slice_with_integer_types():\n x = np.arange(10)\n dx = da.from_array(x, chunks=5)\n inds = np.array([0, 3, 6], dtype=\"u8\")\n assert_eq(dx[inds], x[inds])\n assert_eq(dx[inds.astype(\"u4\")], x[inds.astype(\"u4\")])\n\n inds = np.array([0, 3, 6], dtype=np.int64)\n assert_eq(dx[inds], x[inds])\n assert_eq(dx[inds.astype(\"u4\")], 
x[inds.astype(\"u4\")])\n\n\ndef test_index_with_integer_types():\n x = np.arange(10)\n dx = da.from_array(x, chunks=5)\n inds = int(3)\n assert_eq(dx[inds], x[inds])\n\n inds = np.int64(3)\n assert_eq(dx[inds], x[inds])\n\n\ndef test_vindex_basic():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(3, 4))\n\n # cases where basic and advanced indexing coincide\n result = d.vindex[0]\n assert_eq(result, x[0])\n\n result = d.vindex[0, 1]\n assert_eq(result, x[0, 1])\n\n result = d.vindex[[0, 1], ::-1] # slices last\n assert_eq(result, x[:2, ::-1])\n\n\ndef test_vindex_nd():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(3, 4))\n\n result = d.vindex[[[0, 1], [6, 0]], [[0, 1], [0, 7]]]\n assert_eq(result, x[[[0, 1], [6, 0]], [[0, 1], [0, 7]]])\n\n result = d.vindex[np.arange(7)[:, None], np.arange(8)[None, :]]\n assert_eq(result, x)\n\n result = d.vindex[np.arange(7)[None, :], np.arange(8)[:, None]]\n assert_eq(result, x.T)\n\n\ndef test_vindex_negative():\n x = np.arange(10)\n d = da.from_array(x, chunks=(5, 5))\n\n result = d.vindex[np.array([0, -1])]\n assert_eq(result, x[np.array([0, -1])])\n\n\ndef test_vindex_errors():\n d = da.ones((5, 5, 5), chunks=(3, 3, 3))\n pytest.raises(IndexError, lambda: d.vindex[np.newaxis])\n pytest.raises(IndexError, lambda: d.vindex[[1, 2], [1, 2, 3]])\n pytest.raises(IndexError, lambda: d.vindex[[True] * 5])\n pytest.raises(IndexError, lambda: d.vindex[[0], [5]])\n pytest.raises(IndexError, lambda: d.vindex[[0], [-6]])\n\n\ndef test_vindex_merge():\n from dask.array.core import _vindex_merge\n\n locations = [1], [2, 0]\n values = [np.array([[1, 2, 3]]), np.array([[10, 20, 30], [40, 50, 60]])]\n\n assert (\n _vindex_merge(locations, values)\n == np.array([[40, 50, 60], [1, 2, 3], [10, 20, 30]])\n ).all()\n\n\ndef test_vindex_identity():\n rng = da.random.RandomState(42)\n a, b = 10, 20\n\n x = rng.random(a, chunks=a // 2)\n assert x is x.vindex[:]\n assert x is x.vindex[:a]\n pytest.raises(IndexError, lambda: x.vindex[: a - 1])\n pytest.raises(IndexError, lambda: x.vindex[1:])\n pytest.raises(IndexError, lambda: x.vindex[0:a:2])\n\n x = rng.random((a, b), chunks=(a // 2, b // 2))\n assert x is x.vindex[:, :]\n assert x is x.vindex[:a, :b]\n pytest.raises(IndexError, lambda: x.vindex[:, : b - 1])\n pytest.raises(IndexError, lambda: x.vindex[:, 1:])\n pytest.raises(IndexError, lambda: x.vindex[:, 0:b:2])\n\n\ndef test_empty_array():\n assert_eq(np.arange(0), da.arange(0, chunks=5))\n\n\ndef test_memmap():\n with tmpfile(\"npy\") as fn_1:\n with tmpfile(\"npy\") as fn_2:\n try:\n x = da.arange(100, chunks=15)\n target = np.memmap(fn_1, shape=x.shape, mode=\"w+\", dtype=x.dtype)\n\n x.store(target)\n\n assert_eq(target, x, check_type=False)\n\n np.save(fn_2, target)\n\n assert_eq(np.load(fn_2, mmap_mode=\"r\"), x, check_type=False)\n finally:\n target._mmap.close()\n\n\ndef test_to_npy_stack():\n x = np.arange(5 * 10 * 10).reshape((5, 10, 10))\n d = da.from_array(x, chunks=(2, 4, 4))\n\n with tmpdir() as dirname:\n stackdir = os.path.join(dirname, \"test\")\n da.to_npy_stack(stackdir, d, axis=0)\n assert os.path.exists(os.path.join(stackdir, \"0.npy\"))\n assert (np.load(os.path.join(stackdir, \"1.npy\")) == x[2:4]).all()\n\n e = da.from_npy_stack(stackdir)\n assert_eq(d, e)\n\n\ndef test_view():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(2, 3))\n\n assert_eq(x.view(), d.view())\n assert_eq(x.view(\"i4\"), d.view(\"i4\"))\n assert_eq(x.view(\"i2\"), d.view(\"i2\"))\n assert all(isinstance(s, int) for s 
in d.shape)\n\n x = np.arange(8, dtype=\"i1\")\n d = da.from_array(x, chunks=(4,))\n assert_eq(x.view(\"i4\"), d.view(\"i4\"))\n\n with pytest.raises(ValueError):\n x = np.arange(8, dtype=\"i1\")\n d = da.from_array(x, chunks=(3,))\n d.view(\"i4\")\n\n with pytest.raises(ValueError):\n d.view(\"i4\", order=\"asdf\")\n\n\ndef test_view_fortran():\n x = np.asfortranarray(np.arange(64).reshape((8, 8)))\n d = da.from_array(x, chunks=(2, 3))\n assert_eq(x.T.view(\"i4\").T, d.view(\"i4\", order=\"F\"))\n assert_eq(x.T.view(\"i2\").T, d.view(\"i2\", order=\"F\"))\n\n\ndef test_h5py_tokenize():\n h5py = pytest.importorskip(\"h5py\")\n with tmpfile(\"hdf5\") as fn1:\n with tmpfile(\"hdf5\") as fn2:\n f = h5py.File(fn1, mode=\"a\")\n g = h5py.File(fn2, mode=\"a\")\n\n f[\"x\"] = np.arange(10).astype(float)\n g[\"x\"] = np.ones(10).astype(float)\n\n x1 = f[\"x\"]\n x2 = g[\"x\"]\n\n assert tokenize(x1) != tokenize(x2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_zero_slice_dtypes_test_normalize_chunks_auto_3d.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_zero_slice_dtypes_test_normalize_chunks_auto_3d.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3931, "end_line": 4140, "span_ids": ["test_delayed_array_key_hygeine", "test_blockwise_zero_shape", "test_constructor_plugin", "test_elemwise_with_lists", "test_normalize_chunks_auto_2d", "test_normalize_chunks_auto_1d", "test_normalize_chunks_auto_3d", "test_stack_errs", "test_zero_sized_array_rechunk", "test_broadcast_against_zero_shape", "test_empty_chunks_in_array_len", "test_blockwise_with_numpy_arrays", "test_blockwise_zero_shape_new_axes", "test_from_array_name", "test_zero_slice_dtypes", "test_no_warnings_on_metadata", "test_meta", "test_concatenate_errs"], "tokens": 1968}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_zero_slice_dtypes():\n x = da.arange(5, chunks=1)\n y = x[[]]\n assert y.dtype == x.dtype\n assert y.shape == (0,)\n assert_eq(x[[]], np.arange(5)[[]])\n\n\ndef test_zero_sized_array_rechunk():\n x = da.arange(5, chunks=1)[:0]\n y = da.blockwise(identity, \"i\", x, \"i\", dtype=x.dtype)\n assert_eq(x, y)\n\n\ndef test_blockwise_zero_shape():\n da.blockwise(\n lambda x: x,\n \"i\",\n da.arange(10, chunks=10),\n \"i\",\n da.from_array(np.ones((0, 2)), ((0,), 2)),\n \"ab\",\n da.from_array(np.ones((0,)), ((0,),)),\n \"a\",\n dtype=\"float64\",\n )\n\n\ndef test_blockwise_zero_shape_new_axes():\n da.blockwise(\n lambda x: np.ones(42),\n \"i\",\n da.from_array(np.ones((0, 2)), ((0,), 2)),\n \"ab\",\n da.from_array(np.ones((0,)), ((0,),)),\n \"a\",\n dtype=\"float64\",\n new_axes={\"i\": 42},\n )\n\n\ndef test_broadcast_against_zero_shape():\n assert_eq(da.arange(1, chunks=1)[:0] + 0, np.arange(1)[:0] + 0)\n 
assert_eq(da.arange(1, chunks=1)[:0] + 0.1, np.arange(1)[:0] + 0.1)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0, np.ones((5, 5))[:0] + 0)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0.1, np.ones((5, 5))[:0] + 0.1)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0, np.ones((5, 5))[:, :0] + 0)\n assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0.1, np.ones((5, 5))[:, :0] + 0.1)\n\n\ndef test_from_array_name():\n x = np.array([1, 2, 3, 4, 5])\n chunks = x.shape\n # Default is tokenize the array\n dx = da.from_array(x, chunks=chunks)\n hashed_name = dx.name\n assert da.from_array(x, chunks=chunks).name == hashed_name\n # Specify name directly\n assert da.from_array(x, chunks=chunks, name=\"x\").name == \"x\"\n # False gives a random name\n dx2 = da.from_array(x, chunks=chunks, name=False)\n dx3 = da.from_array(x, chunks=chunks, name=False)\n assert dx2.name != hashed_name\n assert dx3.name != hashed_name\n assert dx2.name != dx3.name\n\n\ndef test_concatenate_errs():\n with pytest.raises(ValueError, match=r\"Shapes.*\\(2, 1\\)\"):\n da.concatenate(\n [da.zeros((2, 1), chunks=(2, 1)), da.zeros((2, 3), chunks=(2, 3))]\n )\n\n with pytest.raises(ValueError):\n da.concatenate(\n [da.zeros((1, 2), chunks=(1, 2)), da.zeros((3, 2), chunks=(3, 2))], axis=1\n )\n\n\ndef test_stack_errs():\n with pytest.raises(ValueError) as e:\n da.stack([da.zeros((2,), chunks=2)] * 10 + [da.zeros((3,), chunks=3)] * 10)\n\n assert (\n str(e.value)\n == \"Stacked arrays must have the same shape. The first array had shape (2,), while array 11 has shape (3,).\"\n )\n assert len(str(e.value)) < 105\n\n\ndef test_blockwise_with_numpy_arrays():\n x = np.ones(10)\n y = da.ones(10, chunks=(5,))\n\n assert_eq(x + y, x + x)\n\n s = da.sum(x)\n assert any(x is v for v in s.dask.values())\n\n\n@pytest.mark.parametrize(\"chunks\", (100, 6))\n@pytest.mark.parametrize(\"other\", [[0, 0, 1], [2, 1, 3], (0, 0, 1)])\ndef test_elemwise_with_lists(chunks, other):\n x = np.arange(12).reshape((4, 3))\n d = da.arange(12, chunks=chunks).reshape((4, 3))\n\n x2 = np.vstack([x[:, 0], x[:, 1], x[:, 2]]).T\n d2 = da.vstack([d[:, 0], d[:, 1], d[:, 2]]).T\n\n assert_eq(x2, d2)\n\n x3 = x2 * other\n d3 = d2 * other\n\n assert_eq(x3, d3)\n\n\ndef test_constructor_plugin():\n L = []\n L2 = []\n with dask.config.set(array_plugins=[L.append, L2.append]):\n x = da.ones(10, chunks=5)\n y = x + 1\n\n assert L == L2 == [x, y]\n\n with dask.config.set(array_plugins=[lambda x: x.compute()]):\n x = da.ones(10, chunks=5)\n y = x + 1\n\n assert isinstance(y, np.ndarray)\n assert len(L) == 2\n\n\ndef test_no_warnings_on_metadata():\n x = da.ones(5, chunks=3)\n with warnings.catch_warnings(record=True) as record:\n da.arccos(x)\n\n assert not record\n\n\ndef test_delayed_array_key_hygeine():\n a = da.zeros((1,), chunks=(1,))\n d = delayed(identity)(a)\n b = da.from_delayed(d, shape=a.shape, dtype=a.dtype)\n assert_eq(a, b)\n\n\ndef test_empty_chunks_in_array_len():\n x = da.ones((), chunks=())\n with pytest.raises(TypeError) as exc_info:\n len(x)\n\n err_msg = \"len() of unsized object\"\n assert err_msg in str(exc_info.value)\n\n\n@pytest.mark.parametrize(\"dtype\", [None, [(\"a\", \"f4\"), (\"b\", object)]])\ndef test_meta(dtype):\n a = da.zeros((1,), chunks=(1,))\n assert a._meta.dtype == a.dtype\n assert isinstance(a._meta, np.ndarray)\n assert a.nbytes < 1000\n\n\n@pytest.mark.parametrize(\n \"shape,limit,expected\",\n [\n (100, 10, (10,) * 10),\n (20, 10, (10, 10)),\n (20, 5, (5, 5, 5, 5)),\n (24, 5, (4, 4, 4, 4, 4, 4)), # common factor is 
close, use it\n (23, 5, (5, 5, 5, 5, 3)), # relatively prime, don't use 1s\n (1000, 167, (125,) * 8), # find close value\n ],\n)\ndef test_normalize_chunks_auto_1d(shape, limit, expected):\n result = normalize_chunks(\"auto\", (shape,), limit=limit, dtype=np.uint8)\n assert result == (expected,)\n\n\n@pytest.mark.parametrize(\n \"shape,chunks,limit,expected\",\n [\n ((20, 20), (\"auto\", 2), 20, ((10, 10), (2,) * 10)),\n (\n (20, 20),\n (\"auto\", (2, 2, 2, 2, 2, 5, 5)),\n 20,\n ((4, 4, 4, 4, 4), (2, 2, 2, 2, 2, 5, 5)),\n ),\n ((1, 20), \"auto\", 10, ((1,), (10, 10))),\n ],\n)\ndef test_normalize_chunks_auto_2d(shape, chunks, limit, expected):\n result = normalize_chunks(chunks, shape, limit=limit, dtype=\"uint8\")\n assert result == expected\n\n\ndef test_normalize_chunks_auto_3d():\n result = normalize_chunks(\n (\"auto\", \"auto\", 2), (20, 20, 20), limit=200, dtype=\"uint8\"\n )\n expected = ((10, 10), (10, 10), (2,) * 10)\n assert result == expected\n\n result = normalize_chunks(\"auto\", (20, 20, 20), limit=8, dtype=\"uint8\")\n expected = ((2,) * 10,) * 3\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_np_test_array_function_dask.assert_eq_res_y_res_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_np_test_array_function_dask.assert_eq_res_y_res_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 46, "span_ids": ["test_array_function_dask", "imports"], "tokens": 394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq\n\nfrom .test_dispatch import EncapsulateNDArray, WrappedArray\n\n\n@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: np.append(x, x),\n lambda x: np.concatenate([x, x, x]),\n lambda x: np.cov(x, x),\n lambda x: np.dot(x, x),\n lambda x: np.dstack((x, x)),\n lambda x: np.flip(x, axis=0),\n lambda x: np.hstack((x, x)),\n lambda x: np.matmul(x, x),\n lambda x: np.mean(x),\n lambda x: np.stack([x, x]),\n lambda x: np.block([x, x]),\n lambda x: np.sum(x),\n lambda x: np.var(x),\n lambda x: np.vstack((x, x)),\n lambda x: np.linalg.norm(x),\n lambda x: np.min(x),\n lambda x: np.amin(x),\n lambda x: np.round(x),\n lambda x: np.insert(x, 0, 3, axis=0),\n lambda x: np.delete(x, 0, axis=0),\n lambda x: np.select(\n [x < 0.3, x < 0.6, x > 0.7], [x * 2, x, x / 2], default=0.65\n ),\n ],\n)\ndef test_array_function_dask(func):\n x = np.random.random((100, 100))\n y = da.from_array(x, chunks=(50, 50))\n res_x = func(x)\n res_y = func(y)\n\n assert isinstance(res_y, da.Array)\n assert_eq(res_y, res_x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_numpy_arg_test_blockwise_numpy_arg.assert_eq_x_np_arange_10": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_atop.py_test_blockwise_numpy_arg_test_blockwise_numpy_arg.assert_eq_x_np_arange_10", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_atop.py", "file_name": "test_atop.py", "file_type": "text/x-python", "category": "test", "start_line": 519, "end_line": 529, "span_ids": ["test_blockwise_numpy_arg"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_numpy_arg():\n x = da.arange(10, chunks=(5,))\n y = np.arange(1000)\n\n x = x.map_blocks(lambda x, y: x, 1.0)\n x = x.map_blocks(lambda x, y: x, \"abc\")\n x = x.map_blocks(lambda x, y: x, y)\n x = x.map_blocks(lambda x, y: x, \"abc\")\n x = x.map_blocks(lambda x, y: x, 1.0)\n x = x.map_blocks(lambda x, y, z: x, \"abc\", np.array([\"a\", \"b\"], dtype=object))\n assert_eq(x, np.arange(10))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_np_test__parse_gufunc_signature.None_3._parse_gufunc_signature_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_np_test__parse_gufunc_signature.None_3._parse_gufunc_signature_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["imports", "test__parse_gufunc_signature"], "tokens": 388}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\nfrom numpy.testing import assert_equal\n\nimport dask.array as da\nfrom dask.array.core import Array\nfrom dask.array.gufunc import (\n _parse_gufunc_signature,\n _validate_normalize_axes,\n apply_gufunc,\n as_gufunc,\n gufunc,\n)\nfrom dask.array.utils import assert_eq\n\n\n# Copied from `numpy.lib.test_test_function_base.py`:\ndef test__parse_gufunc_signature():\n assert_equal(_parse_gufunc_signature(\"(x)->()\"), ([(\"x\",)], ()))\n assert_equal(_parse_gufunc_signature(\"(x,y)->()\"), ([(\"x\", \"y\")], ()))\n # whitespace\n assert_equal(_parse_gufunc_signature(\" (x, y) ->()\"), ([(\"x\", \"y\")], ()))\n assert_equal(_parse_gufunc_signature(\"(x),(y)->()\"), ([(\"x\",), (\"y\",)], ()))\n assert_equal(_parse_gufunc_signature(\"(x)->(y)\"), ([(\"x\",)], (\"y\",)))\n 
assert_equal(_parse_gufunc_signature(\"(x)->(y),()\"), ([(\"x\",)], [(\"y\",), ()]))\n assert_equal(\n _parse_gufunc_signature(\"(),(a,b,c),(d)->(d,e)\"),\n ([(), (\"a\", \"b\", \"c\"), (\"d\",)], (\"d\", \"e\")),\n )\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"(x)(y)->()\")\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"(x),(y)->\")\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"((x))->(x)\")\n with pytest.raises(ValueError):\n _parse_gufunc_signature(\"(x)->(x),\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_with_meta_test_apply_gufunc_with_meta.assert_eq_expected_1_re": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_with_meta_test_apply_gufunc_with_meta.assert_eq_expected_1_re", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 617, "end_line": 626, "span_ids": ["test_apply_gufunc_with_meta"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_with_meta():\n def stats(x):\n return np.mean(x, axis=-1), np.std(x, axis=-1, dtype=np.float32)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n meta = (np.ones(0, dtype=np.float64), np.ones(0, dtype=np.float32))\n result = apply_gufunc(stats, \"(i)->(),()\", a, meta=meta)\n expected = stats(a.compute())\n assert_eq(expected[0], result[0])\n assert_eq(expected[1], result[1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_as_gufunc_with_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_as_gufunc_with_meta_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 629, "end_line": 646, "span_ids": ["test_as_gufunc_with_meta"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_as_gufunc_with_meta():\n stack = da.ones((1, 50, 60), chunks=(1, -1, -1))\n expected = (stack, stack.max())\n\n meta = (np.array((), dtype=np.float64), np.array((), dtype=np.float64))\n\n @da.as_gufunc(signature=\"(i,j) ->(i,j), ()\", meta=meta)\n def array_and_max(arr):\n return arr, 
np.atleast_1d(arr.max())\n\n result = array_and_max(stack)\n assert_eq(expected[0], result[0])\n\n # Because `np.max` returns a scalar instead of an `np.ndarray`, we cast\n # the expected output to a `np.ndarray`, as `meta` defines that the output\n # should be.\n assert_eq(np.array([expected[1].compute()]), result[1].compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_os_random_images.with_tmpdir_as_dirname_.yield_os_path_join_dirnam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_image.py_os_random_images.with_tmpdir_as_dirname_.yield_os_path_join_dirnam", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_image.py", "file_name": "test_image.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 22, "span_ids": ["imports", "random_images"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nfrom contextlib import contextmanager\n\nimport pytest\n\npytest.importorskip(\"skimage\")\nimport numpy as np\nfrom skimage.io import imsave\n\nfrom dask.array.image import imread as da_imread\nfrom dask.utils import tmpdir\n\n\n@contextmanager\ndef random_images(n, shape):\n with tmpdir() as dirname:\n for i in range(n):\n fn = os.path.join(dirname, \"image.%d.png\" % i)\n x = np.random.randint(0, 255, size=shape).astype(\"u1\")\n imsave(fn, x, check_contrast=False)\n\n yield os.path.join(dirname, \"*.png\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_cholesky_test_cholesky.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_cholesky_test_cholesky.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 805, "end_line": 821, "span_ids": ["test_cholesky"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (12, 3), (30, 3), (30, 6)])\ndef test_cholesky(shape, chunk):\n\n A = _get_symmat(shape)\n dA = da.from_array(A, (chunk, chunk))\n assert_eq(\n da.linalg.cholesky(dA).compute(),\n scipy.linalg.cholesky(A),\n check_graph=False,\n check_chunks=False,\n )\n assert_eq(\n da.linalg.cholesky(dA, lower=True),\n scipy.linalg.cholesky(A, lower=True),\n 
check_graph=False,\n check_chunks=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_np_test_basic.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_numpy_compat.py_np_test_basic.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_numpy_compat.py", "file_name": "test_numpy_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 30, "span_ids": ["test_basic", "imports", "index", "dtype"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq\n\n\n@pytest.fixture(\n params=[\n [(\"A\", (\"f4\", (3, 2))), (\"B\", (\"f4\", 3)), (\"C\", (\"f8\", 3))],\n [(\"A\", (\"i4\", (3, 2))), (\"B\", (\"f4\", 3)), (\"C\", (\"S4\", 3))],\n ]\n)\ndef dtype(request):\n return np.dtype(request.param)\n\n\n@pytest.fixture(params=[[\"A\"], [\"A\", \"B\"], [\"A\", \"B\", \"C\"]])\ndef index(request):\n return request.param\n\n\ndef test_basic():\n # sanity check\n dtype = [(\"a\", \"f8\"), (\"b\", \"f8\"), (\"c\", \"f8\")]\n x = np.ones((5, 3), dtype=dtype)\n dx = da.ones((5, 3), dtype=dtype, chunks=3)\n result = dx[[\"a\", \"b\"]]\n expected = x[[\"a\", \"b\"]]\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_array_creation_blockwise_fusion_test_array_creation_blockwise_fusion.assert_eq_a_np_full_3_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_array_creation_blockwise_fusion_test_array_creation_blockwise_fusion.assert_eq_a_np_full_3_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 398, "end_line": 411, "span_ids": ["test_array_creation_blockwise_fusion"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_creation_blockwise_fusion():\n \"\"\"\n Check that certain array creation routines work with blockwise and can be\n fused with other blockwise operations.\n \"\"\"\n x = da.ones(3, chunks=(3,))\n y = da.zeros(3, chunks=(3,))\n z = da.full(3, fill_value=2, chunks=(3,))\n a = x + y + z\n dsk1 = a.__dask_graph__()\n 
assert len(dsk1) == 5\n dsk2 = optimize_blockwise(dsk1)\n assert len(dsk2) == 1\n assert_eq(a, np.full(3, 3.0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_test_map_overlap_multiarray._are_not_somehow_shifted": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_test_map_overlap_multiarray._are_not_somehow_shifted", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 346, "end_line": 377, "span_ids": ["test_map_overlap_multiarray"], "tokens": 457}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray():\n # Same ndim, same numblocks, same chunks\n x = da.arange(10, chunks=5)\n y = da.arange(10, chunks=5)\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1, boundary=\"none\")\n assert_eq(z, 2 * np.arange(10))\n\n # Same ndim, same numblocks, different chunks\n x = da.arange(10, chunks=(2, 3, 5))\n y = da.arange(10, chunks=(5, 3, 2))\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1, boundary=\"none\")\n assert z.chunks == ((2, 3, 3, 2),)\n assert_eq(z, 2 * np.arange(10))\n\n # Same ndim, different numblocks, different chunks\n x = da.arange(10, chunks=(10,))\n y = da.arange(10, chunks=(4, 4, 2))\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1, boundary=\"none\")\n assert z.chunks == ((4, 4, 2),)\n assert_eq(z, 2 * np.arange(10))\n\n # Different ndim, different numblocks, different chunks\n x = da.arange(10, chunks=(10,))\n y = da.arange(10).reshape(1, 10).rechunk((1, (4, 4, 2)))\n z = da.map_overlap(lambda x, y: x + y, x, y, depth=1, boundary=\"none\")\n assert z.chunks == ((1,), (4, 4, 2))\n assert z.shape == (1, 10)\n assert_eq(z, 2 * np.arange(10)[np.newaxis])\n\n # Note: checks on arange equality in all of the above help ensure that\n # trimming is applied appropriately to result chunks (i.e. 
results\n # are not somehow shifted)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_defaults_test_map_overlap_multiarray_defaults.assert_eq_z_sum_20_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_defaults_test_map_overlap_multiarray_defaults.assert_eq_z_sum_20_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 380, "end_line": 389, "span_ids": ["test_map_overlap_multiarray_defaults"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_defaults():\n # Check that by default, chunk alignment and arrays of varying dimensionality\n # are supported with no effect on result shape\n # (i.e. defaults are pass-through to map_blocks)\n x = da.ones((10,), chunks=10)\n y = da.ones((1, 10), chunks=5)\n z = da.map_overlap(lambda x, y: x + y, x, y, boundary=\"none\")\n # func should be called twice and get (5,) and (1, 5) arrays of ones each time\n assert_eq(z.shape, (1, 10))\n assert_eq(z.sum(), 20.0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_block_broadcast_test_map_overlap_multiarray_block_broadcast.assert_eq_z_sum_4_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_block_broadcast_test_map_overlap_multiarray_block_broadcast.assert_eq_z_sum_4_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 432, "end_line": 448, "span_ids": ["test_map_overlap_multiarray_block_broadcast"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_block_broadcast():\n def func(x, y):\n # Return result with expected padding\n z = x.size + y.size\n return np.ones((3, 3)) * z\n\n # Chunks in trailing dimension will be unified to two chunks of size 6\n # and block broadcast will allow chunks from x to repeat\n x = da.ones((12,), chunks=12) # numblocks = (1,) -> (2, 2) after broadcast\n y = da.ones((16, 12), chunks=(8, 6)) # numblocks = (2, 2)\n z = da.map_overlap(\n func, x, y, chunks=(3, 
3), depth=1, trim=True, boundary=\"reflect\"\n )\n assert_eq(z, z)\n assert z.shape == (2, 2)\n # func call will receive (8,) and (10, 8) arrays for each of 4 blocks\n assert_eq(z.sum(), 4.0 * (10 * 8 + 8))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_ensure_minimum_chunksize_test_ensure_minimum_chunksize_raises_error.with_pytest_raises_ValueE.ensure_minimum_chunksize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_ensure_minimum_chunksize_test_ensure_minimum_chunksize_raises_error.with_pytest_raises_ValueE.ensure_minimum_chunksize_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 771, "end_line": 795, "span_ids": ["test_ensure_minimum_chunksize", "test_ensure_minimum_chunksize_raises_error"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"chunks,expected\",\n [\n [(10,), (10,)],\n [(10, 10), (10, 10)],\n [\n (10, 10, 1),\n (10, 11),\n ],\n [(20, 20, 20, 1), (20, 20, 11, 10)],\n [(20, 20, 10, 1), (20, 20, 11)],\n [(2, 20, 2, 20), (14, 10, 20)],\n [(1, 1, 1, 1, 7), (11,)],\n [(20, 20, 2, 20, 20, 2), (20, 12, 10, 20, 12, 10)],\n ],\n)\ndef test_ensure_minimum_chunksize(chunks, expected):\n actual = ensure_minimum_chunksize(10, chunks)\n assert actual == expected\n\n\ndef test_ensure_minimum_chunksize_raises_error():\n chunks = (5, 2, 1, 1)\n with pytest.raises(ValueError, match=\"overlapping depth 10 is larger than\"):\n ensure_minimum_chunksize(10, chunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_sliding_window_view_test_sliding_window_view.assert_eq_expected_actua": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_sliding_window_view_test_sliding_window_view.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 798, "end_line": 819, "span_ids": ["test_sliding_window_view"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, chunks, window_shape, axis\",\n [\n ((6, 7, 8), (6, (2, 2, 2, 1), 4), (3, 2), (1, 2)), # chunks vary 
along axis\n ((40, 30, 2), 5, (3,), (0,)), # window < chunk\n ((21,), 3, (7,), (0,)), # window > chunk\n ((9,), 3, 3, 0), # window == chunk, axis is integer\n ((9,), 3, 3, -1), # axis=-1\n ((9,), 3, 3, None), # axis=None\n ((9, 8), 3, (2, 4), None), # axis=None\n ((9,), 3, (3, 3, 3), (0, 0, 0)), # axis is repeated\n ((9,), 3, (3, 3), (0, -1)), # axis is repeated, with -1\n ((9,), 3, [3, 3], [0, -1]), # list instead of tuple\n ],\n)\ndef test_sliding_window_view(shape, chunks, window_shape, axis):\n from ..numpy_compat import sliding_window_view as np_sliding_window_view\n\n arr = da.from_array(np.arange(np.prod(shape)).reshape(shape), chunks=chunks)\n actual = sliding_window_view(arr, window_shape, axis)\n expected = np_sliding_window_view(arr.compute(), window_shape, axis)\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_sliding_window_errors_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_sliding_window_errors_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 822, "end_line": 837, "span_ids": ["test_sliding_window_errors"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"window_shape, axis\",\n [\n ((10,), 0), # window > axis shape\n ((2,), 3), # axis > ndim\n (-1, 0), # window shape is negative\n (2, (0, 1)), # len(window shape) < len(axis)\n (2, None), # len(window shape) < len(axis)\n (0, None), # window_shape = 0\n ],\n)\ndef test_sliding_window_errors(window_shape, axis):\n arr = da.zeros((4, 3))\n with pytest.raises(ValueError):\n sliding_window_view(arr, window_shape, axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_warnings_from_dask_utils_import_fu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_warnings_from_dask_utils_import_fu", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports"], "tokens": 106}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nfrom itertools import product\n\nimport pytest\n\nnp = 
pytest.importorskip(\"numpy\")\n\nimport dask\nimport dask.array as da\nfrom dask.array.rechunk import (\n _breakpoints,\n _intersect_1d,\n _old_to_new,\n cumdims_label,\n divide_to_width,\n intersect_chunks,\n merge_to_number,\n normalize_chunks,\n plan_rechunk,\n rechunk,\n)\nfrom dask.array.utils import assert_eq\nfrom dask.utils import funcname", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_os_test_numel.None_1.for_sub_in_itertools_comb.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_os_test_numel.None_1.for_sub_in_itertools_comb.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 49, "span_ids": ["test_numel", "imports", "assert_eq"], "tokens": 394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport warnings\nfrom itertools import permutations, zip_longest\n\nimport pytest\n\nnp = pytest.importorskip(\"numpy\")\n\nimport itertools\n\nimport dask.array as da\nimport dask.config as config\nfrom dask.array.utils import assert_eq as _assert_eq\nfrom dask.array.utils import same_keys\nfrom dask.core import get_deps\n\n\ndef assert_eq(a, b):\n _assert_eq(a, b, equal_nan=True)\n\n\n@pytest.mark.parametrize(\"dtype\", [\"f4\", \"i4\"])\n@pytest.mark.parametrize(\"keepdims\", [True, False])\ndef test_numel(dtype, keepdims):\n x = np.ones((2, 3, 4))\n\n assert_eq(\n da.reductions.numel(x, axis=(), keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=(), keepdims=keepdims, dtype=dtype),\n )\n assert_eq(\n da.reductions.numel(x, axis=0, keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=0, keepdims=keepdims, dtype=dtype),\n )\n\n for length in range(x.ndim):\n for sub in itertools.combinations([d for d in range(x.ndim)], length):\n assert_eq(\n da.reductions.numel(x, axis=sub, keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=sub, keepdims=keepdims, dtype=dtype),\n )\n\n for length in range(x.ndim):\n for sub in itertools.combinations([d for d in range(x.ndim)], length):\n ssub = np.random.shuffle(list(sub))\n assert_eq(\n da.reductions.numel(x, axis=ssub, keepdims=keepdims, dtype=dtype),\n np.sum(x, axis=ssub, keepdims=keepdims, dtype=dtype),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_np_test_reshape_rechunk.assert_np_prod_list_map_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reshape.py_np_test_reshape_rechunk.assert_np_prod_list_map_l", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reshape.py", 
"file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 52, "span_ids": ["imports", "test_reshape_rechunk"], "tokens": 843}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array.reshape import contract_tuple, expand_tuple, reshape_rechunk\nfrom dask.array.utils import assert_eq\n\n\n@pytest.mark.parametrize(\n \"inshape,outshape,prechunks,inchunks,outchunks\",\n [\n ((4,), (4,), ((2, 2),), ((2, 2),), ((2, 2),)),\n ((4,), (2, 2), ((2, 2),), ((2, 2),), ((1, 1), (2,))),\n ((4,), (4, 1), ((2, 2),), ((2, 2),), ((2, 2), (1,))),\n ((4,), (1, 4), ((2, 2),), ((2, 2),), ((1,), (2, 2))),\n ((1, 4), (4,), ((1,), (2, 2)), ((1,), (2, 2)), ((2, 2),)),\n ((4, 1), (4,), ((2, 2), (1,)), ((2, 2), (1,)), ((2, 2),)),\n (\n (4, 1, 4),\n (4, 4),\n ((2, 2), (1,), (2, 2)),\n ((2, 2), (1,), (2, 2)),\n ((2, 2), (2, 2)),\n ),\n ((4, 4), (4, 1, 4), ((2, 2), (2, 2)), ((2, 2), (2, 2)), ((2, 2), (1,), (2, 2))),\n ((2, 2), (4,), ((2,), (2,)), ((2,), (2,)), ((4,),)),\n ((2, 2), (4,), ((1, 1), (2,)), ((1, 1), (2,)), ((2, 2),)),\n ((2, 2), (4,), ((2,), (1, 1)), ((1, 1), (2,)), ((2, 2),)),\n (\n (64,),\n (4, 4, 4),\n ((8, 8, 8, 8, 8, 8, 8, 8),),\n ((16, 16, 16, 16),),\n ((1, 1, 1, 1), (4,), (4,)),\n ),\n ((64,), (4, 4, 4), ((32, 32),), ((32, 32),), ((2, 2), (4,), (4,))),\n ((64,), (4, 4, 4), ((16, 48),), ((16, 48),), ((1, 3), (4,), (4,))),\n ((64,), (4, 4, 4), ((20, 44),), ((16, 48),), ((1, 3), (4,), (4,))),\n (\n (64, 4),\n (8, 8, 4),\n ((16, 16, 16, 16), (2, 2)),\n ((16, 16, 16, 16), (2, 2)),\n ((2, 2, 2, 2), (8,), (2, 2)),\n ),\n ],\n)\ndef test_reshape_rechunk(inshape, outshape, prechunks, inchunks, outchunks):\n result_in, result_out = reshape_rechunk(inshape, outshape, prechunks)\n assert result_in == inchunks\n assert result_out == outchunks\n assert np.prod(list(map(len, result_in))) == np.prod(list(map(len, result_out)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_rot90_test_rot90.try_.else_.if_len_axes_2_or_axes.else_.for_k_in_range_3_9_.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_rot90_test_rot90.try_.else_.if_len_axes_2_or_axes.else_.for_k_in_range_3_9_.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 224, "end_line": 251, "span_ids": ["test_rot90"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"kwargs\",\n [{}, {\"axes\": (1, 0)}, 
{\"axes\": (2, 3)}, {\"axes\": (0, 1, 2)}, {\"axes\": (1, 1)}],\n)\n@pytest.mark.parametrize(\"shape\", [tuple(), (4,), (4, 6), (4, 6, 8), (4, 6, 8, 10)])\ndef test_rot90(kwargs, shape):\n axes = kwargs.get(\"axes\", (0, 1))\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=1)\n\n np_func = getattr(np, \"rot90\")\n da_func = getattr(da, \"rot90\")\n\n try:\n for axis in axes[:2]:\n range(np_a.ndim)[axis]\n except IndexError:\n with pytest.raises(ValueError):\n da_func(da_a, **kwargs)\n else:\n if len(axes) != 2 or axes[0] == axes[1]:\n with pytest.raises(ValueError):\n da_func(da_a, **kwargs)\n else:\n for k in range(-3, 9):\n np_r = np_func(np_a, k=k, **kwargs)\n da_r = da_func(da_a, k=k, **kwargs)\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_test_bincount._can_bincount_result_be_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_bincount_test_bincount._can_bincount_result_be_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 628, "end_line": 641, "span_ids": ["test_bincount"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bincount():\n x = np.array([2, 1, 5, 2, 1])\n d = da.from_array(x, chunks=2)\n e = da.bincount(d, minlength=6)\n assert_eq(e, np.bincount(x, minlength=6))\n assert same_keys(da.bincount(d, minlength=6), e)\n assert e.shape == (6,) # shape equal to minlength\n assert e.chunks == ((6,),)\n\n assert da.bincount(d, minlength=6).name != da.bincount(d, minlength=7).name\n assert da.bincount(d, minlength=6).name == da.bincount(d, minlength=6).name\n\n expected_output = np.array([0, 2, 2, 0, 0, 1], dtype=e.dtype)\n assert_eq(e[0:], expected_output) # can bincount result be sliced", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_alternative_bins_range_test_histogramdd_alternative_bins_range.assert_same_keys_da_histo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_alternative_bins_range_test_histogramdd_alternative_bins_range.assert_same_keys_da_histo", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 869, "end_line": 887, "span_ids": ["test_histogramdd_alternative_bins_range"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_alternative_bins_range():\n # test for normal input\n n1, n2 = 600, 3\n x = da.random.uniform(0, 1, size=(n1, n2), chunks=((200, 200, 200), (3,)))\n bins = (3, 5, 4)\n ranges = ((0, 1),) * len(bins)\n (a1, b1) = da.histogramdd(x, bins=bins, range=ranges)\n (a2, b2) = np.histogramdd(x, bins=bins, range=ranges)\n (a3, b3) = np.histogramdd(x.compute(), bins=bins, range=ranges)\n assert_eq(a1, a2)\n assert_eq(a1, a3)\n bins = 4\n (a1, b1) = da.histogramdd(x, bins=bins, range=ranges)\n (a2, b2) = np.histogramdd(x, bins=bins, range=ranges)\n assert_eq(a1, a2)\n\n assert a1.sum() == n1\n assert a2.sum() == n1\n assert same_keys(da.histogramdd(x, bins=bins, range=ranges)[0], a1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_density_test_histogramdd_density.assert_same_keys_da_histo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_density_test_histogramdd_density.assert_same_keys_da_histo", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 910, "end_line": 921, "span_ids": ["test_histogramdd_density"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_density():\n n1, n2 = 800, 3\n x = da.random.uniform(0, 1, size=(n1, n2), chunks=(200, 3))\n bins = [[0, 0.5, 1], [0, 0.25, 0.85, 1], [0, 0.5, 0.8, 1]]\n (a1, b1) = da.histogramdd(x, bins=bins, density=True)\n (a2, b2) = np.histogramdd(x, bins=bins, density=True)\n (a3, b3) = da.histogramdd(x, bins=bins, normed=True)\n (a4, b4) = np.histogramdd(x.compute(), bins=bins, density=True)\n assert_eq(a1, a2)\n assert_eq(a1, a3)\n assert_eq(a1, a4)\n assert same_keys(da.histogramdd(x, bins=bins, density=True)[0], a1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_bins_or_range_test_histogramdd_raises_incompat_bins_or_range.None_2.da_histogramdd_data_bins": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_bins_or_range_test_histogramdd_raises_incompat_bins_or_range.None_2.da_histogramdd_data_bins", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", 
"start_line": 907, "end_line": 933, "span_ids": ["test_histogramdd_raises_incompat_bins_or_range"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_raises_incompat_bins_or_range():\n data = da.random.random(size=(10, 4), chunks=(5, 4))\n bins = (2, 3, 4, 5)\n ranges = ((0, 1),) * len(bins)\n\n # bad number of bins defined (should be data.shape[1])\n bins = (2, 3, 4)\n with pytest.raises(\n ValueError,\n match=\"The dimension of bins must be equal to the dimension of the sample.\",\n ):\n da.histogramdd(data, bins=bins, range=ranges)\n\n # one range per dimension is required.\n bins = (2, 3, 4, 5)\n ranges = ((0, 1),) * 3\n with pytest.raises(\n ValueError,\n match=\"range argument requires one entry, a min max pair, per dimension.\",\n ):\n da.histogramdd(data, bins=bins, range=ranges)\n\n # has range elements that are not pairs\n with pytest.raises(\n ValueError, match=\"range argument should be a sequence of pairs\"\n ):\n da.histogramdd(data, bins=bins, range=((0, 1), (0, 1, 2), 3, 5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_cov_test_cov.with_pytest_raises_ValueE.da_cov_d_ddof_1_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_cov_test_cov.with_pytest_raises_ValueE.da_cov_d_ddof_1_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1169, "end_line": 1188, "span_ids": ["test_cov"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cov():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n assert_eq(da.cov(d), np.cov(x))\n assert_eq(da.cov(d, rowvar=0), np.cov(x, rowvar=0))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning) # dof <= 0 for slice\n assert_eq(da.cov(d, ddof=10), np.cov(x, ddof=10))\n assert_eq(da.cov(d, bias=1), np.cov(x, bias=1))\n assert_eq(da.cov(d, d), np.cov(x, x))\n\n y = np.arange(8)\n e = da.from_array(y, chunks=(4,))\n\n assert_eq(da.cov(d, e), np.cov(x, y))\n assert_eq(da.cov(e, d), np.cov(y, x))\n\n with pytest.raises(ValueError):\n da.cov(d, ddof=1.5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_power_divergence_invalid_test_skew_single_return_type.assert_isinstance_result_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_power_divergence_invalid_test_skew_single_return_type.assert_isinstance_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 153, "span_ids": ["test_skew_single_return_type", "test_power_divergence_invalid", "test_skew_raises"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_power_divergence_invalid():\n a = np.random.random(size=30)\n a_ = da.from_array(a, 3)\n\n with pytest.raises(ValueError):\n dask.array.stats.power_divergence(a_, lambda_=\"wrong\")\n\n\ndef test_skew_raises():\n a = da.ones((7,), chunks=(7,))\n with pytest.raises(ValueError, match=\"7 samples\"):\n dask.array.stats.skewtest(a)\n\n\ndef test_skew_single_return_type():\n \"\"\"This function tests the return type for the skew method for a 1d array.\"\"\"\n numpy_array = np.random.random(size=(30,))\n dask_array = da.from_array(numpy_array, 3)\n result = dask.array.stats.skew(dask_array).compute()\n assert isinstance(result, np.float64)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_kurtosis_single_return_type_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_test_kurtosis_single_return_type_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 156, "end_line": 164, "span_ids": ["test_kurtosis_single_return_type"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_kurtosis_single_return_type():\n \"\"\"This function tests the return type for the kurtosis method for a 1d array.\"\"\"\n numpy_array = np.random.random(size=(30,))\n dask_array = da.from_array(numpy_array, 3)\n result = dask.array.stats.kurtosis(dask_array).compute()\n result_non_fisher = dask.array.stats.kurtosis(dask_array, fisher=False).compute()\n assert isinstance(result, np.float64)\n assert isinstance(result_non_fisher, np.float64)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_xml.etree.ElementTree_test_basic.None_6": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_svg.py_xml.etree.ElementTree_test_basic.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_svg.py", "file_name": "test_svg.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 21, "span_ids": ["imports", "parses", "test_basic"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import xml.etree.ElementTree\n\nimport pytest\n\nimport dask.array as da\nfrom dask.array.svg import draw_sizes\n\n\ndef parses(text):\n cleaned = text.replace(\"→\", \"\") # xml doesn't like righarrow character\n assert xml.etree.ElementTree.fromstring(cleaned) is not None # parses cleanly\n\n\ndef test_basic():\n parses(da.ones(10).to_svg())\n parses(da.ones((10, 10)).to_svg())\n parses(da.ones((10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10, 10, 10)).to_svg())\n parses(da.ones((10, 10, 10, 10, 10, 10, 10)).to_svg())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_pytest_test_asanyarray.assert_eq_y_y_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_pytest_test_asanyarray.assert_eq_y_y_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_xarray.py", "file_name": "test_xarray.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 25, "span_ids": ["test_asarray", "imports", "test_asanyarray", "test_mean"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask.array as da\n\nfrom ..utils import assert_eq\n\nxr = pytest.importorskip(\"xarray\")\n\n\ndef test_mean():\n y = da.mean(xr.DataArray([1, 2, 3.0]))\n assert isinstance(y, da.Array)\n assert_eq(y, y)\n\n\ndef test_asarray():\n y = da.asarray(xr.DataArray([1, 2, 3.0]))\n assert isinstance(y, da.Array)\n assert_eq(y, y)\n\n\ndef test_asanyarray():\n y = da.asanyarray(xr.DataArray([1, 2, 3.0]))\n assert isinstance(y, da.Array)\n assert_eq(y, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_test_asarray_xarray_intersphinx_workaround_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_xarray.py_test_asarray_xarray_intersphinx_workaround_", "embedding": null, 
"metadata": {"file_path": "dask/array/tests/test_xarray.py", "file_name": "test_xarray.py", "file_type": "text/x-python", "category": "test", "start_line": 28, "end_line": 39, "span_ids": ["test_asarray_xarray_intersphinx_workaround"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_asarray_xarray_intersphinx_workaround():\n # test that the intersphinx workaround in https://github.com/pydata/xarray/issues/4279 works\n module = xr.DataArray.__module__\n try:\n xr.DataArray.__module__ = \"xarray\"\n y = da.asarray(xr.DataArray([1, 2, 3.0]))\n assert isinstance(y, da.Array)\n assert type(y._meta).__name__ == \"ndarray\"\n assert_eq(y, y)\n finally:\n xr.DataArray.__module__ = module", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_from_functools_import_par___array_wrap__.return.x___array_wrap___numpy_uf": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ufunc.py_from_functools_import_par___array_wrap__.return.x___array_wrap___numpy_uf", "embedding": null, "metadata": {"file_path": "dask/array/ufunc.py", "file_name": "ufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports", "__array_wrap__"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import partial\nfrom operator import getitem\n\nimport numpy as np\n\nfrom .. 
import core\nfrom ..base import is_dask_collection, normalize_function\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import (\n derived_from,\n funcname,\n is_dataframe_like,\n is_index_like,\n is_series_like,\n)\nfrom .core import Array, apply_infer_dtype, asarray, blockwise, elemwise\n\n\ndef __array_wrap__(numpy_ufunc, x, *args, **kwargs):\n return x.__array_wrap__(numpy_ufunc(x, *args, **kwargs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__check_chunks__check_chunks.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py__check_chunks__check_chunks.return.x", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 217, "end_line": 230, "span_ids": ["_check_chunks"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _check_chunks(x, scheduler=None):\n x = x.persist(scheduler=scheduler)\n for idx in itertools.product(*(range(len(c)) for c in x.chunks)):\n chunk = x.dask[(x.name,) + idx]\n if hasattr(chunk, \"result\"): # it's a future\n chunk = chunk.result()\n if not hasattr(chunk, \"dtype\"):\n chunk = np.array(chunk, dtype=\"O\")\n expected_shape = tuple(c[i] for c, i in zip(x.chunks, idx))\n assert_eq_shape(expected_shape, chunk.shape, check_nan=False)\n assert (\n chunk.dtype == x.dtype\n ), \"maybe you forgot to pass the scheduler to `assert_eq`?\"\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_safe_wraps__dtype_of.try_.except_AttributeError_.return.np_asanyarray_a_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_safe_wraps__dtype_of.try_.except_AttributeError_.return.np_asanyarray_a_dtype", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 362, "end_line": 380, "span_ids": ["safe_wraps", "_dtype_of"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def safe_wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS):\n \"\"\"Like functools.wraps, but safe to use even if wrapped is not a function.\n\n Only needed on Python 2.\n \"\"\"\n if all(hasattr(wrapped, attr) for attr in assigned):\n return functools.wraps(wrapped, assigned=assigned)\n else:\n return lambda x: 
x\n\n\ndef _dtype_of(a):\n \"\"\"Determine dtype of an array-like.\"\"\"\n try:\n # Check for the attribute before using asanyarray, because some types\n # (notably sparse arrays) don't work with it.\n return a.dtype\n except AttributeError:\n return np.asanyarray(a).dtype", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_arange_safe_arange_safe.if_like_is_None_.else_.try_.except_TypeError_.return.np_arange_args_kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_arange_safe_arange_safe.if_like_is_None_.else_.try_.except_TypeError_.return.np_arange_args_kwargs", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 436, "end_line": 448, "span_ids": ["arange_safe"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def arange_safe(*args, like, **kwargs):\n \"\"\"\n Use the `like=` from `np.arange` to create a new array dispatching\n to the downstream library. If that fails, falls back to the\n default NumPy behavior, resulting in a `numpy.ndarray`.\n \"\"\"\n if like is None:\n return np.arange(*args, **kwargs)\n else:\n try:\n return np.arange(*args, like=meta_from_array(like), **kwargs)\n except TypeError:\n return np.arange(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_svd_flip_svd_flip.return.u_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_svd_flip_svd_flip.return.u_v", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 526, "end_line": 565, "span_ids": ["svd_flip"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def svd_flip(u, v, u_based_decision=False):\n \"\"\"Sign correction to ensure deterministic output from SVD.\n\n This function is useful for orienting eigenvectors such that\n they all lie in a shared but arbitrary half-space. 
This makes\n it possible to ensure that results are equivalent across SVD\n implementations and random number generator states.\n\n Parameters\n ----------\n\n u : (M, K) array_like\n Left singular vectors (in columns)\n v : (K, N) array_like\n Right singular vectors (in rows)\n u_based_decision: bool\n Whether or not to choose signs based\n on `u` rather than `v`, by default False\n\n Returns\n -------\n\n u : (M, K) array_like\n Left singular vectors with corrected sign\n v: (K, N) array_like\n Right singular vectors with corrected sign\n \"\"\"\n # Determine half-space in which all singular vectors\n # lie relative to an arbitrary vector; summation\n # equivalent to dot product with row vector of ones\n if u_based_decision:\n dtype = u.dtype\n signs = np.sum(u, axis=0, keepdims=True)\n else:\n dtype = v.dtype\n signs = np.sum(v, axis=1, keepdims=True).T\n signs = 2.0 * ((signs >= 0) - 0.5).astype(dtype)\n # Force all singular vectors into same half-space\n u, v = u * signs, v * signs.T\n return u, v", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_shape_as_first_arg_wrap_func_shape_as_first_arg.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap_func_shape_as_first_arg_wrap_func_shape_as_first_arg.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 78, "span_ids": ["wrap_func_shape_as_first_arg"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_func_shape_as_first_arg(func, *args, **kwargs):\n \"\"\"\n Transform np creation function into blocked version\n \"\"\"\n if \"shape\" not in kwargs:\n shape, args = args[0], args[1:]\n else:\n shape = kwargs.pop(\"shape\")\n\n if isinstance(shape, Array):\n raise TypeError(\n \"Dask array input not supported. 
\"\n \"Please use tuple, list, or a 1D numpy array instead.\"\n )\n\n parsed = _parse_wrap_args(func, args, kwargs, shape)\n shape = parsed[\"shape\"]\n dtype = parsed[\"dtype\"]\n chunks = parsed[\"chunks\"]\n name = parsed[\"name\"]\n kwargs = parsed[\"kwargs\"]\n func = partial(func, dtype=dtype, **kwargs)\n\n out_ind = dep_ind = tuple(range(len(shape)))\n graph = core_blockwise(\n func,\n name,\n out_ind,\n ArrayChunkShapeDep(chunks),\n dep_ind,\n numblocks={},\n )\n\n return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get(\"meta\", None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro_to_avro.files.open_files_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro_to_avro.files.open_files_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 171, "end_line": 260, "span_ids": ["to_avro"], "tokens": 764}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_avro(\n b,\n filename,\n schema,\n name_function=None,\n storage_options=None,\n codec=\"null\",\n sync_interval=16000,\n metadata=None,\n compute=True,\n **kwargs,\n):\n \"\"\"Write bag to set of avro files\n\n The schema is a complex dictionary describing the data, see\n https://avro.apache.org/docs/1.8.2/gettingstartedpython.html#Defining+a+schema\n and https://fastavro.readthedocs.io/en/latest/writer.html .\n It's structure is as follows::\n\n {'name': 'Test',\n 'namespace': 'Test',\n 'doc': 'Descriptive text',\n 'type': 'record',\n 'fields': [\n {'name': 'a', 'type': 'int'},\n ]}\n\n where the \"name\" field is required, but \"namespace\" and \"doc\" are optional\n descriptors; \"type\" must always be \"record\". The list of fields should\n have an entry for every key of the input records, and the types are\n like the primitive, complex or logical types of the Avro spec\n ( https://avro.apache.org/docs/1.8.2/spec.html ).\n\n Results in one avro file per input partition.\n\n Parameters\n ----------\n b: dask.bag.Bag\n filename: list of str or str\n Filenames to write to. If a list, number must match the number of\n partitions. If a string, must include a glob character \"*\", which will\n be expanded using name_function\n schema: dict\n Avro schema dictionary, see above\n name_function: None or callable\n Expands integers into strings, see\n ``dask.bytes.utils.build_name_function``\n storage_options: None or dict\n Extra key/value options to pass to the backend file-system\n codec: 'null', 'deflate', or 'snappy'\n Compression algorithm\n sync_interval: int\n Number of records to include in each block within a file\n metadata: None or dict\n Included in the file header\n compute: bool\n If True, files are written immediately, and function blocks. 
If False,\n returns delayed objects, which can be computed by the user where\n convenient.\n kwargs: passed to compute(), if compute=True\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence([{'name': 'Alice', 'value': 100},\n ... {'name': 'Bob', 'value': 200}])\n >>> schema = {'name': 'People', 'doc': \"Set of people's scores\",\n ... 'type': 'record',\n ... 'fields': [\n ... {'name': 'name', 'type': 'string'},\n ... {'name': 'value', 'type': 'int'}]}\n >>> b.to_avro('my-data.*.avro', schema) # doctest: +SKIP\n ['my-data.0.avro', 'my-data.1.avro']\n \"\"\"\n # TODO infer schema from first partition of data\n from dask.utils import import_required\n\n import_required(\n \"fastavro\", \"fastavro is a required dependency for using bag.to_avro().\"\n )\n _verify_schema(schema)\n\n storage_options = storage_options or {}\n files = open_files(\n filename,\n \"wb\",\n name_function=name_function,\n num=b.npartitions,\n **storage_options,\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro.name_to_avro.if_compute_.else_.return.out_to_delayed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/avro.py_to_avro.name_to_avro.if_compute_.else_.return.out_to_delayed_", "embedding": null, "metadata": {"file_path": "dask/bag/avro.py", "file_name": "avro.py", "file_type": "text/x-python", "category": "implementation", "start_line": 261, "end_line": 280, "span_ids": ["to_avro"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_avro(\n b,\n filename,\n schema,\n name_function=None,\n storage_options=None,\n codec=\"null\",\n sync_interval=16000,\n metadata=None,\n compute=True,\n **kwargs,\n):\n # ... 
other code\n name = \"to-avro-\" + uuid.uuid4().hex\n dsk = {\n (name, i): (\n _write_avro_part,\n (b.name, i),\n f,\n schema,\n codec,\n sync_interval,\n metadata,\n )\n for i, f in enumerate(files)\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])\n out = type(b)(graph, name, b.npartitions)\n if compute:\n out.compute(**kwargs)\n return [f.path for f in files]\n else:\n return out.to_delayed()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_temporary_directory_test_temporary_directory.with_ProcessPoolExecutor_.with_dask_config_set_temp.assert_any_fn_endswith_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_temporary_directory_test_temporary_directory.with_ProcessPoolExecutor_.with_dask_config_set_temp.assert_any_fn_endswith_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1450, "end_line": 1459, "span_ids": ["test_temporary_directory"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_temporary_directory(tmpdir):\n b = db.range(10, npartitions=4)\n\n # We use a pool to avoid a race condition between the pool close\n # cleaning up files, and the assert below.\n with ProcessPoolExecutor(4) as pool:\n with dask.config.set(temporary_directory=str(tmpdir), pool=pool):\n b2 = b.groupby(lambda x: x % 2)\n b2.compute()\n assert any(fn.endswith(\".partd\") for fn in os.listdir(str(tmpdir)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_from_functools_import_par_fmt_bs_enc_path._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_from_functools_import_par_fmt_bs_enc_path._", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_text.py", "file_name": "test_text.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 42, "span_ids": ["imports"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import partial\n\nimport pytest\nfrom fsspec.compression import compr\nfrom tlz import concat\n\nimport dask\nfrom dask import compute\nfrom dask.bag.text import read_text\nfrom dask.bytes import utils\nfrom dask.utils import filetexts\n\ncompute = partial(compute, scheduler=\"sync\")\n\n\nfiles = {\n 
\".test.accounts.1.json\": (\n '{\"amount\": 100, \"name\": \"Alice\"}\\n'\n '{\"amount\": 200, \"name\": \"Bob\"}\\n'\n '{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n '{\"amount\": 400, \"name\": \"Dennis\"}\\n'\n ),\n \".test.accounts.2.json\": (\n '{\"amount\": 500, \"name\": \"Alice\"}\\n'\n '{\"amount\": 600, \"name\": \"Bob\"}\\n'\n '{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n '{\"amount\": 800, \"name\": \"Dennis\"}\\n'\n ),\n}\n\n\nexpected = \"\".join([files[v] for v in sorted(files)])\n\nfmt_bs = [(fmt, None) for fmt in compr] + [(None, \"10 B\")] # type: ignore\n\nencodings = [\"ascii\", \"utf-8\"] # + ['utf-16', 'utf-16-le', 'utf-16-be']\nfmt_bs_enc_path = [\n (fmt, bs, encoding, include_path)\n for fmt, bs in fmt_bs\n for encoding in encodings\n for include_path in (True, False)\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_unicode_no_collection_test_files_per_partition.with_filetexts_files3_.with_dask_config_set_sc.assert_len_b_unique_paths": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_read_text_unicode_no_collection_test_files_per_partition.with_filetexts_files3_.with_dask_config_set_sc.assert_len_b_unique_paths", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_text.py", "file_name": "test_text.py", "file_type": "text/x-python", "category": "test", "start_line": 88, "end_line": 124, "span_ids": ["test_read_text_unicode_no_collection", "test_files_per_partition"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text_unicode_no_collection(tmp_path):\n data = b\"abcd\\xc3\\xa9\"\n fn = tmp_path / \"data.txt\"\n with open(fn, \"wb\") as f:\n f.write(b\"\\n\".join([data, data]))\n\n f = read_text(fn, collection=False)\n\n result = f[0].compute()\n assert len(result) == 2\n\n\ndef test_files_per_partition():\n files3 = {f\"{n:02}.txt\": \"line from {:02}\" for n in range(20)}\n with filetexts(files3):\n # single-threaded scheduler to ensure the warning happens in the\n # same thread as the pytest.warns\n with dask.config.set({\"scheduler\": \"single-threaded\"}):\n with pytest.warns(UserWarning):\n b = read_text(\"*.txt\", files_per_partition=10)\n l = len(b.take(100, npartitions=1))\n\n assert l == 10, \"10 files should be grouped into one partition\"\n\n assert b.count().compute() == 20, \"All 20 lines should be read\"\n\n with pytest.warns(UserWarning):\n b = read_text(\"*.txt\", files_per_partition=10, include_path=True)\n p = b.take(100, npartitions=1)\n\n p_paths = tuple(zip(*p))[1]\n p_unique_paths = set(p_paths)\n assert len(p_unique_paths) == 10\n\n b_paths = tuple(zip(*b.compute()))[1]\n b_unique_paths = set(b_paths)\n assert len(b_unique_paths) == 20", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_errors_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_text.py_test_errors_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_text.py", "file_name": "test_text.py", "file_type": "text/x-python", "category": "test", "start_line": 127, "end_line": 153, "span_ids": ["test_errors", "test_complex_delimiter"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_errors():\n with filetexts({\".test.foo\": b\"Jos\\xe9\\nAlice\"}, mode=\"b\"):\n with pytest.raises(UnicodeDecodeError):\n read_text(\".test.foo\", encoding=\"ascii\").compute()\n\n result = read_text(\".test.foo\", encoding=\"ascii\", errors=\"ignore\")\n result = result.compute(scheduler=\"sync\")\n assert result == [\"Jos\\n\", \"Alice\"]\n\n\ndef test_complex_delimiter():\n longstr = \"abc\\ndef\\n123\\n$$$$\\ndog\\ncat\\nfish\\n\\n\\r\\n$$$$hello\"\n with filetexts({\".test.delim.txt\": longstr}):\n assert read_text(\".test.delim.txt\", linedelimiter=\"$$$$\").count().compute() == 3\n assert (\n read_text(\".test.delim.txt\", linedelimiter=\"$$$$\", blocksize=2)\n .count()\n .compute()\n == 3\n )\n vals = read_text(\".test.delim.txt\", linedelimiter=\"$$$$\").compute()\n assert vals[-1] == \"hello\"\n assert vals[0].endswith(\"$$$$\")\n vals = read_text(\".test.delim.txt\", linedelimiter=\"$$$$\", blocksize=2).compute()\n assert vals[-1] == \"hello\"\n assert vals[0].endswith(\"$$$$\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_read_text.if_blocksize_is_None__read_text.return.blocks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_read_text.if_blocksize_is_None__read_text.return.blocks", "embedding": null, "metadata": {"file_path": "dask/bag/text.py", "file_name": "text.py", "file_type": "text/x-python", "category": "implementation", "start_line": 92, "end_line": 159, "span_ids": ["read_text"], "tokens": 536}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_text(\n urlpath,\n blocksize=None,\n compression=\"infer\",\n encoding=system_encoding,\n errors=\"strict\",\n linedelimiter=None,\n collection=True,\n storage_options=None,\n files_per_partition=None,\n include_path=False,\n):\n # ... 
other code\n\n if blocksize is None:\n if linedelimiter in [None, \"\", \"\\n\", \"\\r\", \"\\r\\n\"]:\n newline = linedelimiter\n linedelimiter = None\n else:\n newline = \"\"\n files = open_files(\n urlpath,\n mode=\"rt\",\n encoding=encoding,\n errors=errors,\n compression=compression,\n newline=newline,\n **(storage_options or {}),\n )\n if files_per_partition is None:\n blocks = [\n delayed(list)(\n delayed(\n partial(file_to_blocks, include_path, delimiter=linedelimiter)\n )(fil)\n )\n for fil in files\n ]\n else:\n blocks = []\n for start in range(0, len(files), files_per_partition):\n block_files = files[start : (start + files_per_partition)]\n block_lines = delayed(concat)(\n delayed(map)(\n partial(file_to_blocks, include_path, delimiter=linedelimiter),\n block_files,\n )\n )\n blocks.append(block_lines)\n else:\n # special case for linedelimiter=None: we will need to split on an actual bytestring\n # and the line reader will then use \"universal\" mode. Just as well that \\r\\n and \\n\n # will both work (thankfully \\r for MacOS is no longer a thing)\n o = read_bytes(\n urlpath,\n delimiter=linedelimiter.encode() if linedelimiter is not None else b\"\\n\",\n blocksize=blocksize,\n sample=False,\n compression=compression,\n include_path=include_path,\n **(storage_options or {}),\n )\n raw_blocks = o[1]\n blocks = [\n delayed(decode)(b, encoding, errors, linedelimiter)\n for b in concat(raw_blocks)\n ]\n if include_path:\n paths = list(\n concat([[path] * len(raw_blocks[i]) for i, path in enumerate(o[2])])\n )\n blocks = [\n delayed(attach_path)(entry, path) for entry, path in zip(blocks, paths)\n ]\n\n if not blocks:\n raise ValueError(\"No files found\", urlpath)\n\n if collection:\n blocks = from_delayed(blocks)\n\n return blocks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_file_to_blocks_attach_path.for_p_in_block_.yield_p_path_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_file_to_blocks_attach_path.for_p_in_block_.yield_p_path_", "embedding": null, "metadata": {"file_path": "dask/bag/text.py", "file_name": "text.py", "file_type": "text/x-python", "category": "implementation", "start_line": 162, "end_line": 181, "span_ids": ["attach_path", "file_to_blocks"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def file_to_blocks(include_path, lazy_file, delimiter=None):\n # blocksize is None branch\n with lazy_file as f:\n if delimiter is not None:\n text = f.read()\n if not text:\n return []\n parts = text.split(delimiter)\n yield from (\n (line, lazy_file.path) if include_path else line\n for line in [line + delimiter for line in parts[:-1]] + parts[-1:]\n )\n else:\n for line in f:\n yield (line, lazy_file.path) if include_path else line\n\n\ndef attach_path(block, path):\n for p in block:\n yield (p, path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_decode_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/text.py_decode_", "embedding": null, "metadata": {"file_path": "dask/bag/text.py", "file_name": "text.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 198, "span_ids": ["decode"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def decode(block, encoding, errors, line_delimiter):\n # blocksize is not None branch\n text = block.decode(encoding, errors)\n if line_delimiter in [None, \"\", \"\\n\", \"\\r\", \"\\r\\n\"]:\n lines = io.StringIO(text, newline=line_delimiter)\n return list(lines)\n else:\n if not text:\n return []\n parts = text.split(line_delimiter)\n out = [t + line_delimiter for t in parts[:-1]] + (\n parts[-1:] if not text.endswith(line_delimiter) else []\n )\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas_register_pandas.normalize_series.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas_register_pandas.normalize_series.return._", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 910, "end_line": 955, "span_ids": ["register_pandas"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register_lazy(\"pandas\")\ndef register_pandas():\n import pandas as pd\n\n PANDAS_GT_130 = parse_version(pd.__version__) >= parse_version(\"1.3.0\")\n\n @normalize_token.register(pd.Index)\n def normalize_index(ind):\n values = ind.array\n return [ind.name, normalize_token(values)]\n\n @normalize_token.register(pd.MultiIndex)\n def normalize_index(ind):\n codes = ind.codes\n return (\n [ind.name]\n + [normalize_token(x) for x in ind.levels]\n + [normalize_token(x) for x in codes]\n )\n\n @normalize_token.register(pd.Categorical)\n def normalize_categorical(cat):\n return [normalize_token(cat.codes), normalize_token(cat.dtype)]\n\n @normalize_token.register(pd.arrays.PeriodArray)\n @normalize_token.register(pd.arrays.DatetimeArray)\n @normalize_token.register(pd.arrays.TimedeltaArray)\n def normalize_period_array(arr):\n return [normalize_token(arr.asi8), normalize_token(arr.dtype)]\n\n @normalize_token.register(pd.arrays.IntervalArray)\n def normalize_interval_array(arr):\n return [\n normalize_token(arr.left),\n normalize_token(arr.right),\n normalize_token(arr.closed),\n ]\n\n 
@normalize_token.register(pd.Series)\n def normalize_series(s):\n return [\n s.name,\n s.dtype,\n normalize_token(s._values),\n normalize_token(s.index),\n ]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas.normalize_dataframe_register_pandas.normalize_period_dtype.return.normalize_token_dtype_nam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_register_pandas.normalize_dataframe_register_pandas.normalize_period_dtype.return.normalize_token_dtype_nam", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 944, "end_line": 971, "span_ids": ["register_pandas"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register_lazy(\"pandas\")\ndef register_pandas():\n # ... other code\n\n @normalize_token.register(pd.DataFrame)\n def normalize_dataframe(df):\n mgr = df._data\n\n if PANDAS_GT_130:\n # for compat with ArrayManager, pandas 1.3.0 introduced a `.arrays`\n # attribute that returns the column arrays/block arrays for both\n # BlockManager and ArrayManager\n data = list(mgr.arrays)\n else:\n data = [block.values for block in mgr.blocks]\n data.extend([df.columns, df.index])\n return list(map(normalize_token, data))\n\n @normalize_token.register(pd.api.extensions.ExtensionArray)\n def normalize_extension_array(arr):\n import numpy as np\n\n return normalize_token(np.asarray(arr))\n\n # Dtypes\n @normalize_token.register(pd.api.types.CategoricalDtype)\n def normalize_categorical_dtype(dtype):\n return [normalize_token(dtype.categories), normalize_token(dtype.ordered)]\n\n @normalize_token.register(pd.api.extensions.ExtensionDtype)\n def normalize_period_dtype(dtype):\n return normalize_token(dtype.name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep_BlockwiseDep.get.try_.except_KeyError_.return.default": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep_BlockwiseDep.get.try_.except_KeyError_.return.default", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 27, "end_line": 63, "span_ids": ["BlockwiseDep.__getitem__", "BlockwiseDep.get", "BlockwiseDep"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class BlockwiseDep:\n \"\"\"Blockwise-IO argument\n\n This is the base class for indexable Blockwise-IO arguments.\n When constructing a ``Blockwise`` Layer, one or more of the\n collection tuples passed in with ``indices`` may contain a\n ``BlockwiseDep`` instance (in place of a \"real\" collection name).\n This allows a new collection to be created (via IO) within a\n ``Blockwise`` layer.\n\n All ``BlockwiseDep`` instances must define a ``numblocks``\n attribute to speficy the number of blocks/partitions the\n object can support along each dimension. The object should\n also define a ``produces_tasks`` attribute to specify if\n any nested tasks will be passed to the Blockwise function.\n\n See Also\n --------\n dask.blockwise.Blockwise\n dask.blockwise.BlockwiseDepDict\n \"\"\"\n\n numblocks: tuple[int, ...]\n produces_tasks: bool\n\n def __getitem__(self, idx: tuple[int, ...]) -> Any:\n \"\"\"Return Blockwise-function arguments for a specific index\"\"\"\n raise NotImplementedError(\n \"Must define `__getitem__` for `BlockwiseDep` subclass.\"\n )\n\n def get(self, idx: tuple[int, ...], default) -> Any:\n \"\"\"BlockwiseDep ``__getitem__`` Wrapper\"\"\"\n try:\n return self.__getitem__(idx)\n except KeyError:\n return default", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep.__dask_distributed_pack___BlockwiseDep.__dask_distributed_pack__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep.__dask_distributed_pack___BlockwiseDep.__dask_distributed_pack__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 65, "end_line": 76, "span_ids": ["BlockwiseDep.__dask_distributed_pack__"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockwiseDep:\n\n def __dask_distributed_pack__(\n self, required_indices: list[tuple[int, ...]] | None = None\n ):\n \"\"\"Client-side serialization for ``BlockwiseDep`` objects.\n\n Should return a ``state`` dictionary, with msgpack-serializable\n values, that can be used to initialize a new ``BlockwiseDep`` object\n on a scheduler process.\n \"\"\"\n raise NotImplementedError(\n \"Must define `__dask_distributed_pack__` for `BlockwiseDep` subclass.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep.__dask_distributed_unpack___BlockwiseDep.__repr__.return.f_type_self___name___": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDep.__dask_distributed_unpack___BlockwiseDep.__repr__.return.f_type_self___name___", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 85, "end_line": 97, "span_ids": ["BlockwiseDep.__dask_distributed_unpack__", "BlockwiseDep.__repr__"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockwiseDep:\n\n @classmethod\n def __dask_distributed_unpack__(cls, state):\n \"\"\"Scheduler-side deserialization for ``BlockwiseDep`` objects.\n\n Should use an input ``state`` dictionary to initialize a new\n ``BlockwiseDep`` object.\n \"\"\"\n raise NotImplementedError(\n \"Must define `__dask_distributed_unpack__` for `BlockwiseDep` subclass.\"\n )\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {self.numblocks}>\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDepDict_BlockwiseDepDict.__dask_distributed_unpack__.return.cls_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockwiseDepDict_BlockwiseDepDict.__dask_distributed_unpack__.return.cls_state_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 180, "span_ids": ["BlockwiseDepDict.__dask_distributed_pack__", "BlockwiseDepDict.__dask_distributed_unpack__", "BlockwiseDepDict", "BlockwiseDepDict.__getitem__", "BlockwiseDepDict.__init__"], "tokens": 634}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockwiseDepDict(BlockwiseDep):\n \"\"\"Dictionary-based Blockwise-IO argument\n\n This is a dictionary-backed instance of ``BlockwiseDep``.\n The purpose of this class is to simplify the construction\n of IO-based Blockwise Layers with block/partition-dependent\n function arguments that are difficult to calculate at\n graph-materialization time.\n\n Examples\n --------\n\n Specify an IO-based function for the Blockwise Layer. Note\n that the function will be passed a single input object when\n the task is executed (e.g. a single ``tuple`` or ``dict``):\n\n >>> import pandas as pd\n >>> func = lambda x: pd.read_csv(**x)\n\n Use ``BlockwiseDepDict`` to define the input argument to\n ``func`` for each block/partition:\n\n >>> dep = BlockwiseDepDict(\n ... mapping={\n ... (0,) : {\n ... \"filepath_or_buffer\": \"data.csv\",\n ... \"skiprows\": 1,\n ... \"nrows\": 2,\n ... \"names\": [\"a\", \"b\"],\n ... },\n ... (1,) : {\n ... 
\"filepath_or_buffer\": \"data.csv\",\n ... \"skiprows\": 3,\n ... \"nrows\": 2,\n ... \"names\": [\"a\", \"b\"],\n ... },\n ... }\n ... )\n\n Construct a Blockwise Layer with ``dep`` speficied\n in the ``indices`` list:\n\n >>> layer = Blockwise(\n ... output=\"collection-name\",\n ... output_indices=\"i\",\n ... dsk={\"collection-name\": (func, '_0')},\n ... indices=[(dep, \"i\")],\n ... numblocks={},\n ... )\n\n See Also\n --------\n dask.blockwise.Blockwise\n dask.blockwise.BlockwiseDep\n \"\"\"\n\n def __init__(\n self,\n mapping: dict,\n numblocks: tuple[int, ...] | None = None,\n produces_tasks: bool = False,\n ):\n self.mapping = mapping\n self.produces_tasks = produces_tasks\n\n # By default, assume 1D shape\n self.numblocks = numblocks or (len(mapping),)\n\n def __getitem__(self, idx: tuple[int, ...]) -> Any:\n return self.mapping[idx]\n\n def __dask_distributed_pack__(\n self, required_indices: list[tuple[int, ...]] | None = None\n ):\n from distributed.protocol import to_serialize\n\n if required_indices is None:\n required_indices = self.mapping.keys()\n\n return {\n \"mapping\": {k: to_serialize(self.mapping[k]) for k in required_indices},\n \"numblocks\": self.numblocks,\n \"produces_tasks\": self.produces_tasks,\n }\n\n @classmethod\n def __dask_distributed_unpack__(cls, state):\n return cls(**state)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_subs_blockwise_token.return.prefix_d_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_subs_blockwise_token.return.prefix_d_i", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 191, "end_line": 219, "span_ids": ["index_subs", "subs", "impl", "blockwise_token"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def subs(task, substitution):\n \"\"\"Create a new task with the values substituted\n\n This is like dask.core.subs, but takes a dict of many substitutions to\n perform simultaneously. 
It is not as concerned with micro performance.\n \"\"\"\n if isinstance(task, dict):\n return {k: subs(v, substitution) for k, v in task.items()}\n if type(task) in (tuple, list, set):\n return type(task)([subs(x, substitution) for x in task])\n try:\n return substitution[task]\n except (KeyError, TypeError):\n return task\n\n\ndef index_subs(ind, substitution):\n \"\"\"A simple subs function that works both on tuples and strings\"\"\"\n if ind is None:\n return ind\n else:\n return tuple(substitution.get(c, c) for c in ind)\n\n\n_BLOCKWISE_DEFAULT_PREFIX = \"__dask_blockwise__\"\n\n\ndef blockwise_token(i, prefix=_BLOCKWISE_DEFAULT_PREFIX):\n return prefix + \"%d\" % i", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise_Blockwise.output_blocks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise_Blockwise.output_blocks", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 333, "end_line": 394, "span_ids": ["Blockwise"], "tokens": 610}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n \"\"\"Tensor Operation\n\n This is a lazily constructed mapping for tensor operation graphs.\n This defines a dictionary using an operation and an indexing pattern.\n It is built for many operations like elementwise, transpose, tensordot, and\n so on. We choose to keep these as symbolic mappings rather than raw\n dictionaries because we are able to fuse them during optimization,\n sometimes resulting in much lower overhead.\n\n Parameters\n ----------\n output: str\n The name of the output collection. Used in keynames\n output_indices: tuple\n The output indices, like ``('i', 'j', 'k')`` used to determine the\n structure of the block computations\n dsk: dict\n A small graph to apply per-output-block. May include keys from the\n input indices.\n indices: tuple[tuple[str, tuple[str, ...] | None], ...]\n An ordered mapping from input key name, like ``'x'``\n to input indices, like ``('i', 'j')``\n Or includes literals, which have ``None`` for an index value.\n In place of input-key names, the first tuple element may also be a\n ``BlockwiseDep`` object.\n numblocks: Mapping[key, Sequence[int]]\n Number of blocks along each dimension for each input\n concatenate: bool\n Whether or not to pass contracted dimensions as a list of inputs or a\n single input to the block function\n new_axes: Mapping\n New index dimensions that may have been created and their size,\n e.g. ``{'j': 2, 'k': 3}``\n output_blocks: set[tuple[int, ...]]\n Specify a specific set of required output blocks. 
Since the graph\n will only contain the necessary tasks to generate these outputs,\n this kwarg can be used to \"cull\" the abstract layer (without needing\n to materialize the low-level graph).\n annotations: dict (optional)\n Layer annotations\n io_deps: dict[str, BlockwiseDep] (optional)\n Dictionary containing the mapping between \"place-holder\" collection\n keys and ``BlockwiseDep``-based objects.\n **WARNING**: This argument should only be used internally (for culling,\n fusion and cloning of existing Blockwise layers). Explicit use of this\n argument will be deprecated in the future.\n\n See Also\n --------\n dask.blockwise.blockwise\n dask.array.blockwise\n \"\"\"\n\n output: str\n output_indices: tuple[str, ...]\n dsk: Mapping[str, tuple]\n indices: tuple[tuple[str, tuple[str, ...] | None], ...]\n numblocks: Mapping[str, Sequence[int]]\n concatenate: bool | None\n new_axes: Mapping[str, int]\n output_blocks: set[tuple[int, ...]] | None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.clone_Blockwise.clone.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.clone_Blockwise.clone.return._", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 746, "end_line": 804, "span_ids": ["Blockwise.clone"], "tokens": 485}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def clone(\n self,\n keys: set,\n seed: Hashable,\n bind_to: Hashable = None,\n ) -> tuple[Layer, bool]:\n names = {get_name_from_key(k) for k in keys}\n # We assume that 'keys' will contain either all or none of the output keys of\n # each of the layers, because clone/bind are always invoked at collection level.\n # Asserting this is very expensive, so we only check it during unit tests.\n if \"PYTEST_CURRENT_TEST\" in os.environ:\n assert not self.get_output_keys() - keys\n for name, nb in self.numblocks.items():\n if name in names:\n for block in product(*(list(range(nbi)) for nbi in nb)):\n assert (name, *block) in keys\n\n is_leaf = True\n\n indices = []\n for k, idxv in self.indices:\n if k in names:\n is_leaf = False\n k = clone_key(k, seed)\n indices.append((k, idxv))\n\n numblocks = {}\n for k, nbv in self.numblocks.items():\n if k in names:\n is_leaf = False\n k = clone_key(k, seed)\n numblocks[k] = nbv\n\n dsk = {clone_key(k, seed): v for k, v in self.dsk.items()}\n\n if bind_to is not None and is_leaf:\n from .graph_manipulation import chunks\n\n # It's always a Delayed generated by dask.graph_manipulation.checkpoint;\n # the layer name always matches the key\n assert isinstance(bind_to, str)\n dsk = {k: (chunks.bind, v, f\"_{len(indices)}\") for k, v in dsk.items()}\n indices.append((bind_to, None))\n\n return (\n Blockwise(\n output=clone_key(self.output, seed),\n output_indices=self.output_indices,\n dsk=dsk,\n indices=indices,\n 
numblocks=numblocks,\n concatenate=self.concatenate,\n new_axes=self.new_axes,\n output_blocks=self.output_blocks,\n annotations=self.annotations,\n io_deps=self.io_deps,\n ),\n (bind_to is not None and is_leaf),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.if_numblocks_is_None__make_blockwise_graph._Create_argument_lists": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.if_numblocks_is_None__make_blockwise_graph._Create_argument_lists", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 984, "end_line": 1029, "span_ids": ["make_blockwise_graph"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_blockwise_graph(\n func,\n output,\n out_indices,\n *arrind_pairs,\n numblocks=None,\n concatenate=None,\n new_axes=None,\n output_blocks=None,\n dims=None,\n deserializing=False,\n func_future_args=None,\n return_key_deps=False,\n io_deps=None,\n **kwargs,\n):\n\n if numblocks is None:\n raise ValueError(\"Missing required numblocks argument.\")\n new_axes = new_axes or {}\n io_deps = io_deps or {}\n argpairs = list(toolz.partition(2, arrind_pairs))\n\n if return_key_deps:\n key_deps = {}\n\n if deserializing:\n from distributed.protocol.serialize import to_serialize\n\n if concatenate is True:\n from dask.array.core import concatenate_axes as concatenate\n\n # Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions\n dims = dims or _make_dims(argpairs, numblocks, new_axes)\n\n # Generate the abstract \"plan\" before constructing\n # the actual graph\n (coord_maps, concat_axes, dummies) = _get_coord_mapping(\n dims,\n output,\n out_indices,\n numblocks,\n argpairs,\n concatenate,\n )\n\n # Unpack delayed objects in kwargs\n dsk2 = {}\n if kwargs:\n task, dsk2 = unpack_collections(kwargs)\n if dsk2:\n kwargs2 = task\n else:\n kwargs2 = kwargs\n\n # Apply Culling.\n # Only need to construct the specified set of output blocks\n output_blocks = output_blocks or itertools.product(\n *[range(dims[i]) for i in out_indices]\n )\n\n dsk = {}\n # Create argument lists\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.for_out_coords_in_output__make_blockwise_graph.None_7.else_.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_make_blockwise_graph.for_out_coords_in_output__make_blockwise_graph.None_7.else_.return.dsk", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1030, "end_line": 1101, "span_ids": ["make_blockwise_graph"], "tokens": 622}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_blockwise_graph(\n func,\n output,\n out_indices,\n *arrind_pairs,\n numblocks=None,\n concatenate=None,\n new_axes=None,\n output_blocks=None,\n dims=None,\n deserializing=False,\n func_future_args=None,\n return_key_deps=False,\n io_deps=None,\n **kwargs,\n):\n # ... other code\n for out_coords in output_blocks:\n deps = set()\n coords = out_coords + dummies\n args = []\n for cmap, axes, (arg, ind) in zip(coord_maps, concat_axes, argpairs):\n if ind is None:\n if deserializing:\n args.append(stringify_collection_keys(arg))\n else:\n args.append(arg)\n else:\n arg_coords = tuple(coords[c] for c in cmap)\n if axes:\n tups = lol_product((arg,), arg_coords)\n if arg not in io_deps:\n deps.update(flatten(tups))\n\n if concatenate:\n tups = (concatenate, tups, axes)\n else:\n tups = (arg,) + arg_coords\n if arg not in io_deps:\n deps.add(tups)\n # Replace \"place-holder\" IO keys with \"real\" args\n if arg in io_deps:\n # We don't want to stringify keys for args\n # we are replacing here\n idx = tups[1:]\n args.append(io_deps[arg].get(idx, idx))\n elif deserializing:\n args.append(stringify_collection_keys(tups))\n else:\n args.append(tups)\n out_key = (output,) + out_coords\n\n if deserializing:\n deps.update(func_future_args)\n args += list(func_future_args)\n\n if deserializing and isinstance(func, bytes):\n # Construct a function/args/kwargs dict if we\n # do not have a nested task (i.e. 
concatenate=False).\n            # TODO: Avoid using the iterate_collection-version\n            # of to_serialize if we know that there are no embedded\n            # Serialized/Serialize objects in args and/or kwargs.\n            if kwargs:\n                dsk[out_key] = {\n                    \"function\": func,\n                    \"args\": to_serialize(args),\n                    \"kwargs\": to_serialize(kwargs2),\n                }\n            else:\n                dsk[out_key] = {\"function\": func, \"args\": to_serialize(args)}\n        else:\n            if kwargs:\n                val = (apply, func, args, kwargs2)\n            else:\n                args.insert(0, func)\n                val = tuple(args)\n            # May still need to serialize (if concatenate=True)\n            dsk[out_key] = to_serialize(val) if deserializing else val\n\n        if return_key_deps:\n            key_deps[out_key] = deps\n\n    if dsk2:\n        dsk.update(ensure_dict(dsk2))\n\n    if return_key_deps:\n        return dsk, key_deps\n    else:\n        return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_copy_read_bytes.if_blocksize_is_not_None_.blocksize.int_blocksize_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/core.py_copy_read_bytes.if_blocksize_is_not_None_.blocksize.int_blocksize_", "embedding": null, "metadata": {"file_path": "dask/bytes/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 91, "span_ids": ["read_bytes", "imports"], "tokens": 820}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import copy\nimport os\n\nfrom fsspec.core import OpenFile, get_fs_token_paths\nfrom fsspec.utils import infer_compression, read_block\n\nfrom ..base import tokenize\nfrom ..delayed import delayed\nfrom ..utils import is_integer, parse_bytes\n\n\ndef read_bytes(\n    urlpath,\n    delimiter=None,\n    not_zero=False,\n    blocksize=\"128 MiB\",\n    sample=\"10 kiB\",\n    compression=None,\n    include_path=False,\n    **kwargs,\n):\n    \"\"\"Given a path or paths, return delayed objects that read from those paths.\n\n    The path may be a filename like ``'2015-01-01.csv'`` or a globstring\n    like ``'2015-*-*.csv'``.\n\n    The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if\n    those libraries are installed.\n\n    This cleanly breaks data by a delimiter if given, so that block boundaries\n    start directly after a delimiter and end on the delimiter.\n\n    Parameters\n    ----------\n    urlpath : string or list\n        Absolute or relative filepath(s). Prefix with a protocol like ``s3://``\n        to read from alternative filesystems. To read from multiple files you\n        can pass a globstring or a list of paths, with the caveat that they\n        must all have the same protocol.\n    delimiter : bytes\n        An optional delimiter, like ``b'\\\\n'`` on which to split blocks of\n        bytes.\n    not_zero : bool\n        Force seek of start-of-file delimiter, discarding header.\n    blocksize : int, str\n        Chunk size in bytes, defaults to \"128 MiB\"\n    compression : string or None\n        String like 'gzip' or 'xz'. 
Must support efficient random access.\n sample : int, string, or boolean\n Whether or not to return a header sample.\n Values can be ``False`` for \"no sample requested\"\n Or an integer or string value like ``2**20`` or ``\"1 MiB\"``\n include_path : bool\n Whether or not to include the path with the bytes representing a particular file.\n Default is False.\n **kwargs : dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n\n Examples\n --------\n >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\\\n') # doctest: +SKIP\n >>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\\\n') # doctest: +SKIP\n >>> sample, paths, blocks = read_bytes('2015-*-*.csv', include_path=True) # doctest: +SKIP\n\n Returns\n -------\n sample : bytes\n The sample header\n blocks : list of lists of ``dask.Delayed``\n Each list corresponds to a file, and each delayed object computes to a\n block of bytes from that file.\n paths : list of strings, only included if include_path is True\n List of same length as blocks, where each item is the path to the file\n represented in the corresponding block.\n\n \"\"\"\n if not isinstance(urlpath, (str, list, tuple, os.PathLike)):\n raise TypeError(\"Path should be a string, os.PathLike, list or tuple\")\n\n fs, fs_token, paths = get_fs_token_paths(urlpath, mode=\"rb\", storage_options=kwargs)\n\n if len(paths) == 0:\n raise OSError(\"%s resolved to no files\" % urlpath)\n\n if blocksize is not None:\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n if not is_integer(blocksize):\n raise TypeError(\"blocksize must be an integer\")\n blocksize = int(blocksize)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_test_read_text.with_ProcessPoolExecutor_.assert_result_a_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_hdfs.py_test_read_text_test_read_text.with_ProcessPoolExecutor_.assert_result_a_b", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_hdfs.py", "file_name": "test_hdfs.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 179, "span_ids": ["test_read_text"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_text(hdfs):\n import multiprocessing as mp\n from concurrent.futures import ProcessPoolExecutor\n\n ctx = mp.get_context(\"spawn\")\n\n with ProcessPoolExecutor(2, ctx) as pool:\n with hdfs.open(\"%s/text.1.txt\" % basedir, \"wb\") as f:\n f.write(b\"Alice 100\\nBob 200\\nCharlie 300\")\n\n with hdfs.open(\"%s/text.2.txt\" % basedir, \"wb\") as f:\n f.write(b\"Dan 400\\nEdith 500\\nFrank 600\")\n\n with hdfs.open(\"%s/other.txt\" % basedir, \"wb\") as f:\n f.write(b\"a b\\nc d\")\n\n b = db.read_text(\"hdfs://%s/text.*.txt\" % basedir)\n with dask.config.set(pool=pool):\n result = 
b.str.strip().str.split().map(len).compute()\n\n assert result == [2, 2, 2, 2, 2, 2]\n\n b = db.read_text(\"hdfs://%s/other.txt\" % basedir)\n with dask.config.set(pool=pool):\n result = b.str.split().flatten().compute()\n\n assert result == [\"a\", \"b\", \"c\", \"d\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_bag_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_bag_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 174, "end_line": 199, "span_ids": ["test_bag", "test_read_csv"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.flaky(\n reruns=10, reruns_delay=5, reason=\"https://github.com/dask/dask/issues/3696\"\n)\n@pytest.mark.network\ndef test_bag():\n # This test pulls from different hosts\n urls = [\n \"https://raw.githubusercontent.com/weierophinney/pastebin/\"\n \"master/public/js-src/dojox/data/tests/stores/patterns.csv\",\n \"https://en.wikipedia.org\",\n ]\n b = db.read_text(urls)\n assert b.npartitions == 2\n b.compute()\n\n\n@pytest.mark.network\ndef test_read_csv():\n dd = pytest.importorskip(\"dask.dataframe\")\n url = (\n \"https://raw.githubusercontent.com/weierophinney/pastebin/\"\n \"master/public/js-src/dojox/data/tests/stores/patterns.csv\"\n )\n b = dd.read_csv(url)\n b.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_parse_sample_bytes_test_with_urls.with_filetexts_files_mod.assert_sum_map_len_value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_parse_sample_bytes_test_with_urls.with_filetexts_files_mod.assert_sum_map_len_value", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 135, "span_ids": ["test_read_bytes_blocksize_float_errs", "test_with_urls", "test_read_bytes_blocksize_none", "test_read_bytes_include_path", "test_read_bytes_blocksize_types", "test_parse_sample_bytes", "test_read_bytes_no_sample"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parse_sample_bytes():\n with filetexts(files, mode=\"b\"):\n sample, values = 
read_bytes(\".test.accounts.*\", sample=\"40 B\")\n assert len(sample) == 40\n\n\ndef test_read_bytes_no_sample():\n with filetexts(files, mode=\"b\"):\n sample, _ = read_bytes(\".test.accounts.1.json\", sample=False)\n assert sample is False\n\n\ndef test_read_bytes_blocksize_none():\n with filetexts(files, mode=\"b\"):\n sample, values = read_bytes(\".test.accounts.*\", blocksize=None)\n assert sum(map(len, values)) == len(files)\n\n\n@pytest.mark.parametrize(\"blocksize\", [5.0, \"5 B\"])\ndef test_read_bytes_blocksize_types(blocksize):\n with filetexts(files, mode=\"b\"):\n sample, vals = read_bytes(\".test.account*\", blocksize=blocksize)\n results = compute(*concat(vals))\n ourlines = b\"\".join(results).split(b\"\\n\")\n testlines = b\"\".join(files.values()).split(b\"\\n\")\n assert set(ourlines) == set(testlines)\n\n\ndef test_read_bytes_blocksize_float_errs():\n with filetexts(files, mode=\"b\"):\n with pytest.raises(TypeError):\n read_bytes(\".test.account*\", blocksize=5.5)\n\n\ndef test_read_bytes_include_path():\n with filetexts(files, mode=\"b\"):\n _, _, paths = read_bytes(\".test.accounts.*\", include_path=True)\n assert {os.path.split(path)[1] for path in paths} == files.keys()\n\n\ndef test_with_urls():\n with filetexts(files, mode=\"b\"):\n # OS-independent file:// URI with glob *\n url = to_uri(\".test.accounts.\") + \"*\"\n sample, values = read_bytes(url, blocksize=None)\n assert sum(map(len, values)) == len(files)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/utils.py_bz2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/utils.py_bz2_", "embedding": null, "metadata": {"file_path": "dask/bytes/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["imports", "zip_compress", "impl"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import bz2\nimport gzip\nimport io\nimport lzma\nimport zipfile\n\n\ndef zip_compress(data):\n \"\"\"Write data into zipfile and return the bytes\"\"\"\n out = io.BytesIO()\n with zipfile.ZipFile(file=out, mode=\"w\") as z:\n with z.open(\"myfile\", \"w\") as zf:\n zf.write(data)\n out.seek(0)\n return out.read()\n\n\ncompress = {\n \"gzip\": gzip.compress,\n \"bz2\": bz2.compress,\n None: lambda x: x,\n \"xz\": lzma.compress,\n \"zip\": zip_compress,\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_sys_Cache._pretask.self_starttimes_key_de": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/cache.py_sys_Cache._pretask.self_starttimes_key_de", "embedding": null, "metadata": {"file_path": "dask/cache.py", "file_name": "cache.py", 
"file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 53, "span_ids": ["Cache._pretask", "Cache", "imports", "Cache._start", "Cache.__init__"], "tokens": 357}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\nfrom numbers import Number\nfrom timeit import default_timer\n\nfrom .callbacks import Callback\n\noverhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4\n\n\nclass Cache(Callback):\n \"\"\"Use cache for computation\n\n Examples\n --------\n\n >>> cache = Cache(1e9) # doctest: +SKIP\n\n The cache can be used locally as a context manager around ``compute`` or\n ``get`` calls:\n\n >>> with cache: # doctest: +SKIP\n ... result = x.compute()\n\n You can also register a cache globally, so that it works for all\n computations:\n\n >>> cache.register() # doctest: +SKIP\n >>> cache.unregister() # doctest: +SKIP\n \"\"\"\n\n def __init__(self, cache, *args, **kwargs):\n try:\n import cachey\n except ImportError as ex:\n raise ImportError(\n 'Cache requires cachey, \"{ex}\" problem ' \"importing\".format(ex=str(ex))\n ) from ex\n self._nbytes = cachey.nbytes\n if isinstance(cache, Number):\n cache = cachey.Cache(cache, *args, **kwargs)\n else:\n assert not args and not kwargs\n self.cache = cache\n self.starttimes = dict()\n\n def _start(self, dsk):\n self.durations = dict()\n overlap = set(dsk) & set(self.cache.data)\n for key in overlap:\n dsk[key] = self.cache.data[key]\n\n def _pretask(self, key, dsk, state):\n self.starttimes[key] = default_timer()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.product__Frame.min.return.self__reduction_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.product__Frame.min.return.self__reduction_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1851, "end_line": 1869, "span_ids": ["_Frame:11", "_Frame.min", "_Frame.max"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n product = prod # aliased dd.product\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def max(\n self, axis=None, skipna=True, split_every=False, out=None, numeric_only=None\n ):\n return self._reduction_agg(\n \"max\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def min(\n self, axis=None, skipna=True, split_every=False, out=None, numeric_only=None\n ):\n return self._reduction_agg(\n \"min\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.kurtosis__Frame.kurtosis.if_axis_1_.else_.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.kurtosis__Frame.kurtosis.if_axis_1_.else_.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2306, "end_line": 2356, "span_ids": ["_Frame.kurtosis"], "tokens": 360}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def kurtosis(\n self,\n axis=None,\n fisher=True,\n bias=True,\n nan_policy=\"propagate\",\n out=None,\n numeric_only=None,\n ):\n \"\"\"\n .. note::\n\n This implementation follows the dask.array.stats implementation\n of kurtosis and calculates kurtosis without taking into account\n a bias term for finite sample size, which corresponds to the\n default settings of the scipy.stats kurtosis calculation. This differs\n from pandas.\n\n Further, this method currently does not support filtering out NaN\n values, which is again a difference to Pandas.\n \"\"\"\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"kurtosis\")\n meta = self._meta_nonempty.kurtosis()\n if axis == 1:\n result = map_partitions(\n M.kurtosis,\n self,\n meta=meta,\n token=self._token_prefix + \"kurtosis\",\n axis=axis,\n enforce_metadata=False,\n )\n return handle_out(out, result)\n else:\n if self.ndim == 1:\n result = self._kurtosis_1d(\n self, fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n return handle_out(out, result)\n else:\n result = self._kurtosis_numeric(\n fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n\n if isinstance(self, DataFrame):\n result.divisions = (self.columns.min(), self.columns.max())\n\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._kurtosis_1d__Frame._kurtosis_1d.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._kurtosis_1d__Frame._kurtosis_1d.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2211, "end_line": 2240, "span_ids": ["_Frame._kurtosis_1d"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _kurtosis_1d(self, column, fisher=True, bias=True, nan_policy=\"propagate\"):\n \"\"\"1D version of the kurtosis calculation.\n\n Uses the array version from da.stats in case we are passing in a single series\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n if pd.api.types.is_integer_dtype(column._meta_nonempty):\n column = column.astype(\"f8\")\n\n if not np.issubdtype(column.dtype, np.number):\n column = column.astype(\"f8\")\n\n name = self._token_prefix + \"kurtosis-1d-\" + tokenize(column)\n\n array_kurtosis = da_stats.kurtosis(\n column.values, axis=0, fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n\n layer = {\n (name, 0): (methods.wrap_kurtosis_reduction, (array_kurtosis._name,), None)\n }\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[array_kurtosis]\n )\n\n return new_dd_object(\n graph, name, column._meta_nonempty.kurtosis(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._kurtosis_numeric__Frame._kurtosis_numeric.return.new_dd_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._kurtosis_numeric__Frame._kurtosis_numeric.return.new_dd_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2242, "end_line": 2277, "span_ids": ["_Frame._kurtosis_numeric"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _kurtosis_numeric(self, fisher=True, bias=True, nan_policy=\"propagate\"):\n \"\"\"Method for dataframes with numeric columns.\n\n Maps the array version from da.stats onto the numeric array of columns.\n \"\"\"\n # import depends on scipy, not installed by default\n from ..array import stats as da_stats\n\n num = self.select_dtypes(include=[\"number\", \"bool\"], exclude=[np.timedelta64])\n\n values_dtype = num.values.dtype\n array_values = num.values\n\n if not np.issubdtype(values_dtype, np.number):\n array_values = num.values.astype(\"f8\")\n\n array_kurtosis = da_stats.kurtosis(\n array_values, axis=0, fisher=fisher, bias=bias, nan_policy=nan_policy\n )\n\n name = self._token_prefix + \"kurtosis-numeric\" + tokenize(num)\n cols = num._meta.columns if is_dataframe_like(num) else None\n\n kurtosis_shape = num._meta_nonempty.values.var(axis=0).shape\n array_kurtosis_name = (array_kurtosis._name,) + (0,) * len(kurtosis_shape)\n\n layer = {\n (name, 0): (methods.wrap_kurtosis_reduction, array_kurtosis_name, cols)\n }\n graph = HighLevelGraph.from_collections(\n name, layer, dependencies=[array_kurtosis]\n )\n\n return new_dd_object(\n graph, name, 
num._meta_nonempty.kurtosis(), divisions=[None, None]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.astype__Frame.astype.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.astype__Frame.astype.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2662, "end_line": 2683, "span_ids": ["_Frame.astype"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def astype(self, dtype):\n # XXX: Pandas will segfault for empty dataframes when setting\n # categorical dtypes. This operation isn't allowed currently anyway. We\n # get the metadata with a non-empty frame to throw the error instead of\n # segfaulting.\n if is_dataframe_like(self._meta) and is_categorical_dtype(dtype):\n meta = self._meta_nonempty.astype(dtype)\n else:\n meta = self._meta.astype(dtype)\n if hasattr(dtype, \"items\"):\n set_unknown = [\n k\n for k, v in dtype.items()\n if is_categorical_dtype(v) and getattr(v, \"categories\", None) is None\n ]\n meta = clear_known_categories(meta, cols=set_unknown)\n elif is_categorical_dtype(dtype) and getattr(dtype, \"categories\", None) is None:\n meta = clear_known_categories(meta)\n return self.map_partitions(\n M.astype, dtype=dtype, meta=meta, enforce_metadata=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.sort_values_DataFrame.sort_values.return.sort_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.sort_values_DataFrame.sort_values.return.sort_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4492, "end_line": 4542, "span_ids": ["DataFrame.sort_values"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def sort_values(\n self,\n by,\n npartitions=None,\n ascending=True,\n na_position=\"last\",\n sort_function=None,\n sort_function_kwargs=None,\n **kwargs,\n ):\n \"\"\"Sort the dataset by a single column.\n\n Sorting a parallel dataset requires expensive shuffles 
and is generally\n not recommended. See ``set_index`` for implementation details.\n\n Parameters\n ----------\n by: string\n npartitions: int, None, or 'auto'\n The ideal number of output partitions. If None, use the same as\n the input. If 'auto' then decide by memory use.\n ascending: bool, optional\n Sort ascending vs. descending.\n Defaults to True.\n na_position: {'last', 'first'}, optional\n Puts NaNs at the beginning if 'first', puts NaN at the end if 'last'.\n Defaults to 'last'.\n sort_function: function, optional\n Sorting function to use when sorting underlying partitions.\n If None, defaults to ``M.sort_values`` (the partition library's\n implementation of ``sort_values``).\n sort_function_kwargs: dict, optional\n Additional keyword arguments to pass to the partition sorting function.\n By default, ``by``, ``ascending``, and ``na_position`` are provided.\n\n Examples\n --------\n >>> df2 = df.sort_values('x') # doctest: +SKIP\n \"\"\"\n from .shuffle import sort_values\n\n return sort_values(\n self,\n by,\n ascending=ascending,\n npartitions=npartitions,\n na_position=na_position,\n sort_function=sort_function,\n sort_function_kwargs=sort_function_kwargs,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__map_freq_to_period_start__map_freq_to_period_start.try_.except_AttributeError_.return.freq": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__map_freq_to_period_start__map_freq_to_period_start.try_.except_AttributeError_.return.freq", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6326, "end_line": 6362, "span_ids": ["_map_freq_to_period_start"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _map_freq_to_period_start(freq):\n \"\"\"Ensure that the frequency pertains to the **start** of a period.\n\n If e.g. 
`freq='M'`, then the divisions are:\n        - 2021-1-31 00:00:00 (start of February partition)\n        - 2021-2-28 00:00:00 (start of March partition)\n        - ...\n\n    but this **should** be:\n        - 2021-2-1 00:00:00 (start of February partition)\n        - 2021-3-1 00:00:00 (start of March partition)\n        - ...\n\n    Therefore, we map `freq='M'` to `freq='MS'` (same for quarter and year).\n    \"\"\"\n\n    if not isinstance(freq, str):\n        return freq\n\n    offset = pd.tseries.frequencies.to_offset(freq)\n    offset_type_name = type(offset).__name__\n\n    if not offset_type_name.endswith(\"End\"):\n        return freq\n\n    new_offset = offset_type_name[: -len(\"End\")] + \"Begin\"\n    try:\n        new_offset_type = getattr(pd.tseries.offsets, new_offset)\n        if \"-\" in freq:\n            _, anchor = freq.split(\"-\")\n            anchor = \"-\" + anchor\n        else:\n            anchor = \"\"\n        n = str(offset.n) if offset.n != 1 else \"\"\n        return f\"{n}{new_offset_type._prefix}{anchor}\"\n    except AttributeError:\n        return freq", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py_bisect__IndexerBase._make_meta.if_cindexer_is_None_.else_.return.self__meta_indexer_cin": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py_bisect__IndexerBase._make_meta.if_cindexer_is_None_.else_.return.self__meta_indexer_cin", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 37, "span_ids": ["imports", "_IndexerBase._name", "_IndexerBase", "_IndexerBase.__init__", "_IndexerBase._meta_indexer", "_IndexerBase._make_meta"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import bisect\nfrom collections import defaultdict\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_bool_dtype\n\nfrom ..array.core import Array\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom . 
import methods\nfrom ._compat import PANDAS_GT_130\nfrom .core import Series, new_dd_object\nfrom .utils import is_index_like, is_series_like, meta_nonempty\n\n\nclass _IndexerBase:\n def __init__(self, obj):\n self.obj = obj\n\n @property\n def _name(self):\n return self.obj._name\n\n @property\n def _meta_indexer(self):\n raise NotImplementedError\n\n def _make_meta(self, iindexer, cindexer):\n \"\"\"\n get metadata\n \"\"\"\n if cindexer is None:\n return self.obj\n else:\n return self._meta_indexer[:, cindexer]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partitions_of_index_values__coerce_loc_index.return.o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__partitions_of_index_values__coerce_loc_index.return.o", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 307, "end_line": 339, "span_ids": ["_partitions_of_index_values", "_coerce_loc_index"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _partitions_of_index_values(divisions, values):\n \"\"\"Return defaultdict of division and values pairs\n Each key corresponds to the division that the corresponding index values\n belong to.\n\n >>> sorted(_partitions_of_index_values([0, 5, 10], [3]).items())\n [(0, [3])]\n >>> sorted(_partitions_of_index_values([0, 5, 10], [3, 8, 5]).items())\n [(0, [3]), (1, [8, 5])]\n \"\"\"\n if divisions[0] is None:\n msg = \"Can not use loc on DataFrame without known divisions\"\n raise ValueError(msg)\n\n results = defaultdict(list)\n values = pd.Index(values, dtype=object)\n for val in values:\n i = bisect.bisect_right(divisions, val)\n div = min(len(divisions) - 2, max(0, i - 1))\n results[div].append(val)\n return results\n\n\ndef _coerce_loc_index(divisions, o):\n \"\"\"Transform values to be comparable against divisions\n\n This is particularly valuable to use with pandas datetimes\n \"\"\"\n if divisions and isinstance(divisions[0], datetime):\n return pd.Timestamp(o)\n if divisions and isinstance(divisions[0], np.datetime64):\n return np.datetime64(o).astype(divisions[0].dtype)\n return o", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__maybe_partial_time_string_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/indexing.py__maybe_partial_time_string_", "embedding": null, "metadata": {"file_path": "dask/dataframe/indexing.py", "file_name": "indexing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 342, "end_line": 376, "span_ids": 
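A worked example of the division lookup inside `_partitions_of_index_values`, using only the standard library: `bisect_right` locates the insertion point in the sorted divisions, and the clamp maps it onto a valid partition number, mirroring the doctests above.

import bisect

divisions = [0, 5, 10]  # two partitions: values in [0, 5) and [5, 10]
for val in [3, 8, 5]:
    i = bisect.bisect_right(divisions, val)
    div = min(len(divisions) - 2, max(0, i - 1))
    print(val, "->", div)
# 3 -> 0
# 8 -> 1
# 5 -> 1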
["_maybe_partial_time_string"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_partial_time_string(index, indexer):\n \"\"\"\n Convert indexer for partial string selection\n if data has DatetimeIndex/PeriodIndex\n \"\"\"\n # do not pass dd.Index\n assert is_index_like(index)\n\n if not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):\n return indexer\n\n if PANDAS_GT_130:\n kind_option = {}\n else:\n kind_option = {\"kind\": \"loc\"}\n\n if isinstance(indexer, slice):\n if isinstance(indexer.start, str):\n start = index._maybe_cast_slice_bound(indexer.start, \"left\", **kind_option)\n else:\n start = indexer.start\n\n if isinstance(indexer.stop, str):\n stop = index._maybe_cast_slice_bound(indexer.stop, \"right\", **kind_option)\n else:\n stop = indexer.stop\n return slice(start, stop)\n\n elif isinstance(indexer, str):\n start = index._maybe_cast_slice_bound(indexer, \"left\", **kind_option)\n stop = index._maybe_cast_slice_bound(indexer, \"right\", **kind_option)\n return slice(min(start, stop), max(start, stop))\n\n return indexer", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/__init__.py_demo_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/__init__.py_demo_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 26, "span_ids": ["imports"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from . 
import demo\nfrom .csv import read_csv, read_fwf, read_table, to_csv\nfrom .hdf import read_hdf, to_hdf\nfrom .io import (\n dataframe_from_ctable,\n from_array,\n from_bcolz,\n from_dask_array,\n from_delayed,\n from_pandas,\n to_bag,\n to_records,\n)\nfrom .json import read_json, to_json\nfrom .sql import read_sql, read_sql_query, read_sql_table, to_sql\n\ntry:\n from .parquet import read_parquet, to_parquet\nexcept ImportError:\n pass\n\ntry:\n from .orc import read_orc, to_orc\nexcept ImportError:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper.__call___CSVFunctionWrapper.__call__.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper.__call___CSVFunctionWrapper.__call__.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 87, "end_line": 141, "span_ids": ["CSVFunctionWrapper.__call__"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CSVFunctionWrapper:\n\n def __call__(self, part):\n\n # Part will be a 4-element tuple\n block, path, is_first, is_last = part\n\n # Construct `path_info`\n if path is not None:\n path_info = (\n self.colname,\n path,\n sorted(list(self.head[self.colname].cat.categories)),\n )\n else:\n path_info = None\n\n # Deal with arguments that are special\n # for the first block of each file\n write_header = False\n rest_kwargs = self.kwargs.copy()\n if not is_first:\n write_header = True\n rest_kwargs.pop(\"skiprows\", None)\n if rest_kwargs.get(\"header\", 0) is not None:\n rest_kwargs.pop(\"header\", None)\n if not is_last:\n rest_kwargs.pop(\"skipfooter\", None)\n\n # Deal with column projection\n columns = self.full_columns\n project_after_read = False\n if self.columns is not None:\n if self.kwargs:\n # To be safe, if any kwargs are defined, avoid\n # changing `usecols` here. 
Instead, we can just\n # select columns after the read\n project_after_read = True\n else:\n columns = self.columns\n rest_kwargs[\"usecols\"] = columns\n\n # Call `pandas_read_text`\n df = pandas_read_text(\n self.reader,\n block,\n self.header,\n rest_kwargs,\n self.dtypes,\n columns,\n write_header,\n self.enforce,\n path_info,\n )\n if project_after_read:\n return df[self.columns]\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_text_blocks_to_pandas_text_blocks_to_pandas.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_text_blocks_to_pandas_text_blocks_to_pandas.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 287, "end_line": 400, "span_ids": ["text_blocks_to_pandas"], "tokens": 813}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def text_blocks_to_pandas(\n reader,\n block_lists,\n header,\n head,\n kwargs,\n enforce=False,\n specified_dtypes=None,\n path=None,\n blocksize=None,\n urlpath=None,\n):\n \"\"\"Convert blocks of bytes to a dask.dataframe\n\n This accepts a list of lists of values of bytes where each list corresponds\n to one file, and the bytes, concatenated in order, comprise the entire\n file.\n\n Parameters\n ----------\n reader : callable\n ``pd.read_csv`` or ``pd.read_table``.\n block_lists : list of lists of delayed values of bytes\n The lists of bytestrings where each list corresponds to one logical file\n header : bytestring\n The header, found at the front of the first file, to be prepended to\n all blocks\n head : pd.DataFrame\n An example Pandas DataFrame to be used for metadata.\n kwargs : dict\n Keyword arguments to pass down to ``reader``\n path : tuple, optional\n A tuple containing column name for path and the path_converter if provided\n\n Returns\n -------\n A dask.dataframe\n \"\"\"\n dtypes = head.dtypes.to_dict()\n # dtypes contains only instances of CategoricalDtype, which causes issues\n # in coerce_dtypes for non-uniform categories across partitions.\n # We will modify `dtype` (which is inferred) to\n # 1. contain instances of CategoricalDtypes for user-provided types\n # 2. 
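A sketch of the header handling that the `write_header` flag above controls: blocks after the first carry no header of their own, so the header bytes captured from the start of the file are prepended before parsing. Assumes only pandas; the bytes are invented.

from io import BytesIO

import pandas as pd

header = b"a,b\n"       # captured from the first block of the file
block = b"3,4\n5,6\n"   # a later, headerless block
df = pd.read_csv(BytesIO(header + block))
print(df.columns.tolist())  # ['a', 'b']
print(len(df))              # 2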
contain 'category' for data inferred types\n categoricals = head.select_dtypes(include=[\"category\"]).columns\n\n if isinstance(specified_dtypes, Mapping):\n known_categoricals = [\n k\n for k in categoricals\n if isinstance(specified_dtypes.get(k), CategoricalDtype)\n and specified_dtypes.get(k).categories is not None\n ]\n unknown_categoricals = categoricals.difference(known_categoricals)\n else:\n unknown_categoricals = categoricals\n\n # Fixup the dtypes\n for k in unknown_categoricals:\n dtypes[k] = \"category\"\n\n columns = list(head.columns)\n\n blocks = tuple(flatten(block_lists))\n # Create mask of first blocks from nested block_lists\n is_first = tuple(block_mask(block_lists))\n is_last = tuple(block_mask_last(block_lists))\n\n if path:\n colname, path_converter = path\n paths = [b[1].path for b in blocks]\n if path_converter:\n paths = [path_converter(p) for p in paths]\n head = head.assign(\n **{\n colname: pd.Categorical.from_codes(\n np.zeros(len(head), dtype=int), set(paths)\n )\n }\n )\n path = (colname, paths)\n\n if len(unknown_categoricals):\n head = clear_known_categories(head, cols=unknown_categoricals)\n\n # Define parts\n parts = []\n colname, paths = path or (None, None)\n for i in range(len(blocks)):\n parts.append([blocks[i], paths[i] if paths else None, is_first[i], is_last[i]])\n\n # Create Blockwise layer\n label = \"read-csv-\"\n name = label + tokenize(reader, urlpath, columns, enforce, head, blocksize)\n layer = DataFrameIOLayer(\n name,\n columns,\n parts,\n CSVFunctionWrapper(\n columns,\n None,\n colname,\n head,\n header,\n reader,\n dtypes,\n enforce,\n kwargs,\n ),\n label=label,\n produces_tasks=True,\n )\n graph = HighLevelGraph({name: layer}, {name: set()})\n return new_dd_object(graph, name, head, (None,) * (len(blocks) + 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_block_mask_AUTO_BLOCKSIZE._infer_block_size_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_block_mask_AUTO_BLOCKSIZE._infer_block_size_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 403, "end_line": 454, "span_ids": ["auto_blocksize", "block_mask", "_infer_block_size", "impl:5", "block_mask_last"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def block_mask(block_lists):\n \"\"\"\n Yields a flat iterable of booleans to mark the zeroth elements of the\n nested input ``block_lists`` in a flattened output.\n\n >>> list(block_mask([[1, 2], [3, 4], [5]]))\n [True, False, True, False, True]\n \"\"\"\n for block in block_lists:\n if not block:\n continue\n yield True\n yield from (False for _ in block[1:])\n\n\ndef block_mask_last(block_lists):\n \"\"\"\n Yields a flat iterable of booleans to mark the last element of the\n nested input ``block_lists`` in a flattened output.\n\n >>> list(block_mask_last([[1, 2], [3, 4], 
[5]]))\n [False, True, False, True, True]\n \"\"\"\n for block in block_lists:\n if not block:\n continue\n yield from (False for _ in block[:-1])\n yield True\n\n\ndef auto_blocksize(total_memory, cpu_count):\n memory_factor = 10\n blocksize = int(total_memory // cpu_count / memory_factor)\n return min(blocksize, int(64e6))\n\n\ndef _infer_block_size():\n default = 2**25\n if psutil is not None:\n with catch_warnings():\n simplefilter(\"ignore\", RuntimeWarning)\n mem = psutil.virtual_memory().total\n cpu = psutil.cpu_count()\n\n if mem and cpu:\n return auto_blocksize(mem, cpu)\n\n return default\n\n\n# guess blocksize if psutil is installed or use acceptable default one if not\nAUTO_BLOCKSIZE = _infer_block_size()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_MakeTimeseriesPart_MakeTimeseriesPart.__call__.return.make_timeseries_part_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_MakeTimeseriesPart_MakeTimeseriesPart.__call__.return.make_timeseries_part_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 67, "end_line": 98, "span_ids": ["MakeTimeseriesPart", "MakeTimeseriesPart.__init__", "MakeTimeseriesPart.__call__", "MakeTimeseriesPart.project_columns"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MakeTimeseriesPart:\n \"\"\"\n Wrapper Class for ``make_timeseries_part``\n Makes a timeseries partition.\n \"\"\"\n\n def __init__(self, dtypes, freq, kwargs, columns=None):\n self.columns = columns or list(dtypes.keys())\n self.dtypes = {c: dtypes[c] for c in self.columns}\n self.freq = freq\n self.kwargs = kwargs\n\n def project_columns(self, columns):\n \"\"\"Return a new MakeTimeseriesPart object with\n a sub-column projection.\n \"\"\"\n if columns == self.columns:\n return self\n return MakeTimeseriesPart(\n self.dtypes,\n self.freq,\n self.kwargs,\n columns=columns,\n )\n\n def __call__(self, part):\n divisions, state_data = part\n if isinstance(state_data, int):\n state_data = random_state_data(1, state_data)\n return make_timeseries_part(\n divisions[0], divisions[1], self.dtypes, self.freq, state_data, self.kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_os__pd_to_hdf.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_os__pd_to_hdf.return.None", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 32, "span_ids": 
["imports", "_pd_to_hdf"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport uuid\nfrom fnmatch import fnmatch\nfrom glob import glob\nfrom warnings import warn\n\nimport pandas as pd\nfrom fsspec.utils import build_name_function, stringify_path\nfrom tlz import merge\n\nfrom ... import config, multiprocessing\nfrom ...base import compute_as_if_collection, get_scheduler, tokenize\nfrom ...delayed import Delayed, delayed\nfrom ...highlevelgraph import HighLevelGraph\nfrom ...layers import DataFrameIOLayer\nfrom ...utils import get_scheduler_lock\nfrom ..core import DataFrame, new_dd_object\nfrom .io import _link\n\n\ndef _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):\n \"\"\"A wrapper function around pd_to_hdf that enables locking\"\"\"\n\n if lock:\n lock.acquire()\n try:\n pd_to_hdf(*args, **kwargs)\n finally:\n if lock:\n lock.release()\n\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_os_lock.Lock_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_os_lock.Lock_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nfrom math import ceil\nfrom operator import getitem\nfrom threading import Lock\n\nimport numpy as np\nimport pandas as pd\nfrom tlz import merge\n\nfrom ... 
import array as da\nfrom ...base import tokenize\nfrom ...dataframe.core import new_dd_object\nfrom ...delayed import delayed\nfrom ...highlevelgraph import HighLevelGraph\nfrom ...utils import M, _deprecated, ensure_dict\nfrom ..core import DataFrame, Index, Series, has_parallel_type, new_dd_object\nfrom ..shuffle import set_partition\nfrom ..utils import check_meta, insert_meta_param_description, is_series_like, make_meta\n\nlock = Lock()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_create_metadata_file_create_metadata_file.paths_root_dir_fns__s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_create_metadata_file_create_metadata_file.paths_root_dir_fns__s", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 652, "end_line": 729, "span_ids": ["create_metadata_file"], "tokens": 763}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def create_metadata_file(\n paths,\n root_dir=None,\n out_dir=None,\n engine=\"pyarrow\",\n storage_options=None,\n split_every=32,\n compute=True,\n compute_kwargs=None,\n fs=None,\n):\n \"\"\"Construct a global _metadata file from a list of parquet files.\n\n Dask's read_parquet function is designed to leverage a global\n _metadata file whenever one is available. The to_parquet\n function will generate this file automatically by default, but it\n may not exist if the dataset was generated outside of Dask. This\n utility provides a mechanism to generate a _metadata file from a\n list of existing parquet files.\n\n NOTE: This utility is not yet supported for the \"fastparquet\" engine.\n\n Parameters\n ----------\n paths : list(string)\n List of files to collect footer metadata from.\n root_dir : string, optional\n Root directory of dataset. The `file_path` fields in the new\n _metadata file will be relative to this directory. If None, a common\n root directory will be inferred.\n out_dir : string or False, optional\n Directory location to write the final _metadata file. By default,\n this will be set to `root_dir`. If False is specified, the global\n metadata will be returned as an in-memory object (and will not be\n written to disk).\n engine : str or Engine, default 'pyarrow'\n Parquet Engine to use. Only 'pyarrow' is supported if a string\n is passed.\n storage_options : dict, optional\n Key/value pairs to be passed on to the file-system backend, if any.\n split_every : int, optional\n The final metadata object that is written to _metadata can be much\n smaller than the list of footer metadata. In order to avoid the\n aggregation of all metadata within a single task, a tree reduction\n is used. This argument specifies the maximum number of metadata\n inputs to be handled by any one task in the tree. 
Defaults to 32.\n compute : bool, optional\n If True (default) then the result is computed immediately. If False\n then a ``dask.delayed`` object is returned for future computation.\n compute_kwargs : dict, optional\n Options to be passed in to the compute method\n fs : fsspec object, optional\n File-system instance to use for file handling. If prefixes have\n been removed from the elements of ``paths`` before calling this\n function, an ``fs`` argument must be provided to ensure correct\n behavior on remote file systems (\"naked\" paths cannot be used\n to infer file-system information).\n \"\"\"\n\n # Get engine.\n # Note that \"fastparquet\" is not yet supported\n if isinstance(engine, str):\n if engine not in (\"pyarrow\", \"arrow\"):\n raise ValueError(\n f\"{engine} is not a supported engine for create_metadata_file \"\n \"Try engine='pyarrow'.\"\n )\n engine = get_engine(engine)\n\n # Process input path list\n if fs is None:\n # Only do this if an fsspec file-system object is not\n # already defined. The prefixes may already be stripped.\n fs, _, paths = get_fs_token_paths(\n paths, mode=\"rb\", storage_options=storage_options\n )\n ap_kwargs = {\"root\": root_dir} if root_dir else {}\n paths, root_dir, fns = _sort_and_analyze_paths(paths, fs, **ap_kwargs)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_None_2_sorted_columns.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_None_2_sorted_columns.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 863, "end_line": 907, "span_ids": ["sorted_columns", "get_engine"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#####################\n# Utility Functions #\n#####################\n\n\ndef sorted_columns(statistics):\n \"\"\"Find sorted columns given row-group statistics\n\n This finds all columns that are sorted, along with appropriate divisions\n values for those columns\n\n Returns\n -------\n out: List of {'name': str, 'divisions': List[str]} dictionaries\n \"\"\"\n if not statistics:\n return []\n\n out = []\n for i, c in enumerate(statistics[0][\"columns\"]):\n if not all(\n \"min\" in s[\"columns\"][i] and \"max\" in s[\"columns\"][i] for s in statistics\n ):\n continue\n divisions = [c[\"min\"]]\n max = c[\"max\"]\n success = c[\"min\"] is not None\n for stats in statistics[1:]:\n c = stats[\"columns\"][i]\n if c[\"min\"] is None:\n success = False\n break\n if c[\"min\"] >= max:\n divisions.append(c[\"min\"])\n max = c[\"max\"]\n else:\n success = False\n break\n\n if success:\n divisions.append(max)\n assert divisions == sorted(divisions)\n out.append({\"name\": c[\"name\"], \"divisions\": divisions})\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": 
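A minimal example of the row-group statistics shape that `sorted_columns` consumes (the numbers are invented; the import path follows this tree): two row groups with non-overlapping min/max ranges yield one sorted column and three division values.

from dask.dataframe.io.parquet.core import sorted_columns

statistics = [
    {"columns": [{"name": "x", "min": 0, "max": 4}]},
    {"columns": [{"name": "x", "min": 5, "max": 9}]},
]
print(sorted_columns(statistics))
# [{'name': 'x', 'divisions': [0, 5, 9]}]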
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__sort_and_analyze_paths__analyze_paths._join_path._scrub.return.p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__sort_and_analyze_paths__analyze_paths._join_path._scrub.return.p", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 395, "end_line": 418, "span_ids": ["_sort_and_analyze_paths", "_analyze_paths"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sort_and_analyze_paths(file_list, fs, root=False):\n file_list = sorted(file_list, key=natural_sort_key)\n base, fns = _analyze_paths(file_list, fs, root=root)\n return file_list, base, fns\n\n\ndef _analyze_paths(file_list, fs, root=False):\n \"\"\"Consolidate list of file-paths into parquet relative paths\n\n Note: This function was mostly copied from dask/fastparquet to\n use in both `FastParquetEngine` and `ArrowEngine`.\"\"\"\n\n def _join_path(*path):\n def _scrub(i, p):\n # Convert path to standard form\n # this means windows path separators are converted to linux\n p = p.replace(fs.sep, \"/\")\n if p == \"\": # empty path is assumed to be a relative path\n return \".\"\n if p[-1] == \"/\": # trailing slashes are not allowed\n p = p[:-1]\n if i > 0 and p[0] == \"/\": # only the first path can start with /\n p = p[1:]\n return p\n # ... other code\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_no_args_test_make_timeseries_blockwise.assert_isinstance_layers_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_make_timeseries_no_args_test_make_timeseries_blockwise.assert_isinstance_layers_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 101, "span_ids": ["test_make_timeseries_blockwise", "test_make_timeseries_no_args"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_timeseries_no_args():\n df = dd.demo.make_timeseries()\n assert 1 < df.npartitions < 1000\n assert len(df.columns) > 1\n assert len(set(df.dtypes)) > 1\n\n\ndef test_make_timeseries_blockwise():\n df = dd.demo.make_timeseries()\n df = df[[\"x\", \"y\"]]\n keys = [(df._name, i) for i in range(df.npartitions)]\n\n # Check that `optimize_dataframe_getitem` changes the\n # `columns` attribute of the \"make-timeseries\" layer\n graph = optimize_dataframe_getitem(df.__dask_graph__(), keys)\n key = [k for k in graph.layers.keys() if k.startswith(\"make-timeseries-\")][0]\n assert set(graph.layers[key].columns) == {\"x\", \"y\"}\n\n # Check that `optimize_blockwise` fuses both\n # `Blockwise` layers together into a singe `Blockwise` layer\n graph = optimize_blockwise(df.__dask_graph__(), keys)\n layers = graph.layers\n name = list(layers.keys())[0]\n assert len(layers) == 1\n assert isinstance(layers[name], Blockwise)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_os_test_to_hdf.None_3.tm_assert_frame_equal_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_os_test_to_hdf.None_3.tm_assert_frame_equal_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 46, "span_ids": ["imports", "test_to_hdf"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport pathlib\nfrom time import sleep\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import 
tm\nfrom dask.dataframe.optimize import optimize_dataframe_getitem\nfrom dask.dataframe.utils import assert_eq\nfrom dask.layers import DataFrameIOLayer\nfrom dask.utils import dependency_depth, tmpdir, tmpfile\n\n\ndef test_to_hdf():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n a = dd.from_pandas(df, 2)\n\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data\")\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_frame_equal(df, out[:])\n\n with tmpfile(\"h5\") as fn:\n a.x.to_hdf(fn, \"/data\")\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_series_equal(df.x, out[:])\n\n a = dd.from_pandas(df, 1)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data\")\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_frame_equal(df, out[:])\n\n # test compute = False\n with tmpfile(\"h5\") as fn:\n r = a.to_hdf(fn, \"/data\", compute=False)\n r.compute()\n out = pd.read_hdf(fn, \"/data\")\n tm.assert_frame_equal(df, out[:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_files_test_to_hdf_multiple_files.with_tmpfile_h5_as_fn_.assert_eq_df_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_files_test_to_hdf_multiple_files.with_tmpfile_h5_as_fn_.assert_eq_df_out_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 139, "end_line": 229, "span_ids": ["test_to_hdf_multiple_files"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_multiple_files():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n a = dd.from_pandas(df, 2)\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n b = dd.from_pandas(df16, 16)\n\n # saving to multiple files\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # saving to multiple files making sure order is kept\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n b.to_hdf(fn, \"/data\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df16, out)\n\n # saving to multiple files where first file is longer\n # https://github.com/dask/dask/issues/8023\n with tmpdir() as dn:\n fn1 = os.path.join(dn, \"data_1.h5\")\n fn2 = os.path.join(dn, \"data_2.h5\")\n b.to_hdf(fn1, \"/data\")\n a.to_hdf(fn2, \"/data\")\n out = 
dd.read_hdf([fn1, fn2], \"/data\")\n assert_eq(pd.concat([df16, df]), out)\n\n # saving to multiple files with custom name_function\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.h5\")\n a.to_hdf(fn, \"/data\", name_function=lambda i: \"a\" * (i + 1))\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n out = pd.read_hdf(os.path.join(dn, \"data_a.h5\"), \"/data\")\n tm.assert_frame_equal(out, df.iloc[:2])\n out = pd.read_hdf(os.path.join(dn, \"data_aa.h5\"), \"/data\")\n tm.assert_frame_equal(out, df.iloc[2:])\n\n # test hdf object\n with tmpfile(\"h5\") as fn:\n with pd.HDFStore(fn) as hdf:\n a.to_hdf(hdf, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_test_to_records._TODO_make_check_type_p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_records_test_to_records._TODO_make_check_type_p", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 519, "end_line": 531, "span_ids": ["test_to_records"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_records():\n pytest.importorskip(\"dask.array\")\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n assert_eq(\n df.to_records(), ddf.to_records(), check_type=False\n ) # TODO: make check_type pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_glob_ddf.dd_from_pandas_df_nparti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_glob_ddf.dd_from_pandas_df_nparti", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 96, "span_ids": ["imports", "impl:47"], "tokens": 777}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import glob\nimport math\nimport os\nimport sys\nimport warnings\nfrom decimal import Decimal\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom packaging.version import parse as 
parse_version\n\nimport dask\nimport dask.dataframe as dd\nimport dask.multiprocessing\nfrom dask.blockwise import Blockwise, optimize_blockwise\nfrom dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130\nfrom dask.dataframe.io.parquet.utils import _parse_pandas_metadata\nfrom dask.dataframe.optimize import optimize_dataframe_getitem\nfrom dask.dataframe.utils import assert_eq\nfrom dask.layers import DataFrameIOLayer\nfrom dask.utils import natural_sort_key\nfrom dask.utils_test import hlg_layer\n\ntry:\n import fastparquet\nexcept ImportError:\n fastparquet = False\n fastparquet_version = parse_version(\"0\")\nelse:\n fastparquet_version = parse_version(fastparquet.__version__)\n\n\ntry:\n import pyarrow as pa\nexcept ImportError:\n pa = False\n pa_version = parse_version(\"0\")\nelse:\n pa_version = parse_version(pa.__version__)\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = False\n\n\nSKIP_FASTPARQUET = not fastparquet\nFASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason=\"fastparquet not found\")\n\nif sys.platform == \"win32\" and pa and pa_version == parse_version(\"2.0.0\"):\n SKIP_PYARROW = True\n SKIP_PYARROW_REASON = (\n \"skipping pyarrow 2.0.0 on windows: \"\n \"https://github.com/dask/dask/issues/6093\"\n \"|https://github.com/dask/dask/issues/6754\"\n )\nelse:\n SKIP_PYARROW = not pq\n SKIP_PYARROW_REASON = \"pyarrow not found\"\nPYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)\n\n# \"Legacy\" and \"Dataset\"-specific MARK definitions\nSKIP_PYARROW_LE = SKIP_PYARROW\nSKIP_PYARROW_LE_REASON = \"pyarrow not found\"\nSKIP_PYARROW_DS = SKIP_PYARROW\nSKIP_PYARROW_DS_REASON = \"pyarrow not found\"\nif not SKIP_PYARROW_LE:\n # NOTE: We should use PYARROW_LE_MARK to skip\n # pyarrow-legacy tests once pyarrow officially\n # removes ParquetDataset support in the future.\n PYARROW_LE_MARK = pytest.mark.filterwarnings(\n \"ignore::DeprecationWarning\",\n \"ignore::FutureWarning\",\n )\nelse:\n PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)\nPYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)\n\nANY_ENGINE_MARK = pytest.mark.skipif(\n SKIP_FASTPARQUET and SKIP_PYARROW,\n reason=\"No parquet engine (fastparquet or pyarrow) found\",\n)\n\n\nnrows = 40\nnpartitions = 15\ndf = pd.DataFrame(\n {\n \"x\": [i * 7 % 5 for i in range(nrows)], # Not sorted\n \"y\": [i * 2.5 for i in range(nrows)], # Sorted\n },\n index=pd.Index([10 * i for i in range(nrows)], name=\"myindex\"),\n)\n\nddf = dd.from_pandas(df, npartitions=npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_dict_column_test_append_dict_column.assert_eq_expect_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_append_dict_column_test_append_dict_column.assert_eq_expect_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 855, "end_line": 881, "span_ids": ["test_append_dict_column"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_append_dict_column(tmpdir, engine):\n # See: https://github.com/dask/dask/issues/7492\n\n if engine == \"fastparquet\":\n pytest.xfail(\"Fastparquet engine is missing dict-column support\")\n elif pa_version < parse_version(\"1.0.1\"):\n pytest.skip(\"PyArrow 1.0.1+ required for dict-column support.\")\n\n tmp = str(tmpdir)\n dts = pd.date_range(\"2020-01-01\", \"2021-01-01\")\n df = pd.DataFrame(\n {\"value\": [{\"x\": x} for x in range(len(dts))]},\n index=dts,\n )\n ddf1 = dd.from_pandas(df, npartitions=1)\n\n # Write ddf1 to tmp, and then append it again\n ddf1.to_parquet(tmp, append=True, engine=engine)\n ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)\n\n # Read back all data (ddf1 + ddf1)\n ddf2 = dd.read_parquet(tmp, engine=engine)\n\n # Check computed result\n expect = pd.concat([df, df])\n result = ddf2.compute()\n assert_eq(expect, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_dir_nometa_test_read_dir_nometa.if_statistics_is_False_or.else_.assert_ddf2_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_dir_nometa_test_read_dir_nometa.if_statistics_is_False_or.else_.assert_ddf2_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2264, "end_line": 2284, "span_ids": ["test_read_dir_nometa"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"statistics\", [True, False, None])\n@pytest.mark.parametrize(\"remove_common\", [True, False])\n@write_read_engines()\ndef test_read_dir_nometa(tmpdir, write_engine, read_engine, statistics, remove_common):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n if remove_common and os.path.exists(os.path.join(tmp_path, \"_common_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_common_metadata\"))\n\n ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=statistics)\n assert_eq(ddf, ddf2, check_divisions=False)\n assert ddf.divisions == tuple(range(0, 420, 30))\n if statistics is False or statistics is None and read_engine.startswith(\"pyarrow\"):\n assert ddf2.divisions == (None,) * 14\n else:\n assert ddf2.divisions == tuple(range(0, 420, 30))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_statistics_nometa_test_statistics_nometa.assert_ddf2_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_statistics_nometa_test_statistics_nometa.assert_ddf2_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2287, "end_line": 2295, "span_ids": ["test_statistics_nometa"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_statistics_nometa(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine, write_metadata_file=False)\n\n ddf2 = dd.read_parquet(tmp_path, engine=read_engine, gather_statistics=True)\n assert_eq(ddf, ddf2)\n assert ddf.divisions == tuple(range(0, 420, 30))\n assert ddf2.divisions == tuple(range(0, 420, 30))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_blockwise_parquet_annotations_test_blockwise_parquet_annotations.assert_layer_annotations_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_blockwise_parquet_annotations_test_blockwise_parquet_annotations.assert_layer_annotations_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2504, "end_line": 2518, "span_ids": ["test_blockwise_parquet_annotations"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@ANY_ENGINE_MARK\ndef test_blockwise_parquet_annotations(tmpdir):\n df = pd.DataFrame({\"a\": np.arange(40, dtype=np.int32)})\n expect = dd.from_pandas(df, npartitions=2)\n expect.to_parquet(str(tmpdir))\n\n with dask.annotate(foo=\"bar\"):\n ddf = dd.read_parquet(str(tmpdir))\n\n # `ddf` should now have ONE Blockwise layer\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 1\n layer = next(iter(layers.values()))\n assert isinstance(layer, DataFrameIOLayer)\n assert layer.annotations == {\"foo\": \"bar\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_column_overlap_test_partitioned_column_overlap.if_write_cols_part_.else_.with_pytest_raises_ValueE.dd_read_parquet_path_eng": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_column_overlap_test_partitioned_column_overlap.if_write_cols_part_.else_.with_pytest_raises_ValueE.dd_read_parquet_path_eng", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2942, "end_line": 2975, "span_ids": ["test_partitioned_column_overlap"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"write_cols\",\n [[\"part\", \"col\"], [\"part\", \"kind\", \"col\"]],\n)\ndef test_partitioned_column_overlap(tmpdir, engine, write_cols):\n\n tmpdir.mkdir(\"part=a\")\n tmpdir.mkdir(\"part=b\")\n path0 = str(tmpdir.mkdir(\"part=a/kind=x\"))\n path1 = str(tmpdir.mkdir(\"part=b/kind=x\"))\n path0 = os.path.join(path0, \"data.parquet\")\n path1 = os.path.join(path1, \"data.parquet\")\n\n _df1 = pd.DataFrame({\"part\": \"a\", \"kind\": \"x\", \"col\": range(5)})\n _df2 = pd.DataFrame({\"part\": \"b\", \"kind\": \"x\", \"col\": range(5)})\n df1 = _df1[write_cols]\n df2 = _df2[write_cols]\n df1.to_parquet(path0, index=False)\n df2.to_parquet(path1, index=False)\n\n if engine == \"fastparquet\":\n path = [path0, path1]\n else:\n path = str(tmpdir)\n\n if write_cols == [\"part\", \"kind\", \"col\"]:\n result = dd.read_parquet(path, engine=engine)\n expect = pd.concat([_df1, _df2], ignore_index=True)\n assert_eq(result, expect, check_index=False)\n else:\n # For now, partial overlap between partition columns and\n # real columns is not allowed\n with pytest.raises(ValueError):\n dd.read_parquet(path, engine=engine)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_rename_columns_test_roundtrip_rename_columns.assert_eq_df1_ddf2_compu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_rename_columns_test_roundtrip_rename_columns.assert_eq_df1_ddf2_compu", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3461, "end_line": 3473, "span_ids": ["test_roundtrip_rename_columns"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_roundtrip_rename_columns(tmpdir, engine):\n # https://github.com/dask/dask/issues/7017\n\n path = os.path.join(str(tmpdir), \"test.parquet\")\n df1 = pd.DataFrame(columns=[\"a\", \"b\", \"c\"], data=np.random.uniform(size=(10, 3)))\n df1.to_parquet(path)\n\n # read it with dask and rename columns\n ddf2 = dd.read_parquet(path, engine=engine)\n ddf2.columns = [\"d\", \"e\", \"f\"]\n df1.columns = [\"d\", \"e\", \"f\"]\n\n assert_eq(df1, ddf2.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_io_db.with_tmpfile_as_f_.yield_uri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_io_db.with_tmpfile_as_f_.yield_uri", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 41, "span_ids": ["imports", "db"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import io\nimport sys\nfrom contextlib import contextmanager\n\nimport pytest\n\n# import dask\nfrom dask.dataframe.io.sql import read_sql, read_sql_query, read_sql_table\nfrom dask.dataframe.utils import PANDAS_GT_120, assert_eq\nfrom dask.utils import tmpfile\n\npd = pytest.importorskip(\"pandas\")\ndd = pytest.importorskip(\"dask.dataframe\")\npytest.importorskip(\"sqlalchemy\")\npytest.importorskip(\"sqlite3\")\nnp = pytest.importorskip(\"numpy\")\n\nif not PANDAS_GT_120:\n pytestmark = pytest.mark.filterwarnings(\"ignore\")\n\n\ndata = \"\"\"\nname,number,age,negish\nAlice,0,33,-5\nBob,1,40,-3\nChris,2,22,3\nDora,3,16,5\nEdith,4,53,0\nFrancis,5,30,0\nGarreth,6,20,0\n\"\"\"\n\ndf = pd.read_csv(io.StringIO(data), index_col=\"number\")\n\n\n@pytest.fixture\ndef db():\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n df.to_sql(\"test\", uri, index=True, if_exists=\"replace\")\n yield uri", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py_json__get_pyarrow_dtypes.return.dtypes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py_json__get_pyarrow_dtypes.return.dtypes", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 65, "span_ids": ["_is_local_fs", "imports", "_get_pyarrow_dtypes"], "tokens": 455}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import json\nfrom uuid import uuid4\n\nimport fsspec\nimport pandas as pd\nfrom fsspec.implementations.local import LocalFileSystem\nfrom packaging.version import parse as parse_version\n\ntry:\n import fsspec.parquet as fsspec_parquet\nexcept ImportError:\n fsspec_parquet = None\n\n\ndef _is_local_fs(fs):\n \"\"\"Check if an fsspec file-system is local\"\"\"\n return fs and isinstance(fs, LocalFileSystem)\n\n\ndef _get_pyarrow_dtypes(schema, categories):\n \"\"\"Convert a pyarrow.Schema object to pandas dtype dict\"\"\"\n\n # Check for pandas metadata\n has_pandas_metadata = schema.metadata is not None and b\"pandas\" in schema.metadata\n if has_pandas_metadata:\n pandas_metadata = json.loads(schema.metadata[b\"pandas\"].decode(\"utf8\"))\n pandas_metadata_dtypes = {\n c.get(\"field_name\", c.get(\"name\", None)): c[\"numpy_type\"]\n for c in pandas_metadata.get(\"columns\", [])\n }\n tz = {\n c.get(\"field_name\", c.get(\"name\", None)): c[\"metadata\"].get(\n \"timezone\", None\n )\n for c in pandas_metadata.get(\"columns\", [])\n if c[\"pandas_type\"] in (\"datetime\", \"datetimetz\") and c[\"metadata\"]\n }\n else:\n pandas_metadata_dtypes = {}\n\n dtypes = {}\n for i in range(len(schema)):\n field = schema[i]\n\n # Get numpy_dtype from pandas metadata if available\n if field.name in pandas_metadata_dtypes:\n if field.name in tz:\n numpy_dtype = (\n pd.Series([], dtype=\"M8[ns]\").dt.tz_localize(tz[field.name]).dtype\n )\n else:\n numpy_dtype = pandas_metadata_dtypes[field.name]\n else:\n try:\n numpy_dtype = field.type.to_pandas_dtype()\n except NotImplementedError:\n continue # Skip this field (in case we aren't reading it anyway)\n\n dtypes[field.name] = numpy_dtype\n\n if categories:\n for cat in categories:\n dtypes[cat] = \"category\"\n\n return dtypes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py___asciitable": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py___asciitable", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 93, "span_ids": ["imports", "docstring"], "tokens": 712}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nAlgorithms that Involve Multiple DataFrames\n===========================================\n\nThe pandas operations ``concat``, ``join``, and ``merge`` combine multiple\nDataFrames. This module contains analogous algorithms in the parallel case.\n\nThere are two important cases:\n\n1. We combine along a partitioned index\n2. We combine along an unpartitioned index or other column\n\nIn the first case we know which partitions of each dataframe interact with\nwhich others. This lets us be significantly more clever and efficient.\n\nIn the second case each partition from one dataset interacts with all\npartitions from the other. 
We handle this through a shuffle operation.\n\nPartitioned Joins\n-----------------\n\nIn the first case where we join along a partitioned index we proceed in the\nfollowing stages.\n\n1. Align the partitions of all inputs to be the same. This involves a call\n to ``dd.repartition`` which will split up and concat existing partitions as\n necessary. After this step all inputs have partitions that align with\n each other. This step is relatively cheap.\n See the function ``align_partitions``.\n2. Remove unnecessary partitions based on the type of join we perform (left,\n right, inner, outer). We can do this at the partition level before any\n computation happens. We'll do it again on each partition when we call the\n in-memory function. See the function ``require``.\n3. Embarrassingly parallel calls to ``pd.concat``, ``pd.join``, or\n ``pd.merge``. Now that the data is aligned and unnecessary blocks have\n been removed we can rely on the fast in-memory Pandas join machinery to\n execute joins per-partition. We know that all intersecting records exist\n within the same partition\n\n\nHash Joins via Shuffle\n----------------------\n\nWhen we join along an unpartitioned index or along an arbitrary column any\npartition from one input might interact with any partition in another. In\nthis case we perform a hash-join by shuffling data in each input by that\ncolumn. This results in new inputs with the same partition structure cleanly\nseparated along that column.\n\nWe proceed with hash joins in the following stages:\n\n1. Shuffle each input on the specified column. See the function\n ``dask.dataframe.shuffle.shuffle``.\n2. Perform embarrassingly parallel join across shuffled inputs.\n\"\"\"\nimport math\nimport pickle\nimport warnings\nfrom functools import partial, wraps\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_categorical_dtype, is_dtype_equal\nfrom tlz import merge_sorted, unique\n\nfrom ..base import is_dask_collection, tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..layers import BroadcastJoinLayer\nfrom ..utils import M, apply\nfrom . 
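The shuffle-then-join scheme just described can be illustrated locally; a rough sketch of the two stages, not dask's implementation (`hash_shuffle` and the example frames are invented for this sketch, and the bucketing uses pandas' `hash_pandas_object`):

```python
# Stage 1: bucket both inputs by hashing the join column, so matching
# keys land in the same bucket. Stage 2: join bucket pairs independently.
import pandas as pd
from pandas.util import hash_pandas_object


def hash_shuffle(df, on, nbuckets):
    buckets = hash_pandas_object(df[on], index=False) % nbuckets
    return [df[buckets == i] for i in range(nbuckets)]


left = pd.DataFrame({"k": [1, 2, 3, 4], "a": list("wxyz")})
right = pd.DataFrame({"k": [2, 3, 5], "b": [20, 30, 50]})

nbuckets = 2
pieces = [
    pd.merge(lpart, rpart, on="k")
    for lpart, rpart in zip(
        hash_shuffle(left, "k", nbuckets), hash_shuffle(right, "k", nbuckets)
    )
]
print(pd.concat(pieces, ignore_index=True))  # rows for k=2 and k=3
```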
import methods\nfrom .core import (\n DataFrame,\n Index,\n Series,\n _concat,\n _Frame,\n _maybe_from_pandas,\n is_broadcastable,\n map_partitions,\n new_dd_object,\n prefix_reduction,\n suffix_reduction,\n)\nfrom .dispatch import group_split_dispatch, hash_object_dispatch\nfrom .io import from_pandas\nfrom .shuffle import partitioning_index, rearrange_by_divisions, shuffle, shuffle_group\nfrom .utils import (\n asciitable,\n is_dataframe_like,\n is_series_like,\n make_meta,\n strip_unknown_categories,\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat.if_not_isinstance_dfs_li_concat.if_axis_1_.else_.if_all_df_known_divisions.else_.return.stack_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_concat.if_not_isinstance_dfs_li_concat.if_axis_1_.else_.if_all_df_known_divisions.else_.return.stack_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1194, "end_line": 1262, "span_ids": ["concat"], "tokens": 657}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat(\n dfs,\n axis=0,\n join=\"outer\",\n interleave_partitions=False,\n ignore_unknown_divisions=False,\n ignore_order=False,\n **kwargs,\n):\n\n if not isinstance(dfs, list):\n raise TypeError(\"dfs must be a list of DataFrames/Series objects\")\n if len(dfs) == 0:\n raise ValueError(\"No objects to concatenate\")\n if len(dfs) == 1:\n if axis == 1 and isinstance(dfs[0], Series):\n return dfs[0].to_frame()\n else:\n return dfs[0]\n\n if join not in (\"inner\", \"outer\"):\n raise ValueError(\"'join' must be 'inner' or 'outer'\")\n\n axis = DataFrame._validate_axis(axis)\n dasks = [df for df in dfs if isinstance(df, _Frame)]\n dfs = _maybe_from_pandas(dfs)\n\n if axis == 1:\n if all(df.known_divisions for df in dasks):\n return concat_indexed_dataframes(\n dfs, axis=axis, join=join, ignore_order=ignore_order, **kwargs\n )\n elif (\n len(dasks) == len(dfs)\n and all(not df.known_divisions for df in dfs)\n and len({df.npartitions for df in dasks}) == 1\n ):\n if not ignore_unknown_divisions:\n warnings.warn(\n \"Concatenating dataframes with unknown divisions.\\n\"\n \"We're assuming that the indices of each dataframe\"\n \" are aligned. 
This assumption is not generally \"\n \"safe.\"\n )\n return concat_unindexed_dataframes(dfs, ignore_order=ignore_order, **kwargs)\n else:\n raise ValueError(\n \"Unable to concatenate DataFrame with unknown \"\n \"division specifying axis=1\"\n )\n else:\n if all(df.known_divisions for df in dasks):\n # each DataFrame's division must be greater than previous one\n if all(\n dfs[i].divisions[-1] < dfs[i + 1].divisions[0]\n for i in range(len(dfs) - 1)\n ):\n divisions = []\n for df in dfs[:-1]:\n # remove last to concatenate with next\n divisions += df.divisions[:-1]\n divisions += dfs[-1].divisions\n return stack_partitions(\n dfs, divisions, join=join, ignore_order=ignore_order, **kwargs\n )\n elif interleave_partitions:\n return concat_indexed_dataframes(\n dfs, join=join, ignore_order=ignore_order, **kwargs\n )\n else:\n divisions = [None] * (sum(df.npartitions for df in dfs) + 1)\n return stack_partitions(\n dfs, divisions, join=join, ignore_order=ignore_order, **kwargs\n )\n else:\n divisions = [None] * (sum(df.npartitions for df in dfs) + 1)\n return stack_partitions(\n dfs, divisions, join=join, ignore_order=ignore_order, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__contains_index_name__contains_index_name.if_isinstance_columns_or_.else_.return._is_index_level_reference": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__contains_index_name__contains_index_name.if_isinstance_columns_or_.else_.return._is_index_level_reference", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1263, "end_line": 1283, "span_ids": ["_contains_index_name"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _contains_index_name(df, columns_or_index):\n \"\"\"\n Test whether ``columns_or_index`` contains a reference\n to the index of ``df\n\n This is the local (non-collection) version of\n ``dask.core.DataFrame._contains_index_name``.\n \"\"\"\n\n def _is_index_level_reference(x, key):\n return (\n x.index.name is not None\n and (np.isscalar(key) or isinstance(key, tuple))\n and key == x.index.name\n and key not in getattr(x, \"columns\", ())\n )\n\n if isinstance(columns_or_index, list):\n return any(_is_index_level_reference(df, n) for n in columns_or_index)\n else:\n return _is_index_level_reference(df, columns_or_index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__select_columns_or_index__select_columns_or_index.return.selected_df": {"__data__": {"id_": 
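A worked example of the division-stitching branch above, with hypothetical divisions: when every frame's last division sits below the next frame's first, the output divisions are just the inputs glued together, keeping only the final frame's last boundary.

```python
# Mirrors the loop in concat: drop each non-final frame's last boundary,
# then append the final frame's divisions whole. Values are made up.
divs_a = (0, 4, 9)     # two partitions covering index values 0..9
divs_b = (10, 15, 20)  # two partitions covering index values 10..20

divisions = []
for d in [divs_a]:          # all inputs except the last
    divisions += list(d[:-1])
divisions += list(divs_b)   # the last input keeps its final boundary
assert divisions == [0, 4, 10, 15, 20]
```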
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__select_columns_or_index__select_columns_or_index.return.selected_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1286, "end_line": 1311, "span_ids": ["_select_columns_or_index"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _select_columns_or_index(df, columns_or_index):\n \"\"\"\n Returns a DataFrame with columns corresponding to each\n column or index level in columns_or_index. If included,\n the column corresponding to the index level is named _index.\n\n This is the local (non-collection) version of\n ``dask.core.DataFrame._select_columns_or_index``.\n \"\"\"\n\n def _is_column_label_reference(df, key):\n return (np.isscalar(key) or isinstance(key, tuple)) and key in df.columns\n\n # Ensure columns_or_index is a list\n columns_or_index = (\n columns_or_index if isinstance(columns_or_index, list) else [columns_or_index]\n )\n\n column_names = [n for n in columns_or_index if _is_column_label_reference(df, n)]\n\n selected_df = df[column_names]\n if _contains_index_name(df, columns_or_index):\n # Index name was included\n selected_df = selected_df.assign(_index=df.index)\n\n return selected_df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__split_partition__merge_chunk_wrapper.return.merge_chunk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__split_partition__merge_chunk_wrapper.return.merge_chunk_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1314, "end_line": 1365, "span_ids": ["_split_partition", "_concat_wrapper", "_merge_chunk_wrapper"], "tokens": 388}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _split_partition(df, on, nsplits):\n \"\"\"\n Split-by-hash a DataFrame into `nsplits` groups.\n\n Hashing will be performed on the columns or index specified by `on`.\n \"\"\"\n\n if isinstance(on, bytes):\n on = pickle.loads(on)\n\n if isinstance(on, str) or pd.api.types.is_list_like(on):\n # If `on` is a column name or list of column names, we\n # can hash/split by those columns.\n on = [on] if isinstance(on, str) else list(on)\n nset = set(on)\n if nset.intersection(set(df.columns)) == nset:\n ind = hash_object_dispatch(df[on], index=False)\n ind = ind % nsplits\n return group_split_dispatch(df, ind.values, nsplits, ignore_index=False)\n\n # We are not joining (purely) on columns. 
Need to\n # add a \"_partitions\" column to perform the split.\n if not isinstance(on, _Frame):\n on = _select_columns_or_index(df, on)\n partitions = partitioning_index(on, nsplits)\n df2 = df.assign(_partitions=partitions)\n return shuffle_group(\n df2,\n [\"_partitions\"],\n 0,\n nsplits,\n nsplits,\n False,\n nsplits,\n )\n\n\ndef _concat_wrapper(dfs):\n \"\"\"Concat and remove temporary \"_partitions\" column\"\"\"\n df = _concat(dfs, False)\n if \"_partitions\" in df.columns:\n del df[\"_partitions\"]\n return df\n\n\ndef _merge_chunk_wrapper(*args, **kwargs):\n return merge_chunk(\n *args,\n **{\n k: pickle.loads(v) if isinstance(v, bytes) else v for k, v in kwargs.items()\n },\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_broadcast_join_broadcast_join._is_NOT_broadcasted_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_broadcast_join_broadcast_join._is_NOT_broadcasted_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1368, "end_line": 1475, "span_ids": ["broadcast_join"], "tokens": 785}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_join(\n lhs,\n left_on,\n rhs,\n right_on,\n how=\"inner\",\n npartitions=None,\n suffixes=(\"_x\", \"_y\"),\n shuffle=None,\n indicator=False,\n parts_out=None,\n):\n \"\"\"Join two DataFrames on particular columns by broadcasting\n\n This broadcasts the partitions of the smaller DataFrame to each\n partition of the larger DataFrame, joins each partition pair,\n and then concatenates the new data for each output partition.\n \"\"\"\n\n if npartitions:\n # Repartition the larger collection before the merge\n if lhs.npartitions < rhs.npartitions:\n rhs = rhs.repartition(npartitions=npartitions)\n else:\n lhs = lhs.repartition(npartitions=npartitions)\n\n if how not in (\"inner\", \"left\", \"right\"):\n # Broadcast algorithm cannot handle an \"outer\" join\n raise ValueError(\n \"Only 'inner', 'left' and 'right' broadcast joins are supported.\"\n )\n\n if how == \"left\" and lhs.npartitions < rhs.npartitions:\n # Must broadcast rhs for a \"left\" broadcast join\n raise ValueError(\"'left' broadcast join requires rhs broadcast.\")\n\n if how == \"right\" and rhs.npartitions <= lhs.npartitions:\n # Must broadcast lhs for a \"right\" broadcast join\n raise ValueError(\"'right' broadcast join requires lhs broadcast.\")\n\n # TODO: It *may* be beneficial to perform the hash\n # split for \"inner\" join as well (even if it is not\n # technically needed for correctness). More testing\n # is needed here.\n if how != \"inner\":\n # Shuffle to-be-broadcasted side by hash. This\n # means that we will need to perform a local\n # shuffle and split on each partition of the\n # \"other\" collection (with the same hashing\n # approach) to ensure the correct rows are\n # joined by `merge_chunk`. 
The local hash and\n # split of lhs is in `_split_partition`.\n if lhs.npartitions < rhs.npartitions:\n lhs2 = shuffle_func(\n lhs,\n left_on,\n shuffle=\"tasks\",\n )\n lhs_name = lhs2._name\n lhs_dep = lhs2\n rhs_name = rhs._name\n rhs_dep = rhs\n else:\n rhs2 = shuffle_func(\n rhs,\n right_on,\n shuffle=\"tasks\",\n )\n lhs_name = lhs._name\n lhs_dep = lhs\n rhs_name = rhs2._name\n rhs_dep = rhs2\n else:\n lhs_name = lhs._name\n lhs_dep = lhs\n rhs_name = rhs._name\n rhs_dep = rhs\n\n if isinstance(left_on, Index):\n left_on = None\n left_index = True\n else:\n left_index = False\n\n if isinstance(right_on, Index):\n right_on = None\n right_index = True\n else:\n right_index = False\n\n merge_kwargs = dict(\n how=how,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n indicator=indicator,\n )\n\n # dummy result\n meta = lhs._meta_nonempty.merge(rhs._meta_nonempty, **merge_kwargs)\n merge_kwargs[\"empty_index_dtype\"] = meta.index.dtype\n merge_kwargs[\"categorical_columns\"] = meta.select_dtypes(include=\"category\").columns\n\n # Assume the output partitions/divisions\n # should correspond to the collection that\n # is NOT broadcasted.\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_sys_get_dummies._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_sys_get_dummies._", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 106, "span_ids": ["imports", "get_dummies"], "tokens": 802}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like, is_scalar\n\nfrom ..utils import M\nfrom . import methods\nfrom .core import DataFrame, Series, apply_concat_apply, map_partitions\nfrom .utils import has_known_categories\n\n###############################################################\n# Dummies\n###############################################################\n\n\ndef get_dummies(\n data,\n prefix=None,\n prefix_sep=\"_\",\n dummy_na=False,\n columns=None,\n sparse=False,\n drop_first=False,\n dtype=np.uint8,\n **kwargs,\n):\n \"\"\"\n Convert categorical variable into dummy/indicator variables.\n\n Data must have category dtype to infer result's ``columns``.\n\n Parameters\n ----------\n data : Series, or DataFrame\n For Series, the dtype must be categorical.\n For DataFrame, at least one column must be categorical.\n prefix : string, list of strings, or dict of strings, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. 
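A naive single-machine sketch of the broadcast strategy the `broadcast_join` docstring describes (the partition lists here are invented stand-ins for dask partitions):

```python
# Every partition of the large side is merged against the one (small)
# broadcast partition; per-partition results become the output partitions.
import pandas as pd

large_parts = [
    pd.DataFrame({"k": [1, 2], "a": [10, 20]}),
    pd.DataFrame({"k": [3, 4], "a": [30, 40]}),
]
small = pd.DataFrame({"k": [2, 3], "b": ["x", "y"]})  # broadcast side

out_parts = [pd.merge(part, small, on="k", how="inner") for part in large_parts]
print(pd.concat(out_parts, ignore_index=True))  # rows for k=2 and k=3
```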
Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : string, default '_'\n If appending prefix, separator/delimiter to use. Or pass a\n list or dictionary as with `prefix.`\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy columns should be sparse or not. Returns\n SparseDataFrame if `data` is a Series or if all columns are included.\n Otherwise returns a DataFrame with some SparseBlocks.\n\n .. versionadded:: 0.18.2\n\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n\n dtype : dtype, default np.uint8\n Data type for new columns. Only a single dtype is allowed.\n\n .. versionadded:: 0.18.2\n\n Returns\n -------\n dummies : DataFrame\n\n Examples\n --------\n Dask's version only works with Categorical data, as this is the only way to\n know the output shape without computing all the data.\n\n >>> import pandas as pd\n >>> import dask.dataframe as dd\n >>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)\n >>> dd.get_dummies(s)\n Traceback (most recent call last):\n ...\n NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...\n\n With categorical data:\n\n >>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)\n >>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE\n Dask DataFrame Structure:\n a b c\n npartitions=2\n 0 uint8 uint8 uint8\n 2 ... ... ...\n 3 ... ... ...\n Dask Name: get_dummies, 4 tasks\n >>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n See Also\n --------\n pandas.get_dummies\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py__calculate_divisions__calculate_divisions.return.divisions_mins_maxes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py__calculate_divisions__calculate_divisions.return.divisions_mins_maxes", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 25, "end_line": 70, "span_ids": ["_calculate_divisions"], "tokens": 426}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _calculate_divisions(\n df,\n partition_col,\n repartition,\n npartitions,\n upsample=1.0,\n partition_size=128e6,\n):\n \"\"\"\n Utility function to calculate divisions for calls to `map_partitions`\n \"\"\"\n sizes = df.map_partitions(sizeof) if repartition else []\n divisions = partition_col._repartition_quantiles(npartitions, upsample=upsample)\n mins = partition_col.map_partitions(M.min)\n maxes = partition_col.map_partitions(M.max)\n divisions, sizes, mins, maxes = base.compute(divisions, sizes, mins, maxes)\n divisions = methods.tolist(divisions)\n if type(sizes) is not list:\n sizes = methods.tolist(sizes)\n mins = methods.tolist(mins)\n maxes = methods.tolist(maxes)\n\n empty_dataframe_detected = pd.isnull(divisions).all()\n if repartition or empty_dataframe_detected:\n total = sum(sizes)\n npartitions = max(math.ceil(total / partition_size), 1)\n npartitions = min(npartitions, df.npartitions)\n n = len(divisions)\n try:\n divisions = np.interp(\n x=np.linspace(0, n - 1, npartitions + 1),\n xp=np.linspace(0, n - 1, n),\n fp=divisions,\n ).tolist()\n except (TypeError, ValueError): # str type\n indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)\n divisions = [divisions[i] for i in indexes]\n\n mins = remove_nans(mins)\n maxes = remove_nans(maxes)\n if pd.api.types.is_categorical_dtype(partition_col.dtype):\n dtype = partition_col.dtype\n mins = pd.Categorical(mins, dtype=dtype).codes.tolist()\n maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()\n\n return divisions, mins, maxes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_sort_values_sort_values.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_sort_values_sort_values.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 74, "end_line": 169, "span_ids": ["sort_values"], "tokens": 683}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def sort_values(\n df,\n by,\n npartitions=None,\n ascending=True,\n na_position=\"last\",\n upsample=1.0,\n partition_size=128e6,\n sort_function=None,\n sort_function_kwargs=None,\n **kwargs,\n):\n \"\"\"See DataFrame.sort_values for docstring\"\"\"\n if na_position not in (\"first\", \"last\"):\n raise ValueError(\"na_position must be either 'first' or 'last'\")\n if not isinstance(by, list):\n by = [by]\n if len(by) > 1 and df.npartitions > 1 or any(not isinstance(b, str) for b in by):\n raise NotImplementedError(\n \"Dataframes only support sorting by named columns which must be passed as a \"\n \"string or a list of strings; multi-partition dataframes only support sorting \"\n \"by a single column.\\n\"\n \"You passed %s\" % str(by)\n )\n\n sort_kwargs = {\n \"by\": by,\n \"ascending\": ascending,\n \"na_position\": na_position,\n }\n if sort_function is None:\n sort_function = M.sort_values\n if sort_function_kwargs is not None:\n sort_kwargs.update(sort_function_kwargs)\n\n if df.npartitions == 1:\n return df.map_partitions(sort_function, **sort_kwargs)\n\n if npartitions == \"auto\":\n repartition = True\n npartitions = max(100, df.npartitions)\n else:\n if npartitions is None:\n npartitions = df.npartitions\n repartition = False\n\n sort_by_col = df[by[0]]\n\n divisions, mins, maxes = _calculate_divisions(\n df, sort_by_col, repartition, npartitions, upsample, partition_size\n )\n\n if len(divisions) == 2:\n return df.repartition(npartitions=1).map_partitions(\n sort_function, **sort_kwargs\n )\n\n if not isinstance(ascending, bool):\n # support [True] as input\n if (\n isinstance(ascending, list)\n and len(ascending) == 1\n and isinstance(ascending[0], bool)\n ):\n ascending = ascending[0]\n else:\n raise NotImplementedError(\n f\"Dask currently only supports a single boolean for ascending. 
You passed {str(ascending)}\"\n )\n\n if (\n all(not pd.isna(x) for x in divisions)\n and mins == sorted(mins, reverse=not ascending)\n and maxes == sorted(maxes, reverse=not ascending)\n and all(\n mx < mn\n for mx, mn in zip(\n maxes[:-1] if ascending else maxes[1:],\n mins[1:] if ascending else mins[:-1],\n )\n )\n and npartitions == df.npartitions\n ):\n # divisions are in the right place\n return df.map_partitions(sort_function, **sort_kwargs)\n\n df = rearrange_by_divisions(\n df,\n by,\n divisions,\n ascending=ascending,\n na_position=na_position,\n duplicates=False,\n )\n df = df.map_partitions(sort_function, **sort_kwargs)\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_barrier_collect.with_ensure_cleanup_on_ex.return.res_if_len_res_0_else_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_barrier_collect.with_ensure_cleanup_on_ex.return.res_if_len_res_0_else_", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 708, "end_line": 744, "span_ids": ["collect", "barrier", "cleanup_partd_files"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def barrier(args):\n list(args)\n return 0\n\n\ndef cleanup_partd_files(p, keys):\n \"\"\"\n Clean up the files in a partd.File dataset.\n\n Parameters\n ----------\n p : partd.Interface\n File or Encode wrapping a file should be OK.\n keys: List\n Just for scheduling purposes, not actually used.\n \"\"\"\n import partd\n\n if isinstance(p, partd.Encode):\n maybe_file = p.partd\n else:\n maybe_file = p\n\n if isinstance(maybe_file, partd.File):\n path = maybe_file.path\n else:\n path = None\n\n if path:\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef collect(p, part, meta, barrier_token):\n \"\"\"Collect partitions from partd, yield dataframes\"\"\"\n with ensure_cleanup_on_exception(p):\n res = p.get(part)\n return res if len(res) > 0 else meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partitions_pre_set_partitions_pre.return.partitions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_set_partitions_pre_set_partitions_pre.return.partitions", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 793, "end_line": 813, "span_ids": ["set_partitions_pre"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date",
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def set_partitions_pre(s, divisions, ascending=True, na_position=\"last\"):\n try:\n if ascending:\n partitions = divisions.searchsorted(s, side=\"right\") - 1\n else:\n partitions = len(divisions) - divisions.searchsorted(s, side=\"right\") - 1\n except TypeError:\n # `searchsorted` fails if `s` contains nulls and strings\n partitions = np.empty(len(s), dtype=\"int32\")\n not_null = s.notna()\n if ascending:\n partitions[not_null] = divisions.searchsorted(s[not_null], side=\"right\") - 1\n else:\n partitions[not_null] = (\n len(divisions) - divisions.searchsorted(s[not_null], side=\"right\") - 1\n )\n partitions[(partitions < 0) | (partitions >= len(divisions) - 1)] = (\n len(divisions) - 2 if ascending else 0\n )\n partitions[s.isna().values] = len(divisions) - 2 if na_position == \"last\" else 0\n return partitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_test_str_accessor_cat.for_o_in_foo_foo_.with_pytest_raises_TypeEr.ddf_str_col_str_cat_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_test_str_accessor_cat.for_o_in_foo_foo_.with_pytest_raises_TypeEr.ddf_str_col_str_cat_o_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 207, "end_line": 220, "span_ids": ["test_str_accessor_cat"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor_cat(df_ddf):\n df, ddf = df_ddf\n sol = df.str_col.str.cat(df.str_col.str.upper(), sep=\":\")\n assert_eq(ddf.str_col.str.cat(ddf.str_col.str.upper(), sep=\":\"), sol)\n assert_eq(ddf.str_col.str.cat(df.str_col.str.upper(), sep=\":\"), sol)\n assert_eq(\n ddf.str_col.str.cat([ddf.str_col.str.upper(), df.str_col.str.lower()], sep=\":\"),\n df.str_col.str.cat([df.str_col.str.upper(), df.str_col.str.lower()], sep=\":\"),\n )\n assert_eq(ddf.str_col.str.cat(sep=\":\"), df.str_col.str.cat(sep=\":\"))\n\n for o in [\"foo\", [\"foo\"]]:\n with pytest.raises(TypeError):\n ddf.str_col.str.cat(o)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_warnings_test_arithmetics._Arithmetics": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_warnings_test_arithmetics._Arithmetics", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 45, "span_ids": ["imports", "test_arithmetics"], "tokens": 643}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.api.types import is_scalar\n\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import PANDAS_GT_120, PANDAS_VERSION\nfrom dask.dataframe.utils import assert_dask_graph, assert_eq, make_meta\n\ntry:\n import scipy\nexcept ImportError:\n scipy = None\n\n\n@pytest.mark.slow\ndef test_arithmetics():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n }\n meta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"), parent_meta=pd.DataFrame()\n )\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 = ddf1.compute()\n\n pdf2 = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7, 8], \"b\": [5, 6, 7, 8, 1, 2, 3, 4]})\n pdf3 = pd.DataFrame({\"a\": [5, 6, 7, 8, 4, 3, 2, 1], \"b\": [2, 4, 5, 3, 4, 2, 1, 0]})\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n\n dsk4 = {\n (\"y\", 0): pd.DataFrame({\"a\": [3, 2, 1], \"b\": [7, 8, 9]}, index=[0, 1, 3]),\n (\"y\", 1): pd.DataFrame({\"a\": [5, 2, 8], \"b\": [4, 2, 3]}, index=[5, 6, 8]),\n (\"y\", 2): pd.DataFrame({\"a\": [1, 4, 10], \"b\": [1, 0, 5]}, index=[9, 9, 9]),\n }\n ddf4 = dd.DataFrame(dsk4, \"y\", meta, [0, 4, 9, 9])\n pdf4 = ddf4.compute()\n\n # Arithmetics\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.for_dds_pds_in__test_reductions.for_dds_pds_in_.None_19": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions.for_dds_pds_in__test_reductions.for_dds_pds_in_.None_19", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 701, "end_line": 779, "span_ids": ["test_reductions"], "tokens": 990}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions(split_every):\n # ... 
other code\n\n for dds, pds in [\n (ddf1.a, pdf1.a),\n (ddf1.b, pdf1.b),\n (ddf1.c, pdf1.c),\n (ddf1[\"a\"], pdf1[\"a\"]),\n (ddf1[\"b\"], pdf1[\"b\"]),\n (nands1, nans1),\n (nands2, nans2),\n (nands3, nans3),\n (boolds, bools),\n ]:\n assert isinstance(dds, dd.Series)\n assert isinstance(pds, pd.Series)\n\n assert_eq(dds.sum(split_every=split_every), pds.sum())\n assert_eq(dds.prod(split_every=split_every), pds.prod())\n assert_eq(dds.product(split_every=split_every), pds.product())\n assert_eq(dds.min(split_every=split_every), pds.min())\n assert_eq(dds.max(split_every=split_every), pds.max())\n assert_eq(dds.count(split_every=split_every), pds.count())\n\n if scipy:\n # pandas uses unbiased skew, need to correct for that\n n = pds.shape[0]\n bias_factor = (n * (n - 1)) ** 0.5 / (n - 2)\n assert_eq(dds.skew(), pds.skew() / bias_factor)\n\n if scipy:\n # pandas uses a bias factor for kurtosis, need to correct for that\n n = pds.shape[0]\n factor = ((n - 1) * (n + 1)) / ((n - 2) * (n - 3))\n offset = (6 * (n - 1)) / ((n - 2) * (n - 3))\n assert_eq(factor * dds.kurtosis() + offset, pds.kurtosis())\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n # runtime warnings; https://github.com/dask/dask/issues/2381\n assert_eq(dds.std(split_every=split_every), pds.std())\n assert_eq(dds.var(split_every=split_every), pds.var())\n assert_eq(dds.sem(split_every=split_every), pds.sem())\n\n with warnings.catch_warnings():\n # dask.dataframe should probably filter this, to match pandas, but\n # it seems quite difficult.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(dds.std(ddof=0, split_every=split_every), pds.std(ddof=0))\n assert_eq(dds.var(ddof=0, split_every=split_every), pds.var(ddof=0))\n assert_eq(dds.sem(ddof=0, split_every=split_every), pds.sem(ddof=0))\n assert_eq(dds.mean(split_every=split_every), pds.mean())\n assert_eq(dds.nunique(split_every=split_every), pds.nunique())\n\n assert_eq(dds.sum(skipna=False, split_every=split_every), pds.sum(skipna=False))\n assert_eq(\n dds.prod(skipna=False, split_every=split_every), pds.prod(skipna=False)\n )\n assert_eq(\n dds.product(skipna=False, split_every=split_every),\n pds.product(skipna=False),\n )\n assert_eq(dds.min(skipna=False, split_every=split_every), pds.min(skipna=False))\n assert_eq(dds.max(skipna=False, split_every=split_every), pds.max(skipna=False))\n assert_eq(dds.std(skipna=False, split_every=split_every), pds.std(skipna=False))\n assert_eq(dds.var(skipna=False, split_every=split_every), pds.var(skipna=False))\n assert_eq(dds.sem(skipna=False, split_every=split_every), pds.sem(skipna=False))\n assert_eq(\n dds.std(skipna=False, ddof=0, split_every=split_every),\n pds.std(skipna=False, ddof=0),\n )\n assert_eq(\n dds.var(skipna=False, ddof=0, split_every=split_every),\n pds.var(skipna=False, ddof=0),\n )\n assert_eq(\n dds.sem(skipna=False, ddof=0, split_every=split_every),\n pds.sem(skipna=False, ddof=0),\n )\n assert_eq(\n dds.mean(skipna=False, split_every=split_every), pds.mean(skipna=False)\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reduction_series_invalid_axis_test_reduction_series_invalid_axis.for_axis_in_1_columns_.for_s_in_ddf1_a_pdf1_a_.None_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reduction_series_invalid_axis_test_reduction_series_invalid_axis.for_axis_in_1_columns_.for_s_in_ddf1_a_pdf1_a_.None_9", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 969, "end_line": 993, "span_ids": ["test_reduction_series_invalid_axis"], "tokens": 386}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction_series_invalid_axis():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n }\n meta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"), parent_meta=pd.DataFrame()\n )\n ddf1 = dd.DataFrame(dsk, \"x\", meta, [0, 4, 9, 9])\n pdf1 = ddf1.compute()\n\n for axis in [1, \"columns\"]:\n for s in [ddf1.a, pdf1.a]: # both must behave the same\n pytest.raises(ValueError, lambda: s.sum(axis=axis))\n pytest.raises(ValueError, lambda: s.prod(axis=axis))\n pytest.raises(ValueError, lambda: s.product(axis=axis))\n pytest.raises(ValueError, lambda: s.min(axis=axis))\n pytest.raises(ValueError, lambda: s.max(axis=axis))\n # only count doesn't have axis keyword\n pytest.raises(TypeError, lambda: s.count(axis=axis))\n pytest.raises(ValueError, lambda: s.std(axis=axis))\n pytest.raises(ValueError, lambda: s.var(axis=axis))\n pytest.raises(ValueError, lambda: s.sem(axis=axis))\n pytest.raises(ValueError, lambda: s.mean(axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_non_numeric_dtypes_test_reductions_non_numeric_dtypes.None_18": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_non_numeric_dtypes_test_reductions_non_numeric_dtypes.None_18", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 988, "end_line": 1052, "span_ids": ["test_reductions_non_numeric_dtypes"], "tokens": 732}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_non_numeric_dtypes():\n # test non-numric blocks\n\n def check_raises(d, p, func):\n pytest.raises((TypeError, ValueError), lambda: getattr(d, func)().compute())\n pytest.raises((TypeError, ValueError), lambda: getattr(p, func)())\n\n pds = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n dds = dd.from_pandas(pds, 2)\n assert_eq(dds.sum(), pds.sum())\n check_raises(dds, pds, \"prod\")\n check_raises(dds, pds, \"product\")\n assert_eq(dds.min(), pds.min())\n assert_eq(dds.max(), pds.max())\n assert_eq(dds.count(), pds.count())\n check_raises(dds, pds, \"std\")\n check_raises(dds, pds, \"var\")\n check_raises(dds, pds, \"sem\")\n check_raises(dds, pds, \"skew\")\n check_raises(dds, pds, \"kurtosis\")\n assert_eq(dds.nunique(), pds.nunique())\n\n for pds in [\n pd.Series(pd.Categorical([1, 2, 3, 4, 5], ordered=True)),\n pd.Series(pd.Categorical(list(\"abcde\"), ordered=True)),\n pd.Series(pd.date_range(\"2011-01-01\", freq=\"D\", periods=5)),\n ]:\n dds = dd.from_pandas(pds, 2)\n\n check_raises(dds, pds, \"sum\")\n check_raises(dds, pds, \"prod\")\n check_raises(dds, pds, \"product\")\n assert_eq(dds.min(), pds.min())\n assert_eq(dds.max(), pds.max())\n assert_eq(dds.count(), pds.count())\n if PANDAS_GT_120 and pds.dtype == \"datetime64[ns]\":\n # std is implemented for datetimes in pandas 1.2.0, but dask\n # implementation depends on var which isn't\n pass\n else:\n check_raises(dds, pds, \"std\")\n check_raises(dds, pds, \"var\")\n check_raises(dds, pds, \"sem\")\n check_raises(dds, pds, \"skew\")\n check_raises(dds, pds, \"kurtosis\")\n assert_eq(dds.nunique(), pds.nunique())\n\n pds = pd.Series(pd.timedelta_range(\"1 days\", freq=\"D\", periods=5))\n dds = dd.from_pandas(pds, 2)\n assert_eq(dds.sum(), pds.sum())\n assert_eq(dds.min(), pds.min())\n assert_eq(dds.max(), pds.max())\n assert_eq(dds.count(), pds.count())\n # both pandas and dask skew calculations do not support timedelta\n check_raises(dds, pds, \"skew\")\n check_raises(dds, pds, \"kurtosis\")\n\n # ToDo: pandas supports timedelta std, dask returns float64\n # assert_eq(dds.std(), pds.std())\n\n # ToDo: pandas supports timedelta std, otherwise dask raises:\n # TypeError: unsupported operand type(s) for *: 'float' and 'Timedelta'\n # assert_eq(dds.mean(), pds.mean())\n\n assert_eq(dds.nunique(), pds.nunique())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.for_axis_in_0_1_index_test_reductions_frame.assert_dask_graph_ddf1_me": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.for_axis_in_0_1_index_test_reductions_frame.assert_dask_graph_ddf1_me", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1080, "end_line": 1130, 
"span_ids": ["test_reductions_frame"], "tokens": 778}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame(split_every):\n # ... other code\n\n for axis in [0, 1, \"index\", \"columns\"]:\n assert_eq(ddf1.sum(axis=axis, split_every=split_every), pdf1.sum(axis=axis))\n assert_eq(ddf1.prod(axis=axis, split_every=split_every), pdf1.prod(axis=axis))\n assert_eq(\n ddf1.product(axis=axis, split_every=split_every), pdf1.product(axis=axis)\n )\n assert_eq(ddf1.min(axis=axis, split_every=split_every), pdf1.min(axis=axis))\n assert_eq(ddf1.max(axis=axis, split_every=split_every), pdf1.max(axis=axis))\n assert_eq(ddf1.count(axis=axis, split_every=split_every), pdf1.count(axis=axis))\n assert_eq(ddf1.std(axis=axis, split_every=split_every), pdf1.std(axis=axis))\n assert_eq(ddf1.var(axis=axis, split_every=split_every), pdf1.var(axis=axis))\n assert_eq(ddf1.sem(axis=axis, split_every=split_every), pdf1.sem(axis=axis))\n assert_eq(\n ddf1.std(axis=axis, ddof=0, split_every=split_every),\n pdf1.std(axis=axis, ddof=0),\n )\n assert_eq(\n ddf1.var(axis=axis, ddof=0, split_every=split_every),\n pdf1.var(axis=axis, ddof=0),\n )\n assert_eq(\n ddf1.sem(axis=axis, ddof=0, split_every=split_every),\n pdf1.sem(axis=axis, ddof=0),\n )\n assert_eq(ddf1.mean(axis=axis, split_every=split_every), pdf1.mean(axis=axis))\n\n pytest.raises(ValueError, lambda: ddf1.sum(axis=\"incorrect\").compute())\n\n # axis=0\n assert_dask_graph(ddf1.sum(split_every=split_every), \"dataframe-sum\")\n assert_dask_graph(ddf1.prod(split_every=split_every), \"dataframe-prod\")\n assert_dask_graph(ddf1.min(split_every=split_every), \"dataframe-min\")\n assert_dask_graph(ddf1.max(split_every=split_every), \"dataframe-max\")\n assert_dask_graph(ddf1.count(split_every=split_every), \"dataframe-count\")\n\n # std, var, sem, and mean consist of moment_* operations\n assert_dask_graph(ddf1.std(split_every=split_every), \"dataframe-var\")\n assert_dask_graph(ddf1.std(split_every=split_every), \"moment_chunk\")\n assert_dask_graph(ddf1.std(split_every=split_every), \"moment_agg\")\n assert_dask_graph(ddf1.std(split_every=split_every), \"values\")\n\n assert_dask_graph(ddf1.var(split_every=split_every), \"moment_chunk\")\n assert_dask_graph(ddf1.var(split_every=split_every), \"moment_agg\")\n assert_dask_graph(ddf1.var(split_every=split_every), \"values\")\n\n assert_dask_graph(ddf1.sem(split_every=split_every), \"dataframe-var\")\n assert_dask_graph(ddf1.sem(split_every=split_every), \"moment_chunk\")\n assert_dask_graph(ddf1.sem(split_every=split_every), \"moment_agg\")\n assert_dask_graph(ddf1.sem(split_every=split_every), \"values\")\n\n assert_dask_graph(ddf1.mean(split_every=split_every), \"dataframe-sum\")\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.None_31_test_reductions_frame.None_40": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame.None_31_test_reductions_frame.None_40", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1131, "end_line": 1142, "span_ids": ["test_reductions_frame"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef test_reductions_frame(split_every):\n # ... other code\n assert_dask_graph(ddf1.mean(split_every=split_every), \"dataframe-count\")\n\n # axis=1\n assert_dask_graph(ddf1.sum(axis=1, split_every=split_every), \"dataframe-sum\")\n assert_dask_graph(ddf1.prod(axis=1, split_every=split_every), \"dataframe-prod\")\n assert_dask_graph(ddf1.min(axis=1, split_every=split_every), \"dataframe-min\")\n assert_dask_graph(ddf1.max(axis=1, split_every=split_every), \"dataframe-max\")\n assert_dask_graph(ddf1.count(axis=1, split_every=split_every), \"dataframe-count\")\n assert_dask_graph(ddf1.std(axis=1, split_every=split_every), \"dataframe-std\")\n assert_dask_graph(ddf1.var(axis=1, split_every=split_every), \"dataframe-var\")\n assert_dask_graph(ddf1.sem(axis=1, split_every=split_every), \"dataframe-sem\")\n assert_dask_graph(ddf1.mean(axis=1, split_every=split_every), \"dataframe-mean\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan.None_1_test_reductions_frame_nan.None_1.None_23": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_nan.None_1_test_reductions_frame_nan.None_1.None_23", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1253, "end_line": 1329, "span_ids": ["test_reductions_frame_nan"], "tokens": 808}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_every\", [False, 2])\ndef 
test_reductions_frame_nan(split_every):\n # ... other code\n\n with warnings.catch_warnings(record=True):\n assert_eq(df.sum(skipna=False), ddf.sum(skipna=False, split_every=split_every))\n assert_eq(\n df.prod(skipna=False), ddf.prod(skipna=False, split_every=split_every)\n )\n assert_eq(\n df.product(skipna=False), ddf.product(skipna=False, split_every=split_every)\n )\n assert_eq(df.min(skipna=False), ddf.min(skipna=False, split_every=split_every))\n assert_eq(df.max(skipna=False), ddf.max(skipna=False, split_every=split_every))\n assert_eq(df.std(skipna=False), ddf.std(skipna=False, split_every=split_every))\n assert_eq(df.var(skipna=False), ddf.var(skipna=False, split_every=split_every))\n assert_eq(df.sem(skipna=False), ddf.sem(skipna=False, split_every=split_every))\n assert_eq(\n df.std(skipna=False, ddof=0),\n ddf.std(skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.var(skipna=False, ddof=0),\n ddf.var(skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.sem(skipna=False, ddof=0),\n ddf.sem(skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.mean(skipna=False), ddf.mean(skipna=False, split_every=split_every)\n )\n\n assert_eq(\n df.sum(axis=1, skipna=False),\n ddf.sum(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.prod(axis=1, skipna=False),\n ddf.prod(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.product(axis=1, skipna=False),\n ddf.product(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.min(axis=1, skipna=False),\n ddf.min(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.max(axis=1, skipna=False),\n ddf.max(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.std(axis=1, skipna=False),\n ddf.std(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.var(axis=1, skipna=False),\n ddf.var(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.sem(axis=1, skipna=False),\n ddf.sem(axis=1, skipna=False, split_every=split_every),\n )\n assert_eq(\n df.std(axis=1, skipna=False, ddof=0),\n ddf.std(axis=1, skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.var(axis=1, skipna=False, ddof=0),\n ddf.var(axis=1, skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.sem(axis=1, skipna=False, ddof=0),\n ddf.sem(axis=1, skipna=False, ddof=0, split_every=split_every),\n )\n assert_eq(\n df.mean(axis=1, skipna=False),\n ddf.mean(axis=1, skipna=False, split_every=split_every),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_test_embarrassingly_parallel_operations.assert_len_a_sample_frac_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_repartition_partition_size_test_embarrassingly_parallel_operations.assert_len_a_sample_frac_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1950, "end_line": 2157, "span_ids": ["test_repartition_npartitions_same_limits", "test_repartition_freq_divisions", "test_repartition_freq_day", 
"test_repartition_freq", "test_repartition_input_errors", "test_repartition_partition_size_arg", "test_embarrassingly_parallel_operations", "test_repartition_object_index", "test_repartition_partition_size", "test_repartition_freq_errors", "test_map_freq_to_period_start", "test_repartition_freq_month", "test_repartition_npartitions_numeric_edge_case"], "tokens": 2039}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"use_index\", [True, False])\n@pytest.mark.parametrize(\"n\", [2, 5])\n@pytest.mark.parametrize(\"partition_size\", [\"1kiB\", 379])\n@pytest.mark.parametrize(\"transform\", [lambda df: df, lambda df: df.x])\ndef test_repartition_partition_size(use_index, n, partition_size, transform):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6] * 10, \"y\": list(\"abdabd\") * 10},\n index=pd.Series([10, 20, 30, 40, 50, 60] * 10),\n )\n df = transform(df)\n a = dd.from_pandas(df, npartitions=n, sort=use_index)\n b = a.repartition(partition_size=partition_size)\n assert_eq(a, b, check_divisions=False)\n assert np.alltrue(b.map_partitions(total_mem_usage, deep=True).compute() <= 1024)\n parts = dask.get(b.dask, b.__dask_keys__())\n assert all(map(len, parts))\n\n\ndef test_repartition_partition_size_arg():\n df = pd.DataFrame({\"x\": range(10)})\n a = dd.from_pandas(df, npartitions=2)\n b = a.repartition(\"1 MiB\")\n assert b.npartitions == 1\n\n\ndef test_repartition_npartitions_same_limits():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3]},\n index=[\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 02:45:00.017999\"),\n pd.Timestamp(\"2017-05-09 05:59:58.938999\"),\n ],\n )\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.repartition(npartitions=10)\n\n\ndef test_repartition_npartitions_numeric_edge_case():\n \"\"\"\n Test that we cover numeric edge cases when\n int(ddf.npartitions / npartitions) * npartitions) != ddf.npartitions\n \"\"\"\n df = pd.DataFrame({\"x\": range(100)})\n a = dd.from_pandas(df, npartitions=15)\n assert a.npartitions == 15\n b = a.repartition(npartitions=11)\n assert_eq(a, b)\n\n\ndef test_repartition_object_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6] * 10}, index=list(\"abdabd\") * 10)\n a = dd.from_pandas(df, npartitions=5)\n b = a.repartition(npartitions=2)\n assert b.npartitions == 2\n assert_eq(b, df)\n\n b = a.repartition(npartitions=10)\n assert b.npartitions == 10\n assert_eq(b, df)\n assert not b.known_divisions\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"npartitions\", [1, 20, 243])\n@pytest.mark.parametrize(\"freq\", [\"1D\", \"7D\", \"28h\", \"1h\"])\n@pytest.mark.parametrize(\n \"end\", [\"2000-04-15\", \"2000-04-15 12:37:01\", \"2000-01-01 12:37:00\"]\n)\n@pytest.mark.parametrize(\n \"start\", [\"2000-01-01\", \"2000-01-01 12:30:00\", \"2000-01-01 12:30:00\"]\n)\ndef test_repartition_freq(npartitions, freq, start, end):\n start = pd.Timestamp(start)\n end = pd.Timestamp(end)\n ind = pd.date_range(start=start, end=end, freq=\"60s\")\n df = pd.DataFrame({\"x\": np.arange(len(ind))}, index=ind)\n ddf = dd.from_pandas(df, npartitions=npartitions, name=\"x\")\n\n ddf2 = ddf.repartition(freq=freq)\n assert_eq(ddf2, df)\n\n\ndef test_repartition_freq_divisions():\n df = pd.DataFrame(\n {\"x\": 
np.random.random(10)},\n index=pd.DatetimeIndex(np.random.random(10) * 100e9),\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n ddf2 = ddf.repartition(freq=\"15s\")\n for div in ddf2.divisions[1:-1]:\n assert div == div.round(\"15s\")\n assert ddf2.divisions[0] == df.index.min()\n assert ddf2.divisions[-1] == df.index.max()\n assert_eq(ddf2, df)\n\n\ndef test_repartition_freq_errors():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n with pytest.raises(TypeError) as info:\n ddf.repartition(freq=\"1s\")\n\n assert \"only\" in str(info.value)\n assert \"timeseries\" in str(info.value)\n\n\ndef test_repartition_freq_month():\n ts = pd.date_range(\"2015-01-01 00:00\", \"2015-05-01 23:50\", freq=\"10min\")\n df = pd.DataFrame(\n np.random.randint(0, 100, size=(len(ts), 4)), columns=list(\"ABCD\"), index=ts\n )\n ddf = dd.from_pandas(df, npartitions=1).repartition(freq=\"MS\")\n\n assert_eq(df, ddf)\n\n assert ddf.divisions == (\n pd.Timestamp(\"2015-1-1 00:00:00\"),\n pd.Timestamp(\"2015-2-1 00:00:00\"),\n pd.Timestamp(\"2015-3-1 00:00:00\"),\n pd.Timestamp(\"2015-4-1 00:00:00\"),\n pd.Timestamp(\"2015-5-1 00:00:00\"),\n pd.Timestamp(\"2015-5-1 23:50:00\"),\n )\n\n assert ddf.npartitions == 5\n\n\ndef test_repartition_freq_day():\n index = [\n pd.Timestamp(\"2020-1-1\"),\n pd.Timestamp(\"2020-1-1\"),\n pd.Timestamp(\"2020-1-2\"),\n pd.Timestamp(\"2020-1-2\"),\n ]\n pdf = pd.DataFrame(index=index, data={\"foo\": \"foo\"})\n ddf = dd.from_pandas(pdf, npartitions=1).repartition(freq=\"D\")\n assert_eq(ddf, pdf)\n assert ddf.npartitions == 2\n assert ddf.divisions == (\n pd.Timestamp(\"2020-1-1\"),\n pd.Timestamp(\"2020-1-2\"),\n pd.Timestamp(\"2020-1-2\"),\n )\n\n\n@pytest.mark.parametrize(\n \"freq, expected_freq\",\n [\n (\"M\", \"MS\"),\n (\"MS\", \"MS\"),\n (\"2M\", \"2MS\"),\n (\"Q\", \"QS\"),\n (\"Q-FEB\", \"QS-FEB\"),\n (\"2Q\", \"2QS\"),\n (\"2Q-FEB\", \"2QS-FEB\"),\n (\"2QS-FEB\", \"2QS-FEB\"),\n (\"BQ\", \"BQS\"),\n (\"2BQ\", \"2BQS\"),\n (\"SM\", \"SMS\"),\n (\"A\", \"AS\"),\n (\"A-JUN\", \"AS-JUN\"),\n (\"BA\", \"BAS\"),\n (\"2BA\", \"2BAS\"),\n (\"BY\", \"BAS\"),\n (\"Y\", \"AS\"),\n (pd.Timedelta(seconds=1), pd.Timedelta(seconds=1)),\n ],\n)\ndef test_map_freq_to_period_start(freq, expected_freq):\n new_freq = _map_freq_to_period_start(freq)\n assert new_freq == expected_freq\n\n\ndef test_repartition_input_errors():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n with pytest.raises(ValueError):\n ddf.repartition(npartitions=5, divisions=[None, None])\n with pytest.raises(ValueError):\n ddf.repartition(npartitions=5, partition_size=\"5MiB\")\n\n\ndef test_embarrassingly_parallel_operations():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n\n assert_eq(a.x.astype(\"float32\"), df.x.astype(\"float32\"))\n assert a.x.astype(\"float32\").compute().dtype == \"float32\"\n\n assert_eq(a.x.dropna(), df.x.dropna())\n\n assert_eq(a.x.between(2, 4), df.x.between(2, 4))\n\n assert_eq(a.x.clip(2, 4), df.x.clip(2, 4))\n\n assert_eq(a.x.notnull(), df.x.notnull())\n assert_eq(a.x.isnull(), df.x.isnull())\n assert_eq(a.notnull(), df.notnull())\n assert_eq(a.isnull(), df.isnull())\n\n assert len(a.sample(frac=0.5).compute()) < len(df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_from_textwrap_import_dede_test_repr_meta_mutation.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_format.py_from_textwrap_import_dede_test_repr_meta_mutation.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_format.py", "file_name": "test_format.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 42, "span_ids": ["test_repr", "imports", "test_repr_meta_mutation"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from textwrap import dedent\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask.array as da\nimport dask.dataframe as dd\n\nstyle = \"\"\"\n\"\"\"\n\n\ndef test_repr():\n df = pd.DataFrame({\"x\": list(range(100))})\n ddf = dd.from_pandas(df, 3)\n\n for x in [ddf, ddf.index, ddf.x]:\n assert type(x).__name__ in repr(x)\n assert str(x.npartitions) in repr(x)\n\n\ndef test_repr_meta_mutation():\n # Check that the repr changes when meta changes\n df = pd.DataFrame({\"a\": range(5), \"b\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n s1 = repr(ddf)\n assert repr(ddf) == s1\n ddf.b = ddf.b.astype(\"category\")\n assert repr(ddf) != s1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_np_test_basic.assert_abs_approx_exact": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_hyperloglog.py_np_test_basic.assert_abs_approx_exact", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_hyperloglog.py", "file_name": "test_hyperloglog.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 71, "span_ids": ["imports", "test_basic"], "tokens": 652}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\n\nrs = np.random.RandomState(96)\n\n\n@pytest.mark.parametrize(\n \"df\",\n [\n pd.DataFrame(\n {\n \"x\": [1, 2, 3] * 3,\n \"y\": [1.2, 3.4, 5.6] * 3,\n \"z\": -(np.arange(9, dtype=np.int8)),\n }\n ),\n pd.DataFrame(\n {\n \"x\": rs.randint(0, 1000000, (10000,)),\n \"y\": rs.randn(10000),\n \"z\": rs.uniform(0, 9999999, (10000,)),\n }\n ),\n pd.DataFrame(\n {\n \"x\": np.repeat(rs.randint(0, 1000000, (1000,)), 3),\n \"y\": np.repeat(rs.randn(1000), 3),\n \"z\": np.repeat(rs.uniform(0, 9999999, (1000,)), 3),\n }\n ),\n pd.DataFrame({\"x\": rs.randint(0, 1000000, (10000,))}),\n pd.DataFrame(\n {\n \"x\": rs.randint(0, 1000000, 
(7,)),\n \"y\": [\"a\", \"bet\", \"is\", \"a\", \"tax\", \"on\", \"bs\"],\n }\n ),\n pd.DataFrame(\n {\n \"w\": np.zeros((20000,)),\n \"x\": np.zeros((20000,)),\n \"y\": np.zeros((20000,)) + 4803592,\n \"z\": np.zeros((20000,)),\n }\n ),\n pd.DataFrame({\"x\": [1, 2, 3] * 1000}),\n pd.DataFrame({\"x\": np.random.random(1000)}),\n pd.DataFrame(\n {\n \"a\": [1, 2, 3] * 3,\n \"b\": [1.2, 3.4, 5.6] * 3,\n \"c\": [1 + 2j, 3 + 4j, 5 + 6j] * 3,\n \"d\": -(np.arange(9, dtype=np.int8)),\n }\n ),\n pd.Series([1, 2, 3] * 1000),\n pd.Series(np.random.random(1000)),\n pd.Series(np.random.random(1000), index=np.ones(1000)),\n pd.Series(np.random.random(1000), index=np.random.random(1000)),\n ],\n)\n@pytest.mark.parametrize(\"npartitions\", [2, 20])\ndef test_basic(df, npartitions):\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n approx = ddf.nunique_approx().compute(scheduler=\"sync\")\n exact = len(df.drop_duplicates())\n assert abs(approx - exact) <= 2 or abs(approx - exact) / exact < 0.05", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_np_if_PANDAS_GT_110_.CHECK_FREQ_check_freq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_np_if_PANDAS_GT_110_.CHECK_FREQ_check_freq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_120, tm\nfrom dask.dataframe.indexing import _coerce_loc_index\nfrom dask.dataframe.utils import assert_eq, make_meta\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n}\nmeta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"), parent_meta=pd.DataFrame()\n)\nd = dd.DataFrame(dsk, \"x\", meta, [0, 5, 9, 9])\nfull = d.compute()\nCHECK_FREQ = {}\nif PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_np_df_left.return.pd_DataFrame_dict_idx_idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_np_df_left.return.pd_DataFrame_dict_idx_idx", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports", "df_left"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\n\n\n# Fixtures\n# ========\n@pytest.fixture\ndef df_left():\n # Create frame with 10 partitions\n # Frame has 11 distinct idx values\n partition_sizes = np.array([3, 4, 2, 5, 3, 2, 5, 9, 4, 7, 4])\n idx = [i for i, s in enumerate(partition_sizes) for _ in range(s)]\n k = [i for s in partition_sizes for i in range(s)]\n vi = range(len(k))\n\n return pd.DataFrame(dict(idx=idx, k=k, v1=vi)).set_index([\"idx\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_double_bcast_left_test_merge_known_to_double_bcast_left.result_head_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_double_bcast_left_test_merge_known_to_double_bcast_left.result_head_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 207, "end_line": 227, "span_ids": ["test_merge_known_to_double_bcast_left"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"right\"])\n@pytest.mark.parametrize(\"broadcast\", [True, 0.75])\ndef test_merge_known_to_double_bcast_left(\n df_left, df_right, ddf_left_double, ddf_right, on, shuffle_method, how, broadcast\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left_double.merge(\n ddf_right, on=on, how=how, broadcast=broadcast, shuffle=shuffle_method\n )\n\n # Assertions\n assert_eq(result, expected)\n # Hash join used in disk-shuffling doesn't preserve divisions.\n if shuffle_method == \"task\":\n assert_eq(result.divisions, ddf_right.divisions)\n\n # Check that culling works\n result.head(1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_column_with_nulls_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_column_with_nulls_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 217, "end_line": 234, "span_ids": ["test_merge_column_with_nulls"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"repartition\", [None, 4])\ndef test_merge_column_with_nulls(repartition):\n # See: https://github.com/dask/dask/issues/7558\n\n df1 = pd.DataFrame({\"a\": [\"0\", \"0\", None, None, None, None, \"5\", \"7\", \"15\", \"33\"]})\n df2 = pd.DataFrame({\"c\": [\"1\", \"2\", \"3\", \"4\"], \"b\": [\"0\", \"5\", \"7\", \"15\"]})\n df1_d = dd.from_pandas(df1, npartitions=4)\n df2_d = dd.from_pandas(df2, npartitions=3).set_index(\"b\")\n if repartition:\n df2_d = df2_d.repartition(repartition)\n\n pandas_result = df1.merge(\n df2.set_index(\"b\"), how=\"left\", left_on=\"a\", right_index=True\n )\n dask_result = df1_d.merge(df2_d, how=\"left\", left_on=\"a\", right_index=True)\n\n assert_eq(dask_result, pandas_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_ignore_order_test_concat_ignore_order.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_ignore_order_test_concat_ignore_order.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2146, "end_line": 2167, "span_ids": ["test_concat_ignore_order"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ordered\", [True, False])\ndef test_concat_ignore_order(ordered):\n pdf1 = pd.DataFrame(\n {\n \"x\": pd.Categorical(\n [\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\"], ordered=ordered\n )\n }\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame(\n {\n \"x\": pd.Categorical(\n [\"c\", \"b\", \"a\"], categories=[\"c\", \"b\", \"a\"], ordered=ordered\n )\n }\n )\n ddf2 = dd.from_pandas(pdf2, 2)\n expected = pd.concat([pdf1, pdf2])\n expected[\"x\"] = expected[\"x\"].astype(\"category\")\n result = dd.concat([ddf1, ddf2], ignore_order=True)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_does_not_raise_setting_with_copy_warning_test_categorical_merge_does_not_raise_setting_with_copy_warning.assert_eq_df1_join_df2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_does_not_raise_setting_with_copy_warning_test_categorical_merge_does_not_raise_setting_with_copy_warning.assert_eq_df1_join_df2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2252, "end_line": 2260, "span_ids": ["test_categorical_merge_does_not_raise_setting_with_copy_warning"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_merge_does_not_raise_setting_with_copy_warning():\n # https://github.com/dask/dask/issues/7087\n df1 = pd.DataFrame(data={\"A\": [\"a\", \"b\", \"c\"]}, index=[\"s\", \"v\", \"w\"])\n df2 = pd.DataFrame(data={\"B\": [\"t\", \"d\", \"i\"]}, index=[\"v\", \"w\", \"r\"])\n\n ddf1 = dd.from_pandas(df1, npartitions=1)\n\n df2 = df2.astype({\"B\": \"category\"})\n assert_eq(df1.join(df2), ddf1.join(df2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_numeric.py_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_numeric.py_np_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_numeric.py", "file_name": "test_numeric.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 58, "span_ids": ["test_to_numeric_on_dask_dataframe_series", "test_to_numeric_on_scalars", "test_to_numeric_on_dask_array", "test_to_numeric_on_dask_dataframe_series_with_meta", "imports", "test_to_numeric_raises", "test_to_numeric_on_dask_dataframe_dataframe_raises_error"], "tokens": 504}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom dask.array import Array, from_array\nfrom dask.dataframe import Series, from_pandas, to_numeric\nfrom dask.delayed import Delayed\n\n\n@pytest.mark.parametrize(\"arg\", [\"5\", 5, \"5 \"])\ndef test_to_numeric_on_scalars(arg):\n output = to_numeric(arg)\n assert isinstance(output, Delayed)\n assert output.compute() == 5\n\n\ndef test_to_numeric_on_dask_array():\n arg = from_array([\"1.0\", \"2\", \"-3\", \"5.1\"])\n expected = np.array([1.0, 2.0, -3.0, 5.1])\n output = to_numeric(arg)\n assert isinstance(output, Array)\n assert 
list(output.compute()) == list(expected)\n\n\ndef test_to_numeric_on_dask_dataframe_series():\n s = pd.Series([\"1.0\", \"2\", -3, -5.1])\n arg = from_pandas(s, npartitions=2)\n expected = pd.to_numeric(s)\n output = to_numeric(arg)\n assert output.dtype == \"int64\"\n assert isinstance(output, Series)\n assert list(output.compute()) == list(expected)\n\n\ndef test_to_numeric_on_dask_dataframe_series_with_meta():\n s = pd.Series([\"1.0\", \"2\", -3, -5.1])\n arg = from_pandas(s, npartitions=2)\n expected = pd.to_numeric(s)\n output = to_numeric(arg, meta=pd.Series([], dtype=\"float64\"))\n assert output.dtype == \"float64\"\n assert isinstance(output, Series)\n assert list(output.compute()) == list(expected)\n\n\ndef test_to_numeric_on_dask_dataframe_dataframe_raises_error():\n s = pd.Series([\"1.0\", \"2\", -3, -5.1])\n df = pd.DataFrame({\"a\": s, \"b\": s})\n arg = from_pandas(df, npartitions=2)\n with pytest.raises(TypeError, match=\"arg must be a list, tuple, dask.\"):\n to_numeric(arg)\n\n\ndef test_to_numeric_raises():\n with pytest.raises(ValueError, match=\"invalid error value\"):\n to_numeric(\"10\", errors=\"invalid\")\n with pytest.raises(KeyError, match=\"``meta`` is not allowed\"):\n to_numeric(\"10\", meta=pd.Series([], dtype=\"float64\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_pd_dfs.list_dsk_values_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_optimize_dataframe.py_pd_dfs.list_dsk_values_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_optimize_dataframe.py", "file_name": "test_optimize_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 11, "span_ids": ["imports"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pandas as pd\n\nimport dask\nimport dask.dataframe as dd\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n}\ndfs = list(dsk.values())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_test_set_index_interpolate.assert_d2_divisions_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_test_set_index_interpolate.assert_d2_divisions_3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", 
"start_line": 635, "end_line": 661, "span_ids": ["test_set_index_interpolate"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index_interpolate(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n df = pd.DataFrame({\"x\": [4, 1, 1, 3, 3], \"y\": [1.0, 1, 1, 1, 2]})\n\n if engine == \"cudf\":\n gdf = cudf.from_pandas(df)\n d = dask_cudf.from_cudf(gdf, npartitions=3)\n else:\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"x\", npartitions=3)\n assert d1.npartitions == 3\n assert set(d1.divisions) == {1, 2, 4}\n\n d2 = d.set_index(\"y\", npartitions=3)\n assert d2.divisions[0] == 1.0\n assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0\n assert d2.divisions[3] == 2.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_int_test_set_index_interpolate_int.assert_all_np_issubdtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_int_test_set_index_interpolate_int.assert_all_np_issubdtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 664, "end_line": 685, "span_ids": ["test_set_index_interpolate_int"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index_interpolate_int(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n L = sorted(list(range(0, 200, 10)) * 2)\n df = pd.DataFrame({\"x\": 2 * L})\n\n if engine == \"cudf\":\n gdf = cudf.from_pandas(df)\n d = dask_cudf.from_cudf(gdf, npartitions=2)\n else:\n d = dd.from_pandas(df, 2)\n\n d1 = d.set_index(\"x\", npartitions=10)\n assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_large_uint_test_set_index_interpolate_large_uint.assert_set_d1_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_interpolate_large_uint_test_set_index_interpolate_large_uint.assert_set_d1_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 688, "end_line": 712, "span_ids": ["test_set_index_interpolate_large_uint"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"engine\", [\"pandas\", pytest.param(\"cudf\", marks=pytest.mark.gpu)]\n)\ndef test_set_index_interpolate_large_uint(engine):\n if engine == \"cudf\":\n # NOTE: engine == \"cudf\" requires cudf/dask_cudf,\n # will be skipped by non-GPU CI.\n\n cudf = pytest.importorskip(\"cudf\")\n dask_cudf = pytest.importorskip(\"dask_cudf\")\n\n \"\"\"This test is for #7304\"\"\"\n df = pd.DataFrame(\n {\"x\": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}\n )\n\n if engine == \"cudf\":\n gdf = cudf.from_pandas(df)\n d = dask_cudf.from_cudf(gdf, npartitions=2)\n else:\n d = dd.from_pandas(df, 1)\n\n d1 = d.set_index(\"x\", npartitions=1)\n assert d1.npartitions == 1\n assert set(d1.divisions) == {612509347682975743, 616762138058293247}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_serialize_test_shuffle_hlg_layer_serialize.for_layer_in_dsk_layers_v.assert_layer_roundtrip_ke": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_hlg_layer_serialize_test_shuffle_hlg_layer_serialize.for_layer_in_dsk_layers_v.assert_layer_roundtrip_ke", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1192, "end_line": 1216, "span_ids": ["test_shuffle_hlg_layer_serialize"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npartitions\",\n [\n 10, # ShuffleLayer\n 1, # SimpleShuffleLayer\n ],\n)\ndef test_shuffle_hlg_layer_serialize(npartitions):\n ddf = dd.from_pandas(\n pd.DataFrame({\"a\": np.random.randint(0, 10, 100)}), npartitions=npartitions\n )\n # Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks\n ddf_shuffled = ddf.shuffle(\"a\", 
max_branch=3, shuffle=\"tasks\")\n\n # Ensure shuffle layers can be serialized and don't result in\n # the underlying low-level graph being materialized\n dsk = ddf_shuffled.__dask_graph__()\n for layer in dsk.layers.values():\n if not isinstance(layer, dd.shuffle.SimpleShuffleLayer):\n continue\n assert not hasattr(layer, \"_cached_dict\")\n layer_roundtrip = pickle.loads(pickle.dumps(layer))\n assert type(layer_roundtrip) == type(layer)\n assert not hasattr(layer_roundtrip, \"_cached_dict\")\n assert layer_roundtrip.keys() == layer.keys()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_visualize_visualize.return.p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/profile_visualize.py_visualize_visualize.return.p", "embedding": null, "metadata": {"file_path": "dask/diagnostics/profile_visualize.py", "file_name": "profile_visualize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 156, "end_line": 231, "span_ids": ["visualize"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def visualize(\n profilers, filename=\"profile.html\", show=True, save=None, mode=None, **kwargs\n):\n \"\"\"Visualize the results of profiling in a bokeh plot.\n\n If multiple profilers are passed in, the plots are stacked vertically.\n\n Parameters\n ----------\n profilers : profiler or list\n Profiler or list of profilers.\n filename : string, optional\n Name of the plot output file.\n show : boolean, optional\n If True (default), the plot is opened in a browser.\n save : boolean, optional\n If True (default when not in notebook), the plot is saved to disk.\n mode : str, optional\n Mode passed to bokeh.output_file()\n **kwargs\n Other keyword arguments, passed to bokeh.figure. These will override\n all defaults set by visualize.\n\n Returns\n -------\n The completed bokeh plot object.\n \"\"\"\n bp = import_required(\"bokeh.plotting\", _BOKEH_MISSING_MSG)\n from bokeh.io import state\n\n if \"file_path\" in kwargs:\n warnings.warn(\n \"The file_path keyword argument is deprecated \"\n \"and will be removed in a future release. 
\"\n \"Please use filename instead.\",\n category=FutureWarning,\n stacklevel=2,\n )\n filename = kwargs.pop(\"file_path\")\n\n if save is None:\n save = not state.curstate().notebook\n\n if not isinstance(profilers, list):\n profilers = [profilers]\n figs = [prof._plot(**kwargs) for prof in profilers]\n # Stack the plots\n if len(figs) == 1:\n p = figs[0]\n else:\n top = figs[0]\n for f in figs[1:]:\n f.x_range = top.x_range\n f.title = None\n f.min_border_top = 20\n if BOKEH_VERSION().major < 3:\n f.plot_height -= 30\n else:\n f.height -= 30\n for f in figs[:-1]:\n f.xaxis.axis_label = None\n f.min_border_bottom = 20\n if BOKEH_VERSION().major < 3:\n f.plot_height -= 30\n else:\n f.height -= 30\n for f in figs:\n f.min_border_left = 75\n f.min_border_right = 75\n p = bp.gridplot([[f] for f in figs])\n if show:\n bp.show(p)\n if save:\n bp.output_file(filename, mode=mode)\n bp.save(p)\n return p", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_checkpoint_checkpoint.if_len_collections_1_.else_.return.delayed_chunks_checkpoint": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_checkpoint_checkpoint.if_len_collections_1_.else_.return.delayed_chunks_checkpoint", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 29, "end_line": 68, "span_ids": ["checkpoint"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def checkpoint(\n *collections,\n split_every: float | Literal[False] | None = None,\n) -> Delayed:\n \"\"\"Build a :doc:`delayed` which waits until all chunks of the input collection(s)\n have been computed before returning None.\n\n Parameters\n ----------\n collections\n Zero or more Dask collections or nested data structures containing zero or more\n collections\n split_every: int >= 2 or False, optional\n Determines the depth of the recursive aggregation. If greater than the number of\n input keys, the aggregation will be performed in multiple steps; the depth of\n the aggregation graph will be :math:`log_{split_every}(input keys)`. Setting to\n a low value can reduce cache size and network transfers, at the cost of more CPU\n and a larger dask graph.\n\n Set to False to disable. 
Defaults to 8.\n\n Returns\n -------\n :doc:`delayed` yielding None\n \"\"\"\n if split_every is None:\n # FIXME https://github.com/python/typeshed/issues/5074\n split_every = 8 # type: ignore\n elif split_every is not False:\n split_every = int(split_every) # type: ignore\n if split_every < 2: # type: ignore\n raise ValueError(\"split_every must be False, None, or >= 2\")\n\n collections, _ = unpack_collections(*collections)\n if len(collections) == 1:\n return _checkpoint_one(collections[0], split_every)\n else:\n return delayed(chunks.checkpoint)(\n *(_checkpoint_one(c, split_every) for c in collections)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__checkpoint_one__checkpoint_one.return.Delayed_name_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__checkpoint_one__checkpoint_one.return.Delayed_name_dsk_", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 125, "span_ids": ["_checkpoint_one"], "tokens": 400}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _checkpoint_one(collection, split_every) -> Delayed:\n tok = tokenize(collection)\n name = \"checkpoint-\" + tok\n\n keys_iter = flatten(collection.__dask_keys__())\n try:\n next(keys_iter)\n next(keys_iter)\n except StopIteration:\n # Collection has 0 or 1 keys; no need for a map step\n layer = {name: (chunks.checkpoint, collection.__dask_keys__())}\n dsk = HighLevelGraph.from_collections(name, layer, dependencies=(collection,))\n return Delayed(name, dsk)\n\n # Collection has 2+ keys; apply a two-step map->reduce algorithm so that we\n # transfer over the network and store in RAM only a handful of None's instead of\n # the full computed collection's contents\n dsks = []\n map_names = set()\n map_keys = []\n\n for prev_name in get_collection_names(collection):\n map_name = \"checkpoint_map-\" + tokenize(prev_name, tok)\n map_names.add(map_name)\n map_layer = _build_map_layer(chunks.checkpoint, prev_name, map_name, collection)\n map_keys += list(map_layer.get_output_keys())\n dsks.append(\n HighLevelGraph.from_collections(\n map_name, map_layer, dependencies=(collection,)\n )\n )\n\n # recursive aggregation\n reduce_layer: dict = {}\n while split_every and len(map_keys) > split_every:\n k = (name, len(reduce_layer))\n reduce_layer[k] = (chunks.checkpoint, map_keys[:split_every])\n map_keys = map_keys[split_every:] + [k]\n reduce_layer[name] = (chunks.checkpoint, map_keys)\n\n dsks.append(HighLevelGraph({name: reduce_layer}, dependencies={name: map_names}))\n dsk = HighLevelGraph.merge(*dsks)\n\n return Delayed(name, dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.get_dependencies_Layer.__dask_distributed_annotations_pack__.return.packed": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.get_dependencies_Layer.__dask_distributed_annotations_pack__.return.packed", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 149, "end_line": 192, "span_ids": ["Layer.get_dependencies", "Layer.__dask_distributed_annotations_pack__"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def get_dependencies(self, key: Hashable, all_hlg_keys: Iterable) -> set:\n \"\"\"Get dependencies of `key` in the layer\n\n Parameters\n ----------\n key: Hashable\n The key to find dependencies of\n all_hlg_keys: Iterable\n All keys in the high level graph.\n\n Returns\n -------\n deps: set\n A set of dependencies\n \"\"\"\n return keys_in_tasks(all_hlg_keys, [self[key]])\n\n def __dask_distributed_annotations_pack__(\n self, annotations: Mapping[str, Any] | None = None\n ) -> Mapping[str, Any] | None:\n \"\"\"Packs Layer annotations for transmission to scheduler\n\n Callables annotations are fully expanded over Layer keys, while\n other values are simply transmitted as is\n\n Parameters\n ----------\n annotations : Mapping[str, Any], optional\n A top-level annotations.\n\n Returns\n -------\n packed_annotations : dict\n Packed annotations.\n \"\"\"\n annotations = toolz.merge(self.annotations or {}, annotations or {})\n packed = {}\n for a, v in annotations.items():\n if callable(v):\n packed[a] = {stringify(k): v(k) for k in self}\n packed[a][\"__expanded_annotations__\"] = True\n else:\n packed[a] = v\n return packed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_external_keys_HighLevelGraph.get_all_external_keys.try_.except_AttributeError_.return.keys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.get_all_external_keys_HighLevelGraph.get_all_external_keys.try_.except_AttributeError_.return.keys", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 723, "end_line": 744, "span_ids": ["HighLevelGraph.get_all_external_keys"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def get_all_external_keys(self) -> set:\n 
\"\"\"Get all output keys of all layers\n\n This will in most cases _not_ materialize any layers, which makes\n it a relative cheap operation.\n\n Returns\n -------\n keys: set\n A set of all external keys\n \"\"\"\n try:\n return self._all_external_keys\n except AttributeError:\n keys: set = set()\n for layer in self.layers.values():\n # Note: don't use `keys |= ...`, because the RHS is a\n # collections.abc.Set rather than a real set, and this will\n # cause a whole new set to be constructed.\n keys.update(layer.get_output_keys())\n self._all_external_keys = keys\n return keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.items_HighLevelGraph.get_all_dependencies.return.self_key_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.items_HighLevelGraph.get_all_dependencies.return.self_key_dependencies", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 784, "end_line": 807, "span_ids": ["HighLevelGraph.get_all_dependencies", "HighLevelGraph.items", "HighLevelGraph.values"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def items(self):\n return self.to_dict().items()\n\n def values(self):\n return self.to_dict().values()\n\n def get_all_dependencies(self) -> dict[Hashable, Set]:\n \"\"\"Get dependencies of all keys\n\n This will in most cases materialize all layers, which makes\n it an expensive operation.\n\n Returns\n -------\n map: Mapping\n A map that maps each key to its dependencies\n \"\"\"\n all_keys = self.keys()\n missing_keys = all_keys - self.key_dependencies.keys()\n if missing_keys:\n for layer in self.layers.values():\n for k in missing_keys & layer.keys():\n self.key_dependencies[k] = layer.get_dependencies(k, all_keys)\n return self.key_dependencies", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__dask_distributed_pack___HighLevelGraph.__dask_distributed_pack__.return._layers_layers_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__dask_distributed_pack___HighLevelGraph.__dask_distributed_pack__.return._layers_layers_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1013, "end_line": 1057, "span_ids": ["HighLevelGraph.__dask_distributed_pack__"], "tokens": 296}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def __dask_distributed_pack__(\n self,\n client,\n client_keys: Iterable[Hashable],\n annotations: Mapping[str, Any] = None,\n ) -> dict:\n \"\"\"Pack the high level graph for Scheduler -> Worker communication\n\n The approach is to delegate the packaging to each layer in the high level graph\n by calling .__dask_distributed_pack__() and .__dask_distributed_annotations_pack__()\n on each layer.\n\n Parameters\n ----------\n client : distributed.Client\n The client calling this function.\n client_keys : Iterable[Hashable]\n List of keys requested by the client.\n annotations : Mapping[str, Any], optional\n A top-level annotations.\n\n Returns\n -------\n data: dict\n Packed high level graph layers\n \"\"\"\n # Dump each layer (in topological order)\n layers = []\n for layer in (self.layers[name] for name in self._toposort_layers()):\n layers.append(\n {\n \"__module__\": layer.__module__,\n \"__name__\": type(layer).__name__,\n \"state\": layer.__dask_distributed_pack__(\n self.get_all_external_keys(),\n self.key_dependencies,\n client,\n client_keys,\n ),\n \"annotations\": layer.__dask_distributed_annotations_pack__(\n annotations\n ),\n }\n )\n return {\"layers\": layers}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer.__reduce___SimpleShuffleLayer.__dask_distributed_pack__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer.__reduce___SimpleShuffleLayer.__dask_distributed_pack__.return._", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 511, "end_line": 539, "span_ids": ["SimpleShuffleLayer.__dask_distributed_pack__", "SimpleShuffleLayer.__reduce__"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n def __reduce__(self):\n attrs = [\n \"name\",\n \"column\",\n \"npartitions\",\n \"npartitions_input\",\n \"ignore_index\",\n \"name_input\",\n \"meta_input\",\n \"parts_out\",\n \"annotations\",\n ]\n return (SimpleShuffleLayer, tuple(getattr(self, attr) for attr in attrs))\n\n def __dask_distributed_pack__(\n self, all_hlg_keys, known_key_dependencies, client, client_keys\n ):\n from distributed.protocol.serialize import to_serialize\n\n return {\n \"name\": self.name,\n \"column\": self.column,\n \"npartitions\": self.npartitions,\n \"npartitions_input\": self.npartitions_input,\n \"ignore_index\": self.ignore_index,\n \"name_input\": self.name_input,\n \"meta_input\": to_serialize(self.meta_input),\n \"parts_out\": list(self.parts_out),\n }", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer.__dask_distributed_unpack___SimpleShuffleLayer.__dask_distributed_unpack__.return._dsk_toolz_valmap_dump": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer.__dask_distributed_unpack___SimpleShuffleLayer.__dask_distributed_unpack__.return._dsk_toolz_valmap_dump", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 541, "end_line": 564, "span_ids": ["SimpleShuffleLayer.__dask_distributed_unpack__"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n @classmethod\n def __dask_distributed_unpack__(cls, state, dsk, dependencies):\n from distributed.worker import dumps_task\n\n # msgpack will convert lists into tuples, here\n # we convert them back to lists\n if isinstance(state[\"column\"], tuple):\n state[\"column\"] = list(state[\"column\"])\n if \"inputs\" in state:\n state[\"inputs\"] = list(state[\"inputs\"])\n\n # Materialize the layer\n layer_dsk = cls(**state)._construct_graph(deserializing=True)\n\n # Convert all keys to strings and dump tasks\n layer_dsk = {\n stringify(k): stringify_collection_keys(v) for k, v in layer_dsk.items()\n }\n keys = layer_dsk.keys() | dsk.keys()\n\n # TODO: use shuffle-knowledge to calculate dependencies more efficiently\n deps = {k: keys_in_tasks(keys, [v]) for k, v in layer_dsk.items()}\n\n return {\"dsk\": toolz.valmap(dumps_task, layer_dsk), \"deps\": deps}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._construct_graph_SimpleShuffleLayer._construct_graph.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._construct_graph_SimpleShuffleLayer._construct_graph.return.dsk", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 566, "end_line": 612, "span_ids": ["SimpleShuffleLayer._construct_graph"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n def _construct_graph(self, deserializing=False):\n \"\"\"Construct graph for a simple shuffle operation.\"\"\"\n\n shuffle_group_name = \"group-\" + self.name\n\n if deserializing:\n # Use CallableLazyImport objects 
to avoid importing dataframe\n # module on the scheduler\n concat_func = CallableLazyImport(\"dask.dataframe.core._concat\")\n shuffle_group_func = CallableLazyImport(\n \"dask.dataframe.shuffle.shuffle_group\"\n )\n else:\n # Not running on distributed scheduler - Use explicit functions\n from dask.dataframe.core import _concat as concat_func\n from dask.dataframe.shuffle import shuffle_group as shuffle_group_func\n\n dsk = {}\n for part_out in self.parts_out:\n _concat_list = [\n (self.split_name, part_out, part_in)\n for part_in in range(self.npartitions_input)\n ]\n dsk[(self.name, part_out)] = (\n concat_func,\n _concat_list,\n self.ignore_index,\n )\n for _, _part_out, _part_in in _concat_list:\n dsk[(self.split_name, _part_out, _part_in)] = (\n operator.getitem,\n (shuffle_group_name, _part_in),\n _part_out,\n )\n if (shuffle_group_name, _part_in) not in dsk:\n dsk[(shuffle_group_name, _part_in)] = (\n shuffle_group_func,\n (self.name_input, _part_in),\n self.column,\n 0,\n self.npartitions,\n self.npartitions,\n self.ignore_index,\n self.npartitions,\n )\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer_ShuffleLayer.__dask_distributed_pack__.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer_ShuffleLayer.__dask_distributed_pack__.return.ret", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 633, "end_line": 745, "span_ids": ["ShuffleLayer.get_split_keys", "ShuffleLayer.__init__", "ShuffleLayer", "ShuffleLayer.__repr__", "ShuffleLayer.__dask_distributed_pack__", "ShuffleLayer.__reduce__"], "tokens": 710}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ShuffleLayer(SimpleShuffleLayer):\n \"\"\"Shuffle-stage HighLevelGraph layer\n\n High-level graph layer corresponding to a single stage of\n a multi-stage inter-partition shuffle operation.\n\n Stage: (shuffle-group) -> (shuffle-split) -> (shuffle-join)\n\n Parameters\n ----------\n name : str\n Name of new (partially) shuffled collection.\n column : str or list of str\n Column(s) to be used to map rows to output partitions (by hashing).\n inputs : list of tuples\n Each tuple dictates the data movement for a specific partition.\n stage : int\n Index of the current shuffle stage.\n npartitions : int\n Number of output partitions for the full (multi-stage) shuffle.\n npartitions_input : int\n Number of partitions in the original (un-shuffled) DataFrame.\n nsplits : int\n A partition is split into this many groups during each stage.\n ignore_index: bool, default False\n Ignore index during shuffle.
If ``True``, performance may improve,\n but index values will not be preserved.\n name_input : str\n Name of input collection.\n meta_input : pd.DataFrame-like object\n Empty metadata of input collection.\n parts_out : list of int (optional)\n List of required output-partition indices.\n annotations : dict (optional)\n Layer annotations\n \"\"\"\n\n def __init__(\n self,\n name,\n column,\n inputs,\n stage,\n npartitions,\n npartitions_input,\n nsplits,\n ignore_index,\n name_input,\n meta_input,\n parts_out=None,\n annotations=None,\n ):\n self.inputs = inputs\n self.stage = stage\n self.nsplits = nsplits\n super().__init__(\n name,\n column,\n npartitions,\n npartitions_input,\n ignore_index,\n name_input,\n meta_input,\n parts_out=parts_out or range(len(inputs)),\n annotations=annotations,\n )\n\n def get_split_keys(self):\n # Return ShuffleLayer \"split\" keys\n keys = []\n for part in self.parts_out:\n out = self.inputs[part]\n for i in range(self.nsplits):\n keys.append(\n stringify(\n (\n self.split_name,\n out[self.stage],\n insert(out, self.stage, i),\n )\n )\n )\n return keys\n\n def __repr__(self):\n return \"ShuffleLayer<name='{}', stage={}, nsplits={}, npartitions={}>\".format(\n self.name, self.stage, self.nsplits, self.npartitions\n )\n\n def __reduce__(self):\n attrs = [\n \"name\",\n \"column\",\n \"inputs\",\n \"stage\",\n \"npartitions\",\n \"npartitions_input\",\n \"nsplits\",\n \"ignore_index\",\n \"name_input\",\n \"meta_input\",\n \"parts_out\",\n \"annotations\",\n ]\n\n return (ShuffleLayer, tuple(getattr(self, attr) for attr in attrs))\n\n def __dask_distributed_pack__(self, *args, **kwargs):\n ret = super().__dask_distributed_pack__(*args, **kwargs)\n ret[\"inputs\"] = self.inputs\n ret[\"stage\"] = self.stage\n ret[\"nsplits\"] = self.nsplits\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer._cull_dependencies_ShuffleLayer._cull.return.ShuffleLayer_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer._cull_dependencies_ShuffleLayer._cull.return.ShuffleLayer_", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 500, "span_ids": ["ShuffleLayer._cull_dependencies", "ShuffleLayer._cull"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ShuffleLayer(SimpleShuffleLayer):\n\n def _cull_dependencies(self, keys, parts_out=None):\n \"\"\"Determine the necessary dependencies to produce `keys`.\n\n Does not require graph materialization.\n \"\"\"\n deps = defaultdict(set)\n parts_out = parts_out or self._keys_to_parts(keys)\n inp_part_map = {inp: i for i, inp in enumerate(self.inputs)}\n for part in parts_out:\n out = self.inputs[part]\n for k in range(self.nsplits):\n _inp = insert(out, self.stage, k)\n _part = inp_part_map[_inp]\n if self.stage == 0 and _part >= self.npartitions_input:\n deps[(self.name, part)].add((\"group-\" + self.name,
_inp, \"empty\"))\n else:\n deps[(self.name, part)].add((self.name_input, _part))\n return deps\n\n def _cull(self, parts_out):\n return ShuffleLayer(\n self.name,\n self.column,\n self.inputs,\n self.stage,\n self.npartitions,\n self.npartitions_input,\n self.nsplits,\n self.ignore_index,\n self.name_input,\n self.meta_input,\n parts_out=parts_out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer._construct_graph_ShuffleLayer._construct_graph.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ShuffleLayer._construct_graph_ShuffleLayer._construct_graph.return.dsk", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 781, "end_line": 852, "span_ids": ["ShuffleLayer._construct_graph"], "tokens": 561}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ShuffleLayer(SimpleShuffleLayer):\n\n def _construct_graph(self, deserializing=False):\n \"\"\"Construct graph for a \"rearrange-by-column\" stage.\"\"\"\n\n shuffle_group_name = \"group-\" + self.name\n\n if deserializing:\n # Use CallableLazyImport objects to avoid importing dataframe\n # module on the scheduler\n concat_func = CallableLazyImport(\"dask.dataframe.core._concat\")\n shuffle_group_func = CallableLazyImport(\n \"dask.dataframe.shuffle.shuffle_group\"\n )\n else:\n # Not running on distributed scheduler - Use explicit functions\n from dask.dataframe.core import _concat as concat_func\n from dask.dataframe.shuffle import shuffle_group as shuffle_group_func\n\n dsk = {}\n inp_part_map = {inp: i for i, inp in enumerate(self.inputs)}\n for part in self.parts_out:\n\n out = self.inputs[part]\n\n _concat_list = [] # get_item tasks to concat for this output partition\n for i in range(self.nsplits):\n # Get out each individual dataframe piece from the dicts\n _inp = insert(out, self.stage, i)\n _idx = out[self.stage]\n _concat_list.append((self.split_name, _idx, _inp))\n\n # concatenate those pieces together, with their friends\n dsk[(self.name, part)] = (\n concat_func,\n _concat_list,\n self.ignore_index,\n )\n\n for _, _idx, _inp in _concat_list:\n dsk[(self.split_name, _idx, _inp)] = (\n operator.getitem,\n (shuffle_group_name, _inp),\n _idx,\n )\n\n if (shuffle_group_name, _inp) not in dsk:\n\n # Initial partitions (output of previous stage)\n _part = inp_part_map[_inp]\n if self.stage == 0:\n if _part < self.npartitions_input:\n input_key = (self.name_input, _part)\n else:\n # In order to make sure that to_serialize() serialize the\n # empty dataframe input, we add it as a key.\n input_key = (shuffle_group_name, _inp, \"empty\")\n dsk[input_key] = self.meta_input\n else:\n input_key = (self.name_input, _part)\n\n # Convert partition into dict of dataframe pieces\n dsk[(shuffle_group_name, _inp)] = (\n shuffle_group_func,\n input_key,\n self.column,\n self.stage,\n self.nsplits,\n 
self.npartitions_input,\n self.ignore_index,\n self.npartitions,\n )\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer_BroadcastJoinLayer.__len__.return.len_self__dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer_BroadcastJoinLayer.__len__.return.len_self__dict_", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 837, "end_line": 921, "span_ids": ["BroadcastJoinLayer", "BroadcastJoinLayer.__init__", "BroadcastJoinLayer.__len__", "BroadcastJoinLayer.__repr__", "BroadcastJoinLayer._dict", "BroadcastJoinLayer.get_output_keys", "BroadcastJoinLayer.__iter__", "BroadcastJoinLayer.__getitem__", "BroadcastJoinLayer.is_materialized"], "tokens": 601}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BroadcastJoinLayer(Layer):\n \"\"\"Broadcast-based Join Layer\n\n High-level graph layer for a join operation requiring the\n smaller collection to be broadcasted to every partition of\n the larger collection.\n\n Parameters\n ----------\n name : str\n Name of new (joined) output collection.\n lhs_name: string\n \"Left\" DataFrame collection to join.\n lhs_npartitions: int\n Number of partitions in \"left\" DataFrame collection.\n rhs_name: string\n \"Right\" DataFrame collection to join.\n rhs_npartitions: int\n Number of partitions in \"right\" DataFrame collection.\n parts_out : list of int (optional)\n List of required output-partition indices.\n annotations : dict (optional)\n Layer annotations.\n **merge_kwargs : **dict\n Keyword arguments to be passed to chunkwise merge func.\n \"\"\"\n\n def __init__(\n self,\n name,\n npartitions,\n lhs_name,\n lhs_npartitions,\n rhs_name,\n rhs_npartitions,\n parts_out=None,\n annotations=None,\n **merge_kwargs,\n ):\n super().__init__(annotations=annotations)\n self.name = name\n self.npartitions = npartitions\n self.lhs_name = lhs_name\n self.lhs_npartitions = lhs_npartitions\n self.rhs_name = rhs_name\n self.rhs_npartitions = rhs_npartitions\n self.parts_out = parts_out or set(range(self.npartitions))\n self.merge_kwargs = merge_kwargs\n self.how = self.merge_kwargs.get(\"how\")\n self.left_on = self.merge_kwargs.get(\"left_on\")\n self.right_on = self.merge_kwargs.get(\"right_on\")\n if isinstance(self.left_on, list):\n self.left_on = (list, tuple(self.left_on))\n if isinstance(self.right_on, list):\n self.right_on = (list, tuple(self.right_on))\n\n def get_output_keys(self):\n return {(self.name, part) for part in self.parts_out}\n\n def __repr__(self):\n return \"BroadcastJoinLayer<name='{}', how={}, lhs={}, rhs={}>\".format(\n self.name, self.how, self.lhs_name, self.rhs_name\n )\n\n def is_materialized(self):\n return hasattr(self, \"_cached_dict\")\n\n @property\n def _dict(self):\n \"\"\"Materialize full dict representation\"\"\"\n if hasattr(self, \"_cached_dict\"):\n return self._cached_dict\n else:\n dsk = 
self._construct_graph()\n self._cached_dict = dsk\n return self._cached_dict\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n return len(self._dict)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer.__dask_distributed_pack___BroadcastJoinLayer.__dask_distributed_pack__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer.__dask_distributed_pack___BroadcastJoinLayer.__dask_distributed_pack__.return._", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 923, "end_line": 944, "span_ids": ["BroadcastJoinLayer.__dask_distributed_pack__"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BroadcastJoinLayer(Layer):\n\n def __dask_distributed_pack__(self, *args, **kwargs):\n import pickle\n\n # Pickle complex merge_kwargs elements. Also\n # tuples, which may be confused with keys.\n _merge_kwargs = {}\n for k, v in self.merge_kwargs.items():\n if not isinstance(v, (str, list, bool)):\n _merge_kwargs[k] = pickle.dumps(v)\n else:\n _merge_kwargs[k] = v\n\n return {\n \"name\": self.name,\n \"npartitions\": self.npartitions,\n \"lhs_name\": self.lhs_name,\n \"lhs_npartitions\": self.lhs_npartitions,\n \"rhs_name\": self.rhs_name,\n \"rhs_npartitions\": self.rhs_npartitions,\n \"parts_out\": self.parts_out,\n \"merge_kwargs\": _merge_kwargs,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer.__dask_distributed_unpack___BroadcastJoinLayer.__dask_distributed_unpack__.return._dsk_toolz_valmap_dump": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer.__dask_distributed_unpack___BroadcastJoinLayer.__dask_distributed_unpack__.return._dsk_toolz_valmap_dump", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 946, "end_line": 962, "span_ids": ["BroadcastJoinLayer.__dask_distributed_unpack__"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BroadcastJoinLayer(Layer):\n\n @classmethod\n def __dask_distributed_unpack__(cls, state, dsk, dependencies):\n from distributed.worker import dumps_task\n\n 
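# Note (editorial assumption, hedged): non-trivial merge_kwargs values were\n # pickled by __dask_distributed_pack__ and may arrive here as bytes; the\n # chunkwise merge wrapper is expected to unpickle them before use.\n 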
# Expand merge_kwargs\n merge_kwargs = state.pop(\"merge_kwargs\", {})\n state.update(merge_kwargs)\n\n # Materialize the layer\n raw = cls(**state)._construct_graph(deserializing=True)\n\n # Convert all keys to strings and dump tasks\n raw = {stringify(k): stringify_collection_keys(v) for k, v in raw.items()}\n keys = raw.keys() | dsk.keys()\n deps = {k: keys_in_tasks(keys, [v]) for k, v in raw.items()}\n\n return {\"dsk\": toolz.valmap(dumps_task, raw), \"deps\": deps}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._keys_to_parts_BroadcastJoinLayer._broadcast_plan.if_self_lhs_npartitions_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._keys_to_parts_BroadcastJoinLayer._broadcast_plan.if_self_lhs_npartitions_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 964, "end_line": 1001, "span_ids": ["BroadcastJoinLayer._broadcast_plan", "BroadcastJoinLayer._keys_to_parts"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BroadcastJoinLayer(Layer):\n\n def _keys_to_parts(self, keys):\n \"\"\"Simple utility to convert keys to partition indices.\"\"\"\n parts = set()\n for key in keys:\n try:\n _name, _part = key\n except ValueError:\n continue\n if _name != self.name:\n continue\n parts.add(_part)\n return parts\n\n @property\n def _broadcast_plan(self):\n # Return structure (tuple):\n # (\n # <broadcasted-collection-name>,\n # <broadcasted-npartitions>,\n # <other-collection-name>,\n # <other-on>,\n # )\n if self.lhs_npartitions < self.rhs_npartitions:\n # Broadcasting the left\n return (\n self.lhs_name,\n self.lhs_npartitions,\n self.rhs_name,\n self.right_on,\n )\n else:\n # Broadcasting the right\n return (\n self.rhs_name,\n self.rhs_npartitions,\n self.lhs_name,\n self.left_on,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._cull_dependencies_BroadcastJoinLayer._cull_dependencies.return.deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._cull_dependencies_BroadcastJoinLayer._cull_dependencies.return.deps", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1003, "end_line": 1020, "span_ids": ["BroadcastJoinLayer._cull_dependencies"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size",
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BroadcastJoinLayer(Layer):\n\n def _cull_dependencies(self, keys, parts_out=None):\n \"\"\"Determine the necessary dependencies to produce `keys`.\n\n For a broadcast join, output partitions always depend on\n all partitions of the broadcasted collection, but only one\n partition of the \"other\" collecction.\n \"\"\"\n # Get broadcast info\n bcast_name, bcast_size, other_name = self._broadcast_plan[:3]\n\n deps = defaultdict(set)\n parts_out = parts_out or self._keys_to_parts(keys)\n for part in parts_out:\n deps[(self.name, part)] |= {(bcast_name, i) for i in range(bcast_size)}\n deps[(self.name, part)] |= {\n (other_name, part),\n }\n return deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._cull_BroadcastJoinLayer.cull.if_parts_out_set_self_.else_.return.self_culled_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._cull_BroadcastJoinLayer.cull.if_parts_out_set_self_.else_.return.self_culled_deps", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1022, "end_line": 1049, "span_ids": ["BroadcastJoinLayer.cull", "BroadcastJoinLayer._cull"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BroadcastJoinLayer(Layer):\n\n def _cull(self, parts_out):\n return BroadcastJoinLayer(\n self.name,\n self.npartitions,\n self.lhs_name,\n self.lhs_npartitions,\n self.rhs_name,\n self.rhs_npartitions,\n annotations=self.annotations,\n parts_out=parts_out,\n **self.merge_kwargs,\n )\n\n def cull(self, keys, all_keys):\n \"\"\"Cull a BroadcastJoinLayer HighLevelGraph layer.\n\n The underlying graph will only include the necessary\n tasks to produce the keys (indicies) included in `parts_out`.\n Therefore, \"culling\" the layer only requires us to reset this\n parameter.\n \"\"\"\n parts_out = self._keys_to_parts(keys)\n culled_deps = self._cull_dependencies(keys, parts_out=parts_out)\n if parts_out != set(self.parts_out):\n culled_layer = self._cull(parts_out)\n return culled_layer, culled_deps\n else:\n return self, culled_deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._construct_graph_BroadcastJoinLayer._construct_graph.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_BroadcastJoinLayer._construct_graph_BroadcastJoinLayer._construct_graph.return.dsk", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 1051, "end_line": 1129, "span_ids": ["BroadcastJoinLayer._construct_graph"], "tokens": 667}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BroadcastJoinLayer(Layer):\n\n def _construct_graph(self, deserializing=False):\n \"\"\"Construct graph for a broadcast join operation.\"\"\"\n\n inter_name = \"inter-\" + self.name\n split_name = \"split-\" + self.name\n\n if deserializing:\n # Use CallableLazyImport objects to avoid importing dataframe\n # module on the scheduler\n split_partition_func = CallableLazyImport(\n \"dask.dataframe.multi._split_partition\"\n )\n concat_func = CallableLazyImport(\"dask.dataframe.multi._concat_wrapper\")\n merge_chunk_func = CallableLazyImport(\n \"dask.dataframe.multi._merge_chunk_wrapper\"\n )\n else:\n # Not running on distributed scheduler - Use explicit functions\n from dask.dataframe.multi import _concat_wrapper as concat_func\n from dask.dataframe.multi import _merge_chunk_wrapper as merge_chunk_func\n from dask.dataframe.multi import _split_partition as split_partition_func\n\n # Get broadcast \"plan\"\n bcast_name, bcast_size, other_name, other_on = self._broadcast_plan\n bcast_side = \"left\" if self.lhs_npartitions < self.rhs_npartitions else \"right\"\n\n # Loop over output partitions, which should be a 1:1\n # mapping with the input partitions of \"other\".\n # Culling should allow us to avoid generating tasks for\n # any output partitions that are not requested (via `parts_out`)\n dsk = {}\n for i in self.parts_out:\n\n # Split each \"other\" partition by hash\n if self.how != \"inner\":\n dsk[(split_name, i)] = (\n split_partition_func,\n (other_name, i),\n other_on,\n bcast_size,\n )\n\n # For each partition of \"other\", we need to join\n # to each partition of \"bcast\". 
If it is a \"left\"\n # or \"right\" join, there should be a unique mapping\n # between the local splits of \"other\" and the\n # partitions of \"bcast\" (which means we need an\n # additional `getitem` operation to isolate the\n # correct split of each \"other\" partition).\n _concat_list = []\n for j in range(bcast_size):\n # Specify arg list for `merge_chunk`\n _merge_args = [\n (\n operator.getitem,\n (split_name, i),\n j,\n )\n if self.how != \"inner\"\n else (other_name, i),\n (bcast_name, j),\n ]\n if bcast_side == \"left\":\n # If the left is broadcasted, the\n # arg list needs to be reversed\n _merge_args.reverse()\n inter_key = (inter_name, i, j)\n dsk[inter_key] = (\n apply,\n merge_chunk_func,\n _merge_args,\n self.merge_kwargs,\n )\n _concat_list.append(inter_key)\n\n # Concatenate the merged results for each output partition\n dsk[(self.name, i)] = (concat_func, _concat_list)\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameIOLayer_DataFrameIOLayer.__init__.super___init___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameIOLayer_DataFrameIOLayer.__init__.super___init___", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1132, "end_line": 1202, "span_ids": ["DataFrameIOLayer", "DataFrameIOLayer.__init__"], "tokens": 529}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameIOLayer(Blockwise):\n \"\"\"DataFrame-based Blockwise Layer with IO\n\n Parameters\n ----------\n name : str\n Name to use for the constructed layer.\n columns : str, list or None\n Field name(s) to read in as columns in the output.\n inputs : list[tuple]\n List of arguments to be passed to ``io_func`` so\n that the materialized task to produce partition ``i``\n will be: ``(, inputs[i])``. Note that each\n element of ``inputs`` is typically a tuple of arguments.\n io_func : callable\n A callable function that takes in a single tuple\n of arguments, and outputs a DataFrame partition.\n label : str (optional)\n String to use as a prefix in the place-holder collection\n name. If nothing is specified (default), \"subset-\" will\n be used.\n produces_tasks : bool (optional)\n Whether one or more elements of `inputs` is expected to\n contain a nested task. This argument in only used for\n serialization purposes, and will be deprecated in the\n future. 
Default is False.\n creation_info: dict (optional)\n Dictionary containing the callable function ('func'),\n positional arguments ('args'), and keyword arguments\n ('kwargs') used to produce the dask collection with\n this underlying ``DataFrameIOLayer``.\n annotations: dict (optional)\n Layer annotations to pass through to Blockwise.\n \"\"\"\n\n def __init__(\n self,\n name,\n columns,\n inputs,\n io_func,\n label=None,\n produces_tasks=False,\n creation_info=None,\n annotations=None,\n ):\n self.name = name\n self.columns = columns\n self.inputs = inputs\n self.io_func = io_func\n self.label = label\n self.produces_tasks = produces_tasks\n self.annotations = annotations\n self.creation_info = creation_info\n\n # Define mapping between key index and \"part\"\n io_arg_map = BlockwiseDepDict(\n {(i,): inp for i, inp in enumerate(self.inputs)},\n produces_tasks=self.produces_tasks,\n )\n\n # Use Blockwise initializer\n dsk = {self.name: (io_func, blockwise_token(0))}\n super().__init__(\n output=self.name,\n output_indices=\"i\",\n dsk=dsk,\n indices=[(io_arg_map, \"i\")],\n numblocks={},\n annotations=annotations,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_2_execute_task.return.key_result_failed": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_None_2_execute_task.return.key_result_failed", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 202, "end_line": 230, "span_ids": ["execute_task", "start_state_from_dask"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nRunning tasks\n-------------\n\nWhen we execute tasks we both\n\n1. Perform the actual work of collecting the appropriate data and calling the function\n2. 
Manage administrative state to coordinate with the scheduler\n\"\"\"\n\n\ndef execute_task(key, task_info, dumps, loads, get_id, pack_exception):\n \"\"\"\n Compute task and handle all administration\n\n See Also\n --------\n _execute_task : actually execute task\n \"\"\"\n try:\n task, data = loads(task_info)\n result = _execute_task(task, data)\n id = get_id()\n result = dumps((result, id))\n failed = False\n except BaseException as e:\n result = pack_exception(e, dumps)\n failed = True\n return key, result, failed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_batch_execute_tasks_release_data.if_delete_.del_state_cache_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_batch_execute_tasks_release_data.if_delete_.del_state_cache_key_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 254, "span_ids": ["release_data", "batch_execute_tasks"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def batch_execute_tasks(it):\n \"\"\"\n Batch computing of multiple tasks with `execute_task`\n \"\"\"\n return [execute_task(*a) for a in it]\n\n\ndef release_data(key, state, delete=True):\n \"\"\"Remove data from temporary storage\n\n See Also\n --------\n finish_task\n \"\"\"\n if key in state[\"waiting_data\"]:\n assert not state[\"waiting_data\"][key]\n del state[\"waiting_data\"][key]\n\n state[\"released\"].add(key)\n\n if delete:\n del state[\"cache\"][key]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py__Synchronous_concrete__get_apply_async.return.get_async_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py__Synchronous_concrete__get_apply_async.return.get_async_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 524, "end_line": 591, "span_ids": ["impl:3", "MultiprocessingPoolExecutor", "MultiprocessingPoolExecutor.__init__", "get_sync", "get_async", "SynchronousExecutor.submit", "SynchronousExecutor", "MultiprocessingPoolExecutor.submit", "get_apply_async", "submit_apply_async"], "tokens": 423}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" Synchronous concrete version of get_async\n\nUsually we supply a 
``concurrent.futures.Executor``. Here we provide a\nsequential one. This is useful for debugging and for code dominated by the\nGIL.\n\"\"\"\n\n\nclass SynchronousExecutor(Executor):\n _max_workers = 1\n\n def submit(self, fn, *args, **kwargs):\n fut = Future()\n try:\n fut.set_result(fn(*args, **kwargs))\n except BaseException as e:\n fut.set_exception(e)\n return fut\n\n\nsynchronous_executor = SynchronousExecutor()\n\n\ndef get_sync(dsk, keys, **kwargs):\n \"\"\"A naive synchronous version of get_async\n\n Can be useful for debugging.\n \"\"\"\n kwargs.pop(\"num_workers\", None) # if num_workers present, remove it\n return get_async(\n synchronous_executor.submit,\n synchronous_executor._max_workers,\n dsk,\n keys,\n **kwargs,\n )\n\n\n\"\"\" Adaptor for ``multiprocessing.Pool`` instances\n\nUsually we supply a ``concurrent.futures.Executor``. Here we provide a wrapper\nclass for ``multiprocessing.Pool`` instances so we can treat them like\n``concurrent.futures.Executor`` instances instead.\n\nThis is mainly useful for legacy use cases or users who prefer\n``multiprocessing.Pool``.\n\"\"\"\n\n\nclass MultiprocessingPoolExecutor(Executor):\n def __init__(self, pool):\n self.pool = pool\n self._max_workers = len(pool._pool)\n\n def submit(self, fn, *args, **kwargs):\n return submit_apply_async(self.pool.apply_async, fn, *args, **kwargs)\n\n\ndef submit_apply_async(apply_async, fn, *args, **kwargs):\n fut = Future()\n apply_async(fn, args, kwargs, fut.set_result, fut.set_exception)\n return fut\n\n\ndef get_apply_async(apply_async, num_workers, *args, **kwargs):\n return get_async(\n partial(submit_apply_async, apply_async), num_workers, *args, **kwargs\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_fuse.if_config_get_optimizati.return.dsk_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/optimization.py_fuse_fuse.if_config_get_optimizati.return.dsk_dependencies", "embedding": null, "metadata": {"file_path": "dask/optimization.py", "file_name": "optimization.py", "file_type": "text/x-python", "category": "implementation", "start_line": 428, "end_line": 501, "span_ids": ["fuse"], "tokens": 751}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fuse(\n dsk,\n keys=None,\n dependencies=None,\n ave_width=_default,\n max_width=_default,\n max_height=_default,\n max_depth_new_edges=_default,\n rename_keys=_default,\n fuse_subgraphs=_default,\n):\n \"\"\"Fuse tasks that form reductions; more advanced than ``fuse_linear``\n\n This trades parallelism opportunities for faster scheduling by making tasks\n less granular. It can replace ``fuse_linear`` in optimization passes.\n\n This optimization applies to all reductions--tasks that have at most one\n dependent--so it may be viewed as fusing \"multiple input, single output\"\n groups of tasks into a single task. There are many parameters to fine\n tune the behavior, which are described below. 
``ave_width`` is the\n natural parameter with which to compare parallelism to granularity, so\n it should always be specified. Reasonable values for other parameters\n will be determined using ``ave_width`` if necessary.\n\n Parameters\n ----------\n dsk: dict\n dask graph\n keys: list or set, optional\n Keys that must remain in the returned dask graph\n dependencies: dict, optional\n {key: [list-of-keys]}. Must be a list to provide count of each key.\n This optional input often comes from ``cull``.\n ave_width: float (default 1)\n Upper limit for ``width = num_nodes / height``, a good measure of\n parallelizability.\n dask.config key: ``optimization.fuse.ave-width``\n max_width: int (default infinite)\n Don't fuse if total width is greater than this.\n dask.config key: ``optimization.fuse.max-width``\n max_height: int or None (default None)\n Don't fuse more than this many levels. Set to None to dynamically\n adjust to ``1.5 + ave_width * log(ave_width + 1)``.\n dask.config key: ``optimization.fuse.max-height``\n max_depth_new_edges: int or None (default None)\n Don't fuse if new dependencies are added after this many levels.\n Set to None to dynamically adjust to ave_width * 1.5.\n dask.config key: ``optimization.fuse.max-depth-new-edges``\n rename_keys: bool or func, optional (default True)\n Whether to rename the fused keys with ``default_fused_keys_renamer``\n or not. Renaming fused keys can keep the graph more understandable\n and comprehensible, but it comes at the cost of additional processing.\n If False, then the top-most key will be used. For advanced usage, a\n function to create the new name is also accepted.\n dask.config key: ``optimization.fuse.rename-keys``\n fuse_subgraphs : bool or None, optional (default None)\n Whether to fuse multiple tasks into ``SubgraphCallable`` objects.\n Set to None to let the default optimizer of individual dask collections decide.\n If no collection-specific default exists, None defaults to False.\n dask.config key: ``optimization.fuse.subgraphs``\n\n Returns\n -------\n dsk\n output graph with keys fused\n dependencies\n dict mapping dependencies after fusion. Useful side effect to accelerate other\n downstream optimizations.\n \"\"\"\n\n # Perform low-level fusion unless the user has\n # specified False explicitly.\n if config.get(\"optimization.fuse.active\") is False:\n return dsk, dependencies\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_from_operator_import_add_test_cache.assert_not_Callback_activ": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_cache.py_from_operator_import_add_test_cache.assert_not_Callback_activ", "embedding": null, "metadata": {"file_path": "dask/tests/test_cache.py", "file_name": "test_cache.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 43, "span_ids": ["test_cache", "imports", "inc"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from operator import add\nfrom time import sleep\n\nimport pytest\n\nfrom dask.cache import Cache\nfrom dask.callbacks import Callback\nfrom dask.local import get_sync\nfrom dask.threaded import get\n\ncachey = pytest.importorskip(\"cachey\")\n\n\nflag = []\n\n\ndef inc(x):\n flag.append(x)\n return x + 1\n\n\ndef test_cache():\n c = cachey.Cache(10000)\n cc = Cache(c)\n\n with cc:\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n\n assert flag == [1]\n assert c.data[\"x\"] == 2\n\n assert not cc.starttimes\n assert not cc.durations\n\n while flag:\n flag.pop()\n dsk = {\"x\": (inc, 1), \"y\": (inc, 2), \"z\": (add, \"x\", \"y\")}\n with cc:\n assert get(dsk, \"z\") == 5\n\n assert flag == [2] # no x present\n\n assert not Callback.active", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_from_dask_callbacks_impor_test_start_state_callback.assert_flag_0_is_True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_callbacks.py_from_dask_callbacks_impor_test_start_state_callback.assert_flag_0_is_True", "embedding": null, "metadata": {"file_path": "dask/tests/test_callbacks.py", "file_name": "test_callbacks.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 32, "span_ids": ["test_start_state_callback", "imports", "test_start_callback"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dask.callbacks import Callback\nfrom dask.local import get_sync\nfrom dask.threaded import get as get_threaded\nfrom dask.utils_test import add\n\n\ndef test_start_callback():\n flag = [False]\n\n class MyCallback(Callback):\n def _start(self, dsk):\n flag[0] = True\n\n with MyCallback():\n get_sync({\"x\": 1}, \"x\")\n\n assert flag[0] is True\n\n\ndef test_start_state_callback():\n flag = [False]\n\n class 
MyCallback(Callback):\n def _start_state(self, dsk, state):\n flag[0] = True\n assert dsk[\"x\"] == 1\n assert len(state[\"cache\"]) == 1\n\n with MyCallback():\n get_sync({\"x\": 1}, \"x\")\n\n assert flag[0] is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_pickle_test_istask.assert_not_istask_f_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_core.py_pickle_test_istask.assert_not_istask_f_sum_", "embedding": null, "metadata": {"file_path": "dask/tests/test_core.py", "file_name": "test_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["contains", "imports", "test_istask"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pickle\nfrom collections import namedtuple\n\nimport pytest\n\nfrom dask import core\nfrom dask.core import (\n flatten,\n get_dependencies,\n get_deps,\n has_tasks,\n istask,\n literal,\n preorder_traversal,\n quote,\n subs,\n)\nfrom dask.utils_test import GetFunctionTestMixin, add, inc\n\n\ndef contains(a, b):\n \"\"\"\n\n >>> contains({'x': 1, 'y': 2}, {'x': 1})\n True\n >>> contains({'x': 1, 'y': 2}, {'z': 3})\n False\n \"\"\"\n return all(a.get(k) == v for k, v in b.items())\n\n\ndef test_istask():\n assert istask((inc, 1))\n assert not istask(1)\n assert not istask((1, 2))\n f = namedtuple(\"f\", [\"x\", \"y\"])\n assert not istask(f(sum, 2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_datasets.py_pytest_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_datasets.py_pytest_", "embedding": null, "metadata": {"file_path": "dask/tests/test_datasets.py", "file_name": "test_datasets.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 45, "span_ids": ["test_make_dataset_with_processes", "imports", "test_mimesis", "test_no_mimesis", "test_deterministic", "test_full_dataset"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask\n\n\ndef test_mimesis():\n pytest.importorskip(\"mimesis\")\n\n b = dask.datasets.make_people()\n assert b.take(5)\n\n assert b.take(3) == b.take(3)\n\n\ndef test_full_dataset():\n pytest.importorskip(\"mimesis\")\n b = dask.datasets.make_people(npartitions=2, records_per_partition=10)\n assert b.count().compute() == 20\n\n\ndef test_make_dataset_with_processes():\n 
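# mimesis is an optional dependency; importorskip skips this test\n # when it is not installed.\n 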
pytest.importorskip(\"mimesis\")\n b = dask.datasets.make_people(npartitions=2)\n try:\n b.compute(scheduler=\"processes\")\n except TypeError:\n pytest.fail(\"Failed to execute make_people using processes\")\n\n\ndef test_no_mimesis():\n try:\n import mimesis # noqa: F401\n except ImportError:\n with pytest.raises(Exception) as info:\n dask.datasets.make_people()\n\n assert \"python -m pip install mimesis\" in str(info.value)\n\n\ndef test_deterministic():\n pytest.importorskip(\"mimesis\")\n\n b = dask.datasets.make_people(seed=123)\n assert b.take(1)[0][\"name\"] == (\"Leandro\", \"Orr\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_pickle_Tuple.__dask_postcompute__.return.tuple_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_pickle_Tuple.__dask_postcompute__.return.tuple_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["Tuple.__init__", "imports", "Tuple.__dask_keys__", "Tuple.__dask_tokenize__", "Tuple", "Tuple.__dask_postcompute__", "Tuple.__dask_graph__"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pickle\nimport types\nfrom collections import namedtuple\nfrom functools import partial\nfrom operator import add, matmul, setitem\nfrom random import random\n\nimport cloudpickle\nimport pytest\nfrom tlz import merge\n\nimport dask\nimport dask.bag as db\nfrom dask import compute\nfrom dask.delayed import Delayed, delayed, to_task_dask\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.utils_test import inc\n\n\nclass Tuple:\n __dask_scheduler__ = staticmethod(dask.threaded.get)\n\n def __init__(self, dsk, keys):\n self._dask = dsk\n self._keys = keys\n\n def __dask_tokenize__(self):\n return self._keys\n\n def __dask_graph__(self):\n return self._dask\n\n def __dask_keys__(self):\n return self._keys\n\n def __dask_postcompute__(self):\n return tuple, ()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_with_tasks_test_kwargs.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_nout_with_tasks_test_kwargs.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 398, "end_line": 423, "span_ids": ["test_nout_with_tasks", "test_kwargs"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"x\",\n [[1, 2], (1, 2), (add, 1, 2), [], ()],\n)\ndef test_nout_with_tasks(x):\n length = len(x)\n d = delayed(x, nout=length)\n assert len(d) == len(list(d)) == length\n assert d.compute() == x\n\n\ndef test_kwargs():\n def mysum(a, b, c=(), **kwargs):\n return a + b + sum(c) + sum(kwargs.values())\n\n dmysum = delayed(mysum)\n ten = dmysum(1, 2, c=[delayed(3), 0], four=dmysum(2, 2))\n assert ten.compute() == 10\n dmysum = delayed(mysum, pure=True)\n c = [delayed(3), 0]\n ten = dmysum(1, 2, c=c, four=dmysum(2, 2))\n assert ten.compute() == 10\n assert dmysum(1, 2, c=c, four=dmysum(2, 2)).key == ten.key\n assert dmysum(1, 2, c=c, four=dmysum(2, 3)).key != ten.key\n assert dmysum(1, 2, c=c, four=4).key != ten.key\n assert dmysum(1, 2, c=c, four=4).key != dmysum(2, 2, c=c, four=4).key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_pytest_test_persist.assert_y2_key_in_a_data_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_pytest_test_persist.assert_y2_key_in_a_data_o", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 48, "span_ids": ["imports", "test_can_import_nested_things", "test_persist", "test_can_import_client"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\ndistributed = pytest.importorskip(\"distributed\")\n\nimport asyncio\nimport os\nfrom functools import partial\nfrom operator import add\n\nfrom distributed.utils_test import client as c # noqa F401\nfrom distributed.utils_test import cluster_fixture # noqa F401\nfrom distributed.utils_test import loop # noqa F401\nfrom distributed.utils_test import cluster, gen_cluster, inc, varying\n\nimport dask\nimport dask.bag as db\nfrom dask import compute, delayed, persist\nfrom dask.delayed import Delayed\nfrom dask.distributed import futures_of, wait\nfrom dask.highlevelgraph import HighLevelGraph, MaterializedLayer\nfrom dask.utils import get_named_args, tmpdir, tmpfile\n\nif \"should_check_state\" in get_named_args(gen_cluster):\n gen_cluster = partial(gen_cluster, should_check_state=False)\n cluster = partial(cluster, should_check_state=False)\n\n\ndef test_can_import_client():\n from dask.distributed import Client # noqa: F401\n\n\ndef test_can_import_nested_things():\n from dask.distributed.protocol import dumps # noqa: F401\n\n\n@gen_cluster(client=True)\nasync def test_persist(c, s, a, b):\n x = delayed(inc)(1)\n (x2,) = persist(x)\n\n await wait(x2)\n assert x2.key in a.data or x2.key in b.data\n\n y = delayed(inc)(10)\n y2, one = persist(y, 1)\n\n await wait(y2)\n assert y2.key in a.data or 
y2.key in b.data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_array_creation_test_blockwise_array_creation.with_dask_config_set_op.da_assert_eq_darr_narr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_array_creation_test_blockwise_array_creation.with_dask_config_set_op.da_assert_eq_darr_narr_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 296, "end_line": 329, "span_ids": ["test_blockwise_array_creation"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"io\",\n [\n \"ones\",\n \"zeros\",\n \"full\",\n ],\n)\n@pytest.mark.parametrize(\"fuse\", [True, False, None])\ndef test_blockwise_array_creation(c, io, fuse):\n np = pytest.importorskip(\"numpy\")\n da = pytest.importorskip(\"dask.array\")\n\n chunks = (5, 2)\n shape = (10, 4)\n\n if io == \"ones\":\n darr = da.ones(shape, chunks=chunks)\n narr = np.ones(shape)\n elif io == \"zeros\":\n darr = da.zeros(shape, chunks=chunks)\n narr = np.zeros(shape)\n elif io == \"full\":\n darr = da.full(shape, 10, chunks=chunks)\n narr = np.full(shape, 10)\n\n darr += 2\n narr += 2\n with dask.config.set({\"optimization.fuse.active\": fuse}):\n darr.compute()\n dsk = dask.array.optimize(darr.dask, darr.__dask_keys__())\n # dsk should be a dict unless fuse is explicitly False\n assert isinstance(dsk, dict) == (fuse is not False)\n da.assert_eq(darr, narr, scheduler=c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_dataframe_io_test_blockwise_dataframe_io.with_dask_config_set_op.dd_assert_eq_ddf_df_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_dataframe_io_test_blockwise_dataframe_io.with_dask_config_set_op.dd_assert_eq_ddf_df_che", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 332, "end_line": 381, "span_ids": ["test_blockwise_dataframe_io"], "tokens": 524}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\n \"ignore:Running on a single-machine scheduler when a distributed client \"\n \"is 
active might lead to unexpected results.\"\n)\n@pytest.mark.parametrize(\n \"io\",\n [\"parquet-pyarrow\", \"parquet-fastparquet\", \"csv\", \"hdf\"],\n)\n@pytest.mark.parametrize(\"fuse\", [True, False, None])\n@pytest.mark.parametrize(\"from_futures\", [True, False])\ndef test_blockwise_dataframe_io(c, tmpdir, io, fuse, from_futures):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n df = pd.DataFrame({\"x\": [1, 2, 3] * 5, \"y\": range(15)})\n\n if from_futures:\n parts = [df.iloc[:5], df.iloc[5:10], df.iloc[10:15]]\n futs = c.scatter(parts)\n ddf0 = dd.from_delayed(futs, meta=parts[0])\n else:\n ddf0 = dd.from_pandas(df, npartitions=3)\n\n if io.startswith(\"parquet\"):\n if io == \"parquet-pyarrow\":\n pytest.importorskip(\"pyarrow.parquet\")\n engine = \"pyarrow\"\n else:\n pytest.importorskip(\"fastparquet\")\n engine = \"fastparquet\"\n ddf0.to_parquet(str(tmpdir), engine=engine)\n ddf = dd.read_parquet(str(tmpdir), engine=engine)\n elif io == \"csv\":\n ddf0.to_csv(str(tmpdir), index=False)\n ddf = dd.read_csv(os.path.join(str(tmpdir), \"*\"))\n elif io == \"hdf\":\n pytest.importorskip(\"tables\")\n fn = str(tmpdir.join(\"h5\"))\n ddf0.to_hdf(fn, \"/data*\")\n ddf = dd.read_hdf(fn, \"/data*\")\n\n df = df[[\"x\"]] + 10\n ddf = ddf[[\"x\"]] + 10\n with dask.config.set({\"optimization.fuse.active\": fuse}):\n ddf.compute()\n dsk = dask.dataframe.optimize(ddf.dask, ddf.__dask_keys__())\n # dsk should not be a dict unless fuse is explicitly True\n assert isinstance(dsk, dict) == bool(fuse)\n\n dd.assert_eq(ddf, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_numpy_args_test_blockwise_numpy_args.assert_res_1000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_numpy_args_test_blockwise_numpy_args.assert_res_1000", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 356, "end_line": 370, "span_ids": ["test_blockwise_numpy_args"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_blockwise_numpy_args(c, s, a, b):\n \"\"\"Test pack/unpack of blockwise that includes a NumPy literal argument\"\"\"\n da = pytest.importorskip(\"dask.array\")\n np = pytest.importorskip(\"numpy\")\n\n def fn(x, dt):\n assert type(dt) is np.uint16\n return x.astype(dt)\n\n arr = da.blockwise(\n fn, \"x\", da.ones(1000), \"x\", np.uint16(42), None, dtype=np.uint16\n )\n res = await c.compute(arr.sum(), optimize_graph=False)\n assert res == 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_numpy_kwargs_test_blockwise_numpy_kwargs.assert_res_1000": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_numpy_kwargs_test_blockwise_numpy_kwargs.assert_res_1000", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 373, "end_line": 385, "span_ids": ["test_blockwise_numpy_kwargs"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_blockwise_numpy_kwargs(c, s, a, b):\n \"\"\"Test pack/unpack of blockwise that includes a NumPy literal keyword argument\"\"\"\n da = pytest.importorskip(\"dask.array\")\n np = pytest.importorskip(\"numpy\")\n\n def fn(x, dt=None):\n assert type(dt) is np.uint16\n return x.astype(dt)\n\n arr = da.blockwise(fn, \"x\", da.ones(1000), \"x\", dtype=np.uint16, dt=np.uint16(42))\n res = await c.compute(arr.sum(), optimize_graph=False)\n assert res == 1000", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_partition_info_test_map_partitions_partition_info.assert_res_1_number": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_partition_info_test_map_partitions_partition_info.assert_res_1_number", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 452, "end_line": 462, "span_ids": ["test_map_partitions_partition_info"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_map_partitions_partition_info(c, s, a, b):\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n ddf = dd.from_pandas(pd.DataFrame({\"a\": range(10)}), npartitions=2)\n res = await c.compute(\n ddf.map_partitions(lambda x, partition_info=None: partition_info)\n )\n assert res[0] == {\"number\": 0, \"division\": 0}\n assert res[1] == {\"number\": 1, \"division\": 5}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_subgraphs_test_futures_in_subgraphs.ddf_4.await_c_submit_dd_categor": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_futures_in_subgraphs_test_futures_in_subgraphs.ddf_4.await_c_submit_dd_categor", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 465, "end_line": 486, "span_ids": ["test_futures_in_subgraphs"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_futures_in_subgraphs(c, s, a, b):\n \"\"\"Copied from distributed (tests/test_client.py)\"\"\"\n\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n ddf = dd.from_pandas(\n pd.DataFrame(\n dict(\n uid=range(50),\n enter_time=pd.date_range(\n start=\"2020-01-01\", end=\"2020-09-01\", periods=50, tz=\"UTC\"\n ),\n )\n ),\n npartitions=1,\n )\n\n ddf = ddf[ddf.uid.isin(range(29))].persist()\n ddf[\"day\"] = ddf.enter_time.dt.day_name()\n ddf = await c.submit(dd.categorical.categorize, ddf, columns=[\"day\"], index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_copy_test_task_label.assert_task_label_add_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_copy_test_task_label.assert_task_label_add_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 63, "span_ids": ["test_task_label", "impl:21", "imports", "get_label", "get_shape"], "tokens": 378}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import copy\nimport os\nimport re\nimport sys\nfrom functools import partial\nfrom operator import add, neg\n\nimport pytest\n\nif sys.flags.optimize != 2:\n pytest.importorskip(\"graphviz\")\n from dask.dot import dot_graph, label, task_label, to_graphviz\nelse:\n pytestmark = pytest.mark.skipif(\n True, reason=\"graphviz exception with Python -OO flag\"\n )\n\nfrom dask import delayed\nfrom dask.utils import ensure_not_exists\n\ntry:\n from IPython.display import SVG, Image\nexcept ImportError:\n ipython_not_installed = True\n Image = None\n SVG = None\nelse:\n ipython_not_installed = False\nipython_not_installed_mark = pytest.mark.skipif(\n ipython_not_installed, reason=\"IPython not installed\"\n)\n\n\n# Since graphviz doesn't store a graph, we need to parse the output\nlabel_re = 
re.compile(r\".*\\[label=(.*?) shape=(.*?)\\]\")\n\n\ndef get_label(line):\n m = label_re.match(line)\n if m:\n return m.group(1)\n\n\ndef get_shape(line):\n m = label_re.match(line)\n if m:\n return m.group(2)\n\n\ndsk = {\n \"a\": 1,\n \"b\": 2,\n \"c\": (neg, \"a\"),\n \"d\": (neg, \"b\"),\n \"e\": (add, \"c\", \"d\"),\n \"f\": (sum, [\"a\", \"e\"]),\n}\n\n\ndef test_task_label():\n assert task_label((partial(add, 1), 1)) == \"add\"\n assert task_label((add, 1)) == \"add\"\n assert task_label((add, (add, 1, 2))) == \"add(...)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_assert_no_common_keys_assert_no_common_keys.if_omit_is_not_None_.else_.if_layers_.assert_not_dsk1_dependenc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_assert_no_common_keys_assert_no_common_keys.if_omit_is_not_None_.else_.if_layers_.assert_not_dsk1_dependenc", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 32, "end_line": 56, "span_ids": ["assert_no_common_keys"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_no_common_keys(a, b, omit=None, *, layers: bool) -> None:\n dsk1 = a.__dask_graph__()\n dsk2 = b.__dask_graph__()\n\n if omit is not None:\n dsko = omit.__dask_graph__()\n assert not (dsk1.keys() - dsko.keys()) & dsk2.keys()\n assert not dsko.keys() - dsk1.keys()\n assert not dsko.keys() - dsk2.keys()\n if layers:\n assert not (dsk1.layers.keys() - dsko.layers.keys()) & dsk2.layers.keys()\n assert (\n not (dsk1.dependencies.keys() - dsko.dependencies.keys())\n & dsk2.dependencies.keys()\n )\n assert not dsko.layers.keys() - dsk1.layers.keys()\n assert not dsko.layers.keys() - dsk2.layers.keys()\n assert not dsko.dependencies.keys() - dsk1.dependencies.keys()\n assert not dsko.dependencies.keys() - dsk2.dependencies.keys()\n\n else:\n assert not dsk1.keys() & dsk2.keys()\n if layers:\n assert not dsk1.layers.keys() & dsk2.layers.keys()\n assert not dsk1.dependencies.keys() & dsk2.dependencies.keys()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_assert_did_not_materialize_h2.object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_assert_did_not_materialize_h2.object_", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 59, "end_line": 77, "span_ids": ["assert_did_not_materialize", 
"impl:7"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_did_not_materialize(cloned, orig):\n \"\"\"Test that all layers of the original collection exist in the cloned collection\n too and that Blockwise layers have not been materialized\n \"\"\"\n olayers = orig.__dask_graph__().layers\n clayers = cloned.__dask_graph__().layers\n for k, v in olayers.items():\n try:\n cv = clayers[k]\n except KeyError:\n cv = clayers[clone_key(k, 0)]\n if isinstance(v, Blockwise):\n assert not v.is_materialized()\n assert not cv.is_materialized()\n\n\n# Generic hashables\nh1 = object()\nh2 = object()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_test_bind.assert_cnt_n_9": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_test_bind.assert_cnt_n_9", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 225, "end_line": 274, "span_ids": ["test_bind"], "tokens": 762}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"layers\", [False, True])\ndef test_bind(layers):\n dsk1 = {(\"a-1\", h1): 1, (\"a-1\", h2): 2}\n dsk2 = {\"b-1\": (add, (\"a-1\", h1), (\"a-1\", h2))}\n dsk3 = {\"c-1\": \"b-1\"}\n cnt = NodeCounter()\n dsk4 = {(\"d-1\", h1): (cnt.f, 1), (\"d-1\", h2): (cnt.f, 2)}\n dsk4b = {\"e\": (cnt.f, 3)}\n\n if layers:\n dsk1 = HighLevelGraph({\"a-1\": dsk1}, {\"a-1\": set()})\n dsk2 = HighLevelGraph(\n {\"a-1\": dsk1, \"b-1\": dsk2}, {\"a-1\": set(), \"b-1\": {\"a-1\"}}\n )\n dsk3 = HighLevelGraph(\n {\"a-1\": dsk1, \"b-1\": dsk2, \"c-1\": dsk3},\n {\"a-1\": set(), \"b-1\": {\"a-1\"}, \"c-1\": {\"b-1\"}},\n )\n dsk4 = HighLevelGraph({\"d-1\": dsk4, \"e\": dsk4b}, {\"d-1\": set(), \"e\": set()})\n else:\n dsk2.update(dsk1)\n dsk3.update(dsk2)\n dsk4.update(dsk4b)\n\n # t1 = Tuple(dsk1, [(\"a\", h1), (\"a\", h2)])\n t2 = Tuple(dsk2, [\"b-1\"])\n t3 = Tuple(dsk3, [\"c-1\"])\n t4 = Tuple(dsk4, [(\"d-1\", h1), (\"d-1\", h2), \"e\"]) # Multiple names\n\n bound1 = bind(t3, t4, seed=1, assume_layers=layers)\n cloned_a_name = clone_key(\"a-1\", seed=1)\n assert bound1.__dask_graph__()[cloned_a_name, h1][0] is chunks.bind\n assert bound1.__dask_graph__()[cloned_a_name, h2][0] is chunks.bind\n assert bound1.compute() == (3,)\n assert cnt.n == 3\n\n bound2 = bind(t3, t4, omit=t2, seed=1, assume_layers=layers)\n cloned_c_name = clone_key(\"c-1\", seed=1)\n assert bound2.__dask_graph__()[cloned_c_name][0] is chunks.bind\n assert bound2.compute() == (3,)\n assert cnt.n == 6\n\n bound3 = 
bind(t4, t3, seed=1, assume_layers=layers)\n cloned_d_name = clone_key(\"d-1\", seed=1)\n cloned_e_name = clone_key(\"e\", seed=1)\n assert bound3.__dask_graph__()[cloned_d_name, h1][0] is chunks.bind\n assert bound3.__dask_graph__()[cloned_d_name, h2][0] is chunks.bind\n assert bound3.__dask_graph__()[cloned_e_name][0] is chunks.bind\n assert bound3.compute() == (1, 2, 3)\n assert cnt.n == 9", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_clone_collections_test_bind_clone_collections.assert_cnt_n_8_or_func": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_clone_collections_test_bind_clone_collections.assert_cnt_n_8_or_func", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 277, "end_line": 347, "span_ids": ["test_bind_clone_collections"], "tokens": 821}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da or not dd\")\n@pytest.mark.parametrize(\"func\", [bind, clone])\ndef test_bind_clone_collections(func):\n @delayed\n def double(x):\n return x * 2\n\n # dask.delayed\n d1 = double(2)\n d2 = double(d1)\n # dask.array\n a1 = da.ones((10, 10), chunks=5)\n a2 = a1 + 1\n a3 = a2.T\n # dask.bag\n b1 = db.from_sequence([1, 2], npartitions=2)\n # b1's tasks are not callable, so we need an extra step to properly test bind\n b2 = b1.map(lambda x: x * 2)\n b3 = b2.map(lambda x: x + 1)\n b4 = b3.min()\n # dask.dataframe\n df = pd.DataFrame({\"x\": list(range(10))})\n ddf1 = dd.from_pandas(df, npartitions=2)\n # ddf1's tasks are not callable, so we need an extra step to properly test bind\n ddf2 = ddf1.map_partitions(lambda x: x * 2)\n ddf3 = ddf2.map_partitions(lambda x: x + 1)\n ddf4 = ddf3[\"x\"] # dd.Series\n ddf5 = ddf4.min() # dd.Scalar\n\n cnt = NodeCounter()\n if func is bind:\n parent = da.ones((10, 10), chunks=5).map_blocks(cnt.f)\n cnt.n = 0\n d2c, a3c, b3c, b4c, ddf3c, ddf4c, ddf5c = bind(\n children=(d2, a3, b3, b4, ddf3, ddf4, ddf5),\n parents=parent,\n omit=(d1, a1, b2, ddf2),\n seed=0,\n )\n else:\n d2c, a3c, b3c, b4c, ddf3c, ddf4c, ddf5c = clone(\n d2,\n a3,\n b3,\n b4,\n ddf3,\n ddf4,\n ddf5,\n omit=(d1, a1, b2, ddf2),\n seed=0,\n )\n\n assert_did_not_materialize(d2c, d2)\n assert_did_not_materialize(a3c, a3)\n assert_did_not_materialize(b3c, b3)\n assert_did_not_materialize(b4c, b4)\n assert_did_not_materialize(ddf3c, ddf3)\n assert_did_not_materialize(ddf4c, ddf4)\n assert_did_not_materialize(ddf5c, ddf5)\n\n assert_no_common_keys(d2c, d2, omit=d1, layers=True)\n assert_no_common_keys(a3c, a3, omit=a1, layers=True)\n assert_no_common_keys(b3c, b3, omit=b2, layers=True)\n assert_no_common_keys(ddf3c, ddf3, omit=ddf2, layers=True)\n assert_no_common_keys(ddf4c, ddf4, omit=ddf2, layers=True)\n assert_no_common_keys(ddf5c, ddf5, omit=ddf2, layers=True)\n\n 
assert d2.compute() == d2c.compute()\n assert cnt.n == 4 or func is clone\n da.utils.assert_eq(a3c, a3)\n assert cnt.n == 8 or func is clone\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_clone_collections.db_utils_assert_eq_b3c_b_test_bind_clone_collections.assert_cnt_n_36_or_fun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_bind_clone_collections.db_utils_assert_eq_b3c_b_test_bind_clone_collections.assert_cnt_n_36_or_fun", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 348, "end_line": 357, "span_ids": ["test_bind_clone_collections"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da or not dd\")\n@pytest.mark.parametrize(\"func\", [bind, clone])\ndef test_bind_clone_collections(func):\n # ... other code\n db.utils.assert_eq(b3c, b3)\n assert cnt.n == 12 or func is clone\n db.utils.assert_eq(b4c, b4)\n assert cnt.n == 16 or func is clone\n dd.utils.assert_eq(ddf3c, ddf3)\n assert cnt.n == 24 or func is clone # dd.utils.assert_eq calls compute() twice\n dd.utils.assert_eq(ddf4c, ddf4)\n assert cnt.n == 32 or func is clone # dd.utils.assert_eq calls compute() twice\n dd.utils.assert_eq(ddf5c, ddf5)\n assert cnt.n == 36 or func is clone", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_split_every_test_split_every.assert_t4_compute_schedul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_split_every_test_split_every.assert_t4_compute_schedul", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 360, "end_line": 387, "span_ids": ["test_split_every"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"split_every,nkeys\",\n [\n (2, 299),\n (3, 250),\n (8, 215),\n (None, 215), # default is 8\n (8.1, 215),\n (1e9, 201),\n (False, 201),\n ],\n)\ndef test_split_every(split_every, nkeys):\n dsk = {(\"a\", i): i for i in range(100)}\n t1 = Tuple(dsk, list(dsk))\n c = checkpoint(t1, 
split_every=split_every)\n assert len(c.__dask_graph__()) == nkeys\n assert c.compute(scheduler=\"sync\") is None\n\n t2 = wait_on(t1, split_every=split_every)\n assert len(t2.__dask_graph__()) == nkeys + 100\n assert t2.compute(scheduler=\"sync\") == tuple(range(100))\n\n dsk3 = {\"b\": 1, \"c\": 2}\n t3 = Tuple(dsk3, list(dsk3))\n t4 = bind(t3, t1, split_every=split_every, assume_layers=False)\n assert len(t4.__dask_graph__()) == nkeys + 2\n assert t4.compute(scheduler=\"sync\") == (1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_split_every_invalid_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_graph_manipulation.py_test_split_every_invalid_", "embedding": null, "metadata": {"file_path": "dask/tests/test_graph_manipulation.py", "file_name": "test_graph_manipulation.py", "file_type": "text/x-python", "category": "test", "start_line": 390, "end_line": 402, "span_ids": ["test_split_every_invalid"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_split_every_invalid():\n t = Tuple({\"a\": 1, \"b\": 2}, [\"a\", \"b\"])\n with pytest.raises(ValueError):\n checkpoint(t, split_every=1)\n with pytest.raises(ValueError):\n checkpoint(t, split_every=1.9)\n with pytest.raises(ValueError):\n checkpoint(t, split_every=0) # Not to be confused with False or None\n with pytest.raises(ValueError):\n checkpoint(t, split_every=-2)\n with pytest.raises(TypeError):\n checkpoint(t, split_every={0: 2}) # This is legal for dask.array but not here", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_blockwise_cull_test_blockwise_cull.for_name_layer_in_dsk_cu.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_blockwise_cull_test_blockwise_cull.for_name_layer_in_dsk_cu.None_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 211, "span_ids": ["test_blockwise_cull"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"flat\", [True, False])\ndef test_blockwise_cull(flat):\n da = pytest.importorskip(\"dask.array\")\n np = pytest.importorskip(\"numpy\")\n if flat:\n # Simple \"flat\" mapping between input and\n # output indices\n x = 
da.from_array(np.arange(40).reshape((4, 10)), (2, 4)) + 100\n else:\n # Complex mapping between input and output\n # indices (outer product and transpose)\n x = da.from_array(np.arange(10).reshape((10,)), (4,))\n y = da.from_array(np.arange(10).reshape((10,)), (4,))\n x = da.outer(x, y).transpose()\n\n # Check that blockwise culling results in correct\n # output keys and that full graph is not materialized\n dsk = x.__dask_graph__()\n select = (1, 1) # Select a single chunk\n keys = {(x._name, *select)}\n dsk_cull = dsk.cull(keys)\n for name, layer in dsk_cull.layers.items():\n if not isinstance(layer, dask.blockwise.Blockwise):\n # The original layer shouldn't be Blockwise if the new one isn't\n assert not isinstance(dsk.layers[name], dask.blockwise.Blockwise)\n continue\n assert isinstance(dsk.layers[name], dask.blockwise.Blockwise)\n assert not layer.is_materialized()\n out_keys = layer.get_output_keys()\n assert out_keys == {(layer.output, *select)}\n assert not layer.is_materialized()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__pq_pyarrow__pq_pyarrow.if_pa_ds_.else_.return.ddf1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__pq_pyarrow__pq_pyarrow.if_pa_ds_.else_.return.ddf1", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 133, "end_line": 161, "span_ids": ["_pq_pyarrow"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _pq_pyarrow(tmpdir):\n pytest.importorskip(\"pyarrow.parquet\")\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n try:\n import pyarrow.dataset as pa_ds\n except ImportError:\n # PyArrow version too old for Dataset API\n pa_ds = None\n\n dd.from_pandas(pd.DataFrame({\"a\": range(10)}), npartitions=2,).to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n )\n filters = [((\"a\", \"<=\", 2))]\n\n ddf1 = dd.read_parquet(str(tmpdir), engine=\"pyarrow\", filters=filters)\n if pa_ds:\n # Need to test that layer serialization succeeds\n # with \"pyarrow-dataset\" filtering\n ddf2 = dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-dataset\",\n filters=filters,\n )\n return (ddf1, ddf2)\n else:\n return ddf1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__pq_fastparquet__read_csv.return.dd_read_csv_os_path_join_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__pq_fastparquet__read_csv.return.dd_read_csv_os_path_join_", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", 
"file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 126, "span_ids": ["_pq_fastparquet", "_read_csv"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _pq_fastparquet(tmpdir):\n pytest.importorskip(\"fastparquet\")\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n dd.from_pandas(pd.DataFrame({\"a\": range(10)}), npartitions=2,).to_parquet(\n str(tmpdir),\n engine=\"fastparquet\",\n )\n return dd.read_parquet(str(tmpdir), engine=\"fastparquet\")\n\n\ndef _read_csv(tmpdir):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n dd.from_pandas(\n pd.DataFrame({\"a\": range(10)}),\n npartitions=2,\n ).to_csv(str(tmpdir))\n return dd.read_csv(os.path.join(str(tmpdir), \"*\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_scheduler_highlevel_graph_unpack_import_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_scheduler_highlevel_graph_unpack_import_", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 187, "end_line": 229, "span_ids": ["test_scheduler_highlevel_graph_unpack_import"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"#8480\")\n@pytest.mark.parametrize(\n \"op,lib\",\n [\n (_dataframe_shuffle, \"pandas.\"),\n (_dataframe_tree_reduction, \"pandas.\"),\n (_dataframe_broadcast_join, \"pandas.\"),\n (_pq_pyarrow, \"pandas.\"),\n (_pq_fastparquet, \"pandas.\"),\n (_read_csv, \"pandas.\"),\n (_array_creation, \"numpy.\"),\n (_array_map_overlap, \"numpy.\"),\n ],\n)\n@pytest.mark.parametrize(\"optimize_graph\", [True, False])\ndef test_scheduler_highlevel_graph_unpack_import(op, lib, optimize_graph, loop, tmpdir):\n # Test that array/dataframe-specific modules are not imported\n # on the scheduler when an HLG layers are unpacked/materialized.\n\n with cluster(scheduler_kwargs={\"plugins\": [SchedulerImportCheck(lib)]}) as (\n scheduler,\n workers,\n ):\n with Client(scheduler[\"address\"], loop=loop) as c:\n # Perform a computation using a HighLevelGraph Layer\n c.compute(op(tmpdir), optimize_graph=optimize_graph)\n\n # Get the new modules which were imported on the scheduler during the computation\n end_modules = c.run_on_scheduler(lambda: set(sys.modules))\n start_modules = c.run_on_scheduler(\n lambda dask_scheduler: dask_scheduler.plugins[\n SchedulerImportCheck.name\n ].start_modules\n )\n new_modules = end_modules - start_modules\n\n # Check that the scheduler didn't start with `lib`\n # 
(otherwise we aren't testing anything)\n assert not any(module.startswith(lib) for module in start_modules)\n\n # Check whether we imported `lib` on the scheduler\n assert not any(module.startswith(lib) for module in new_modules)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_multiprocessing_test_pickle_locals.assert_b_unrelated_functi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_multiprocessing.py_multiprocessing_test_pickle_locals.assert_b_unrelated_functi", "embedding": null, "metadata": {"file_path": "dask/tests/test_multiprocessing.py", "file_name": "test_multiprocessing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 46, "span_ids": ["unrelated_function_global", "test_pickle_globals", "imports", "my_small_function_global", "test_pickle_locals"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import multiprocessing\nimport pickle\nimport sys\nfrom concurrent.futures import ProcessPoolExecutor\nfrom operator import add\n\nimport pytest\n\nimport dask\nfrom dask import compute, delayed\nfrom dask.multiprocessing import _dumps, _loads, get, get_context, remote_exception\nfrom dask.system import CPU_COUNT\nfrom dask.utils_test import inc\n\n\ndef unrelated_function_global(a):\n np = pytest.importorskip(\"numpy\")\n return np.array([a])\n\n\ndef my_small_function_global(a, b):\n return a + b\n\n\ndef test_pickle_globals():\n \"\"\"Unrelated globals should not be included in serialized bytes\"\"\"\n b = _dumps(my_small_function_global)\n assert b\"my_small_function_global\" in b\n assert b\"unrelated_function_global\" not in b\n assert b\"numpy\" not in b\n\n\ndef test_pickle_locals():\n \"\"\"Unrelated locals should not be included in serialized bytes\"\"\"\n np = pytest.importorskip(\"numpy\")\n\n def unrelated_function_local(a):\n return np.array([a])\n\n def my_small_function_local(a, b):\n return a + b\n\n b = _dumps(my_small_function_local)\n assert b\"my_small_function_global\" not in b\n assert b\"my_small_function_local\" in b\n assert b\"unrelated_function_local\" not in b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_os_test_reuse_pool.with_pool_typ_CPU_COUNT_.with_dask_config_set_pool.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_os_test_reuse_pool.with_pool_typ_CPU_COUNT_.with_dask_config_set_pool.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 63, "span_ids": ["test_reuse_pool", "imports", 
"test_get", "test_exceptions_rise_to_top", "bad", "test_get_without_computation", "test_broken_callback", "test_nested_get"], "tokens": 463}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport signal\nimport threading\nfrom concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing.pool import ThreadPool\nfrom time import sleep, time\n\nimport pytest\n\nimport dask\nfrom dask.system import CPU_COUNT\nfrom dask.threaded import get\nfrom dask.utils_test import add, inc\n\n\ndef test_get():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (inc, \"x\"), \"w\": (add, \"z\", \"y\")}\n assert get(dsk, \"w\") == 4\n assert get(dsk, [\"w\", \"z\"]) == (4, 2)\n\n\ndef test_nested_get():\n dsk = {\"x\": 1, \"y\": 2, \"a\": (add, \"x\", \"y\"), \"b\": (sum, [\"x\", \"y\"])}\n assert get(dsk, [\"a\", \"b\"]) == (3, 3)\n\n\ndef test_get_without_computation():\n dsk = {\"x\": 1}\n assert get(dsk, \"x\") == 1\n\n\ndef test_broken_callback():\n from dask.callbacks import Callback\n\n def _f_ok(*args, **kwargs):\n pass\n\n def _f_broken(*args, **kwargs):\n raise ValueError(\"my_exception\")\n\n dsk = {\"x\": 1}\n\n with Callback(start=_f_broken, finish=_f_ok):\n with Callback(start=_f_ok, finish=_f_ok):\n with pytest.raises(ValueError, match=\"my_exception\"):\n get(dsk, \"x\")\n\n\ndef bad(x):\n raise ValueError()\n\n\ndef test_exceptions_rise_to_top():\n dsk = {\"x\": 1, \"y\": (bad, \"x\")}\n pytest.raises(ValueError, lambda: get(dsk, \"y\"))\n\n\n@pytest.mark.parametrize(\"pool_typ\", [ThreadPool, ThreadPoolExecutor])\ndef test_reuse_pool(pool_typ):\n with pool_typ(CPU_COUNT) as pool:\n with dask.config.set(pool=pool):\n assert get({\"x\": (inc, 1)}, \"x\") == 2\n assert get({\"x\": (inc, 1)}, \"x\") == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_pool_kwarg_test_pool_kwarg.with_pool_typ_3_as_pool_.assert_get_dsk_x_pool": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_threaded.py_test_pool_kwarg_test_pool_kwarg.with_pool_typ_3_as_pool_.assert_get_dsk_x_pool", "embedding": null, "metadata": {"file_path": "dask/tests/test_threaded.py", "file_name": "test_threaded.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 76, "span_ids": ["test_pool_kwarg"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"pool_typ\", [ThreadPool, ThreadPoolExecutor])\ndef test_pool_kwarg(pool_typ):\n def f():\n sleep(0.01)\n return threading.get_ident()\n\n dsk = {(\"x\", i): (f,) for i in range(30)}\n dsk[\"x\"] = (len, (set, [(\"x\", i) for i in range(len(dsk))]))\n\n with pool_typ(3) as pool:\n assert get(dsk, \"x\", pool=pool) == 3", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_datetime_from_dask_utils_test_impo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_datetime_from_dask_utils_test_impo", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 42, "span_ids": ["imports"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import datetime\nimport functools\nimport operator\nimport pickle\n\nimport pytest\nfrom tlz import curry\n\nfrom dask import get\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.optimization import SubgraphCallable\nfrom dask.utils import (\n Dispatch,\n M,\n SerializableLock,\n _deprecated,\n asciitable,\n cached_cumsum,\n derived_from,\n ensure_dict,\n extra_titles,\n format_bytes,\n funcname,\n getargspec,\n has_keyword,\n is_arraylike,\n itemgetter,\n iter_chunks,\n memory_repr,\n methodcaller,\n ndeepmap,\n parse_bytes,\n parse_timedelta,\n partial_by_order,\n random_state_data,\n skip_doctest,\n stringify,\n stringify_collection_keys,\n takes_multiple_arguments,\n typename,\n)\nfrom dask.utils_test import inc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_stringify_test_stringify.assert_dsk_y_1_1_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_stringify_test_stringify.assert_dsk_y_1_1_0", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 594, "end_line": 629, "span_ids": ["test_stringify"], "tokens": 483}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stringify():\n obj = \"Hello\"\n assert stringify(obj) is obj\n obj = b\"Hello\"\n assert stringify(obj) is obj\n dsk = {\"x\": 1}\n\n assert stringify(dsk) == str(dsk)\n assert stringify(dsk, exclusive=()) == dsk\n\n dsk = {(\"x\", 1): (inc, 1)}\n assert stringify(dsk) == str({(\"x\", 1): (inc, 1)})\n assert stringify(dsk, exclusive=()) == {(\"x\", 1): (inc, 1)}\n\n dsk = {(\"x\", 1): (inc, 1), (\"x\", 2): (inc, (\"x\", 1))}\n assert stringify(dsk, exclusive=dsk) == {\n (\"x\", 1): (inc, 1),\n (\"x\", 2): (inc, str((\"x\", 1))),\n }\n\n dsks = [\n {\"x\": 1},\n {(\"x\", 1): (inc, 1), (\"x\", 2): (inc, (\"x\", 1))},\n 
{(\"x\", 1): (sum, [1, 2, 3]), (\"x\", 2): (sum, [(\"x\", 1), (\"x\", 1)])},\n ]\n for dsk in dsks:\n sdsk = {stringify(k): stringify(v, exclusive=dsk) for k, v in dsk.items()}\n keys = list(dsk)\n skeys = [str(k) for k in keys]\n assert all(isinstance(k, str) for k in sdsk)\n assert get(dsk, keys) == get(sdsk, skeys)\n\n dsk = {(\"y\", 1): (SubgraphCallable({\"x\": (\"y\", 1)}, \"x\", ((\"y\", 1),)), ((\"z\", 1),))}\n dsk = stringify(dsk, exclusive=set(dsk) | {(\"z\", 1)})\n assert dsk[(\"y\", 1)][0].dsk[\"x\"] == \"('y', 1)\"\n assert dsk[(\"y\", 1)][1][0] == \"('z', 1)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_OperatorMethodMixin_partial_by_order.return.function_args2_kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_OperatorMethodMixin_partial_by_order.return.function_args2_kwargs", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1172, "end_line": 1224, "span_ids": ["OperatorMethodMixin._get_binary_operator", "partial_by_order", "OperatorMethodMixin", "OperatorMethodMixin._get_unary_operator", "OperatorMethodMixin._bind_operator"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class OperatorMethodMixin:\n \"\"\"A mixin for dynamically implementing operators\"\"\"\n\n __slots__ = ()\n\n @classmethod\n def _bind_operator(cls, op):\n \"\"\"bind operator to this class\"\"\"\n name = op.__name__\n\n if name.endswith(\"_\"):\n # for and_ and or_\n name = name[:-1]\n elif name == \"inv\":\n name = \"invert\"\n\n meth = f\"__{name}__\"\n\n if name in (\"abs\", \"invert\", \"neg\", \"pos\"):\n setattr(cls, meth, cls._get_unary_operator(op))\n else:\n setattr(cls, meth, cls._get_binary_operator(op))\n\n if name in (\"eq\", \"gt\", \"ge\", \"lt\", \"le\", \"ne\", \"getitem\"):\n return\n\n rmeth = f\"__r{name}__\"\n setattr(cls, rmeth, cls._get_binary_operator(op, inv=True))\n\n @classmethod\n def _get_unary_operator(cls, op):\n \"\"\"Must return a method used by unary operator\"\"\"\n raise NotImplementedError\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n \"\"\"Must return a method used by binary operator\"\"\"\n raise NotImplementedError\n\n\ndef partial_by_order(*args, **kwargs):\n \"\"\"\n\n >>> from operator import add\n >>> partial_by_order(5, function=add, other=[(1, 10)])\n 15\n \"\"\"\n function = kwargs.pop(\"function\")\n other = kwargs.pop(\"other\")\n args2 = list(args)\n for i, arg in other:\n args2.insert(i, arg)\n return function(*args2, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_arraylike_is_arraylike.return.bool_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_arraylike_is_arraylike.return.bool_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1110, "end_line": 1150, "span_ids": ["is_arraylike"], "tokens": 385}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_arraylike(x):\n \"\"\"Is this object a numpy array or something similar?\n\n This function tests specifically for an object that already has\n array attributes (e.g. np.ndarray, dask.array.Array, cupy.ndarray,\n sparse.COO), **NOT** for something that can be coerced into an\n array object (e.g. Python lists and tuples). It is meant for dask\n developers and developers of downstream libraries.\n\n Note that this function does not correspond with NumPy's\n definition of array_like, which includes any object that can be\n coerced into an array (see definition in the NumPy glossary):\n https://numpy.org/doc/stable/glossary.html\n\n Examples\n --------\n >>> import numpy as np\n >>> is_arraylike(np.ones(5))\n True\n >>> is_arraylike(np.ones(()))\n True\n >>> is_arraylike(5)\n False\n >>> is_arraylike('cat')\n False\n \"\"\"\n from .base import is_dask_collection\n\n is_duck_array = hasattr(x, \"__array_function__\") or hasattr(x, \"__array_ufunc__\")\n\n return bool(\n hasattr(x, \"shape\")\n and isinstance(x.shape, tuple)\n and hasattr(x, \"dtype\")\n and not any(is_dask_collection(n) for n in x.shape)\n # We special case scipy.sparse and cupyx.scipy.sparse arrays as having partial\n # support for them is useful in scenerios where we mostly call `map_partitions`\n # or `map_blocks` with scikit-learn functions on dask arrays and dask dataframes.\n # https://github.com/dask/dask/pull/3738\n and (is_duck_array or \"scipy.sparse\" in typename(type(x)))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_bytes_format_bytes.return.f_n_B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_format_bytes_format_bytes.return.f_n_B_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1515, "end_line": 1543, "span_ids": ["format_bytes"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_bytes(n: int) -> str:\n \"\"\"Format bytes as text\n\n >>> from dask.utils import format_bytes\n >>> format_bytes(1)\n '1 B'\n >>> format_bytes(1234)\n '1.21 kiB'\n >>> format_bytes(12345678)\n '11.77 MiB'\n >>> format_bytes(1234567890)\n '1.15 GiB'\n >>> format_bytes(1234567890000)\n '1.12 TiB'\n 
>>> format_bytes(1234567890000000)\n '1.10 PiB'\n\n For all values < 2**60, the output is always <= 10 characters.\n \"\"\"\n for prefix, k in (\n (\"Pi\", 2**50),\n (\"Ti\", 2**40),\n (\"Gi\", 2**30),\n (\"Mi\", 2**20),\n (\"ki\", 2**10),\n ):\n if n >= k * 0.9:\n return f\"{n / k:.2f} {prefix}B\"\n return f\"{n} B\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_from_random_import_randin_trivial.return.d_x_height_1_i_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/scripts/scheduling.py_from_random_import_randin_trivial.return.d_x_height_1_i_", "embedding": null, "metadata": {"file_path": "docs/source/scripts/scheduling.py", "file_name": "scheduling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 22, "span_ids": ["imports", "noop", "trivial", "impl"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from random import randint\nfrom time import time\n\nimport matplotlib.pyplot as plt\n\nimport dask\nfrom dask import local, multiprocessing, threaded\n\n\ndef noop(x):\n pass\n\n\nnrepetitions = 1\n\n\ndef trivial(width, height):\n \"\"\"Embarrassingly parallel dask\"\"\"\n d = {(\"x\", 0, i): i for i in range(width)}\n for j in range(1, height):\n d.update({(\"x\", j, i): (noop, (\"x\", j - 1, i)) for i in range(width)})\n return d, [(\"x\", height - 1, i) for i in range(width)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 12, "span_ids": ["imports"], "tokens": 86}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from . 
import config, datasets\nfrom ._version import get_versions\nfrom .base import annotate, compute, is_dask_collection, optimize, persist, visualize\nfrom .core import istask\nfrom .delayed import delayed\nfrom .local import get_sync as get\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__concatenate__concatenate.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__concatenate__concatenate.return.out", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 22, "end_line": 35, "span_ids": ["_concatenate"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@concatenate_lookup.register(np.ma.masked_array)\ndef _concatenate(arrays, axis=0):\n out = np.ma.concatenate(arrays, axis=axis)\n fill_values = [i.fill_value for i in arrays if hasattr(i, \"fill_value\")]\n if any(isinstance(f, np.ndarray) for f in fill_values):\n raise ValueError(\n \"Dask doesn't support masked arrays with non-scalar `fill_value`s\"\n )\n if fill_values:\n # If all the fill_values are the same, copy over the fill value\n fill_values = np.unique(fill_values)\n if len(fill_values) == 1:\n out.fill_value = fill_values[0]\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__tensordot__tensordot.return.res_reshape_olda_oldb_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py__tensordot__tensordot.return.res_reshape_olda_oldb_", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 103, "span_ids": ["_tensordot"], "tokens": 549}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@tensordot_lookup.register(np.ma.masked_array)\ndef _tensordot(a, b, axes=2):\n # Much of this is stolen from numpy/core/numeric.py::tensordot\n try:\n iter(axes)\n except TypeError:\n axes_a = list(range(-axes, 0))\n axes_b = list(range(0, axes))\n else:\n axes_a, axes_b = axes\n try:\n na = len(axes_a)\n axes_a = list(axes_a)\n except TypeError:\n axes_a = [axes_a]\n na = 1\n try:\n nb = len(axes_b)\n axes_b 
= list(axes_b)\n except TypeError:\n axes_b = [axes_b]\n nb = 1\n\n # a, b = asarray(a), asarray(b) # <--- modified\n as_ = a.shape\n nda = a.ndim\n bs = b.shape\n ndb = b.ndim\n equal = True\n if na != nb:\n equal = False\n else:\n for k in range(na):\n if as_[axes_a[k]] != bs[axes_b[k]]:\n equal = False\n break\n if axes_a[k] < 0:\n axes_a[k] += nda\n if axes_b[k] < 0:\n axes_b[k] += ndb\n if not equal:\n raise ValueError(\"shape-mismatch for sum\")\n\n # Move the axes to sum over to the end of \"a\"\n # and to the front of \"b\"\n notin = [k for k in range(nda) if k not in axes_a]\n newaxes_a = notin + axes_a\n N2 = 1\n for axis in axes_a:\n N2 *= as_[axis]\n newshape_a = (-1, N2)\n olda = [as_[axis] for axis in notin]\n\n notin = [k for k in range(ndb) if k not in axes_b]\n newaxes_b = axes_b + notin\n N2 = 1\n for axis in axes_b:\n N2 *= bs[axis]\n newshape_b = (N2, -1)\n oldb = [bs[axis] for axis in notin]\n\n at = a.transpose(newaxes_a).reshape(newshape_a)\n bt = b.transpose(newaxes_b).reshape(newshape_b)\n res = np.ma.dot(at, bt)\n return res.reshape(olda + oldb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupy_register_cupy._cupy_einsum.return.cupy_einsum_args_kwar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_register_cupy_register_cupy._cupy_einsum.return.cupy_einsum_args_kwar", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 113, "end_line": 129, "span_ids": ["register_cupy"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@tensordot_lookup.register_lazy(\"cupy\")\n@concatenate_lookup.register_lazy(\"cupy\")\ndef register_cupy():\n import cupy\n\n from dask.array.dispatch import percentile_lookup\n\n concatenate_lookup.register(cupy.ndarray, cupy.concatenate)\n tensordot_lookup.register(cupy.ndarray, cupy.tensordot)\n percentile_lookup.register(cupy.ndarray, percentile)\n\n @einsum_lookup.register(cupy.ndarray)\n def _cupy_einsum(*args, **kwargs):\n # NB: cupy does not accept `order` or `casting` kwargs - ignore\n kwargs.pop(\"casting\", None)\n kwargs.pop(\"order\", None)\n return cupy.einsum(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks_unify_chunks.arrays._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks_unify_chunks.arrays._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3554, "end_line": 3633, "span_ids": 
["unify_chunks"], "tokens": 694}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unify_chunks(*args, **kwargs):\n \"\"\"\n Unify chunks across a sequence of arrays\n\n This utility function is used within other common operations like\n :func:`dask.array.core.map_blocks` and :func:`dask.array.core.blockwise`.\n It is not commonly used by end-users directly.\n\n Parameters\n ----------\n *args: sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones(10, chunks=((5, 2, 3),))\n >>> y = da.ones(10, chunks=((2, 3, 5),))\n >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')\n >>> chunkss\n {'i': (2, 3, 2, 3)}\n\n >>> x = da.ones((100, 10), chunks=(20, 5))\n >>> y = da.ones((10, 100), chunks=(4, 50))\n >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)\n >>> chunkss # doctest: +SKIP\n {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}\n\n >>> unify_chunks(0, None)\n ({}, [0])\n\n Returns\n -------\n chunkss : dict\n Map like {index: chunks}.\n arrays : list\n List of rechunked arrays.\n\n See Also\n --------\n common_blockdim\n \"\"\"\n if not args:\n return {}, []\n\n arginds = [\n (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)\n ] # [x, ij, y, jk]\n warn = kwargs.get(\"warn\", True)\n\n arrays, inds = zip(*arginds)\n if all(ind is None for ind in inds):\n return {}, list(arrays)\n if all(ind == inds[0] for ind in inds) and all(\n a.chunks == arrays[0].chunks for a in arrays\n ):\n return dict(zip(inds[0], arrays[0].chunks)), arrays\n\n nameinds = []\n blockdim_dict = dict()\n max_parts = 0\n for a, ind in arginds:\n if ind is not None:\n nameinds.append((a.name, ind))\n blockdim_dict[a.name] = a.chunks\n max_parts = max(max_parts, a.npartitions)\n else:\n nameinds.append((a, ind))\n\n chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)\n nparts = np.prod(list(map(len, chunkss.values())))\n\n if warn and nparts and nparts >= max_parts * 10:\n warnings.warn(\n \"Increasing number of chunks by factor of %d\" % (nparts / max_parts),\n PerformanceWarning,\n stacklevel=3,\n )\n\n arrays = []\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks.for_a_i_in_arginds__unpack_singleton.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_unify_chunks.for_a_i_in_arginds__unpack_singleton.return.x", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3634, "end_line": 3666, "span_ids": ["unify_chunks", "unpack_singleton"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unify_chunks(*args, **kwargs):\n # ... other code\n for a, i in arginds:\n if i is None:\n arrays.append(a)\n else:\n chunks = tuple(\n chunkss[j]\n if a.shape[n] > 1\n else a.shape[n]\n if not np.isnan(sum(chunkss[j]))\n else None\n for n, j in enumerate(i)\n )\n if chunks != a.chunks and all(a.chunks):\n arrays.append(a.rechunk(chunks))\n else:\n arrays.append(a)\n return chunkss, arrays\n\n\ndef unpack_singleton(x):\n \"\"\"\n\n >>> unpack_singleton([[[[1]]]])\n 1\n >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))\n array('2000-01-01', dtype='datetime64[D]')\n \"\"\"\n while isinstance(x, (list, tuple)):\n try:\n x = x[0]\n except (IndexError, TypeError, KeyError):\n break\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_deepfirst_shapelist.if_type_a_is_list_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_deepfirst_shapelist.if_type_a_is_list_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4594, "end_line": 4611, "span_ids": ["shapelist", "deepfirst"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def deepfirst(seq):\n \"\"\"First element in a nested list\n\n >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])\n 1\n \"\"\"\n if not isinstance(seq, (list, tuple)):\n return seq\n else:\n return deepfirst(seq[0])\n\n\ndef shapelist(a):\n \"\"\"Get the shape of nested list\"\"\"\n if type(a) is list:\n return tuple([len(a)] + list(shapelist(a[0])))\n else:\n return ()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_itertools_empty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_itertools_empty", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 28, "span_ids": ["imports"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nfrom collections.abc import Sequence\nfrom functools import partial\nfrom itertools import product\nfrom numbers import Integral, Number\n\nimport numpy as np\nfrom tlz import sliding_window\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import cached_cumsum, derived_from, is_cupy_type\nfrom . import chunk\nfrom .core import (\n Array,\n asarray,\n block,\n blockwise,\n broadcast_arrays,\n broadcast_to,\n concatenate,\n normalize_chunks,\n stack,\n)\nfrom .numpy_compat import _numpy_120\nfrom .ufunc import greater_equal, rint\nfrom .utils import meta_from_array\nfrom .wrap import empty, full, ones, zeros", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tri_tri.return.m": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_tri_tri.return.m", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 683, "end_line": 703, "span_ids": ["tri"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef tri(N, M=None, k=0, dtype=float, chunks=\"auto\", *, like=None):\n if not _numpy_120 and like is not None:\n raise RuntimeError(\"The use of ``like`` required NumPy >= 1.20\")\n\n _min_int = np.lib.twodim_base._min_int\n\n if M is None:\n M = N\n\n chunks = normalize_chunks(chunks, shape=(N, M), dtype=dtype)\n\n m = greater_equal(\n arange(N, chunks=chunks[0][0], dtype=_min_int(0, N), like=like).reshape(1, N).T,\n arange(-k, M - k, chunks=chunks[1][0], dtype=_min_int(-k, M - k), like=like),\n )\n\n # Avoid making a copy if the requested type is already bool\n m = m.astype(dtype, copy=False)\n\n return m", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_fromfunction_fromfunction.return.res": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_fromfunction_fromfunction.return.res", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 705, "end_line": 719, "span_ids": ["fromfunction"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef fromfunction(func, chunks=\"auto\", shape=None, dtype=None, **kwargs):\n dtype = dtype or float\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n\n inds = tuple(range(len(shape)))\n\n arrs = [arange(s, dtype=dtype, chunks=c) for s, c in zip(shape, chunks)]\n arrs = meshgrid(*arrs, indexing=\"ij\")\n\n args = sum(zip(arrs, itertools.repeat(inds)), ())\n\n res = blockwise(func, inds, *args, token=\"fromfunction\", **kwargs)\n\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/dispatch.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/dispatch.py__", "embedding": null, "metadata": {"file_path": "dask/array/dispatch.py", "file_name": "dispatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 15, "span_ids": ["docstring"], "tokens": 74}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nDispatch in dask.array.\n\nAlso see backends.py\n\"\"\"\n\nfrom ..utils import Dispatch\n\nconcatenate_lookup = Dispatch(\"concatenate\")\ntensordot_lookup = Dispatch(\"tensordot\")\neinsum_lookup = Dispatch(\"einsum\")\nempty_lookup = Dispatch(\"empty\")\ndivide_lookup = Dispatch(\"divide\")\npercentile_lookup = Dispatch(\"percentile\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.for_dim_sizes_in_dimsize_apply_gufunc.leaf_arrs._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.for_dim_sizes_in_dimsize_apply_gufunc.leaf_arrs._", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 426, "end_line": 487, "span_ids": ["apply_gufunc"], "tokens": 686}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(\n func,\n signature,\n *args,\n axes=None,\n axis=None,\n keepdims=False,\n output_dtypes=None,\n output_sizes=None,\n vectorize=None,\n allow_rechunk=False,\n meta=None,\n **kwargs,\n):\n # ... other code\n for dim, sizes in dimsizess.items():\n #### Check that the arrays have same length for same dimensions or dimension `1`\n if set(sizes) | {1} != {1, max(sizes)}:\n raise ValueError(f\"Dimension `'{dim}'` with different lengths in arrays\")\n if not allow_rechunk:\n chunksizes = chunksizess[dim]\n #### Check if core dimensions consist of only one chunk\n if (dim in core_shapes) and (chunksizes[0][0] < core_shapes[dim]):\n raise ValueError(\n \"Core dimension `'{}'` consists of multiple chunks. To fix, rechunk into a single \\\nchunk along this dimension or set `allow_rechunk=True`, but beware that this may increase memory usage \\\nsignificantly.\".format(\n dim\n )\n )\n #### Check if loop dimensions consist of same chunksizes, when they have sizes > 1\n relevant_chunksizes = list(\n unique(c for s, c in zip(sizes, chunksizes) if s > 1)\n )\n if len(relevant_chunksizes) > 1:\n raise ValueError(\n f\"Dimension `'{dim}'` with different chunksize present\"\n )\n\n ## Apply function - use blockwise here\n arginds = list(concat(zip(args, input_dimss)))\n\n ### Use existing `blockwise` but only with loopdims to enforce\n ### concatenation for coredims that appear also at the output\n ### Modifying `blockwise` could improve things here.\n tmp = blockwise(\n func, loop_output_dims, *arginds, concatenate=True, meta=meta, **kwargs\n )\n\n # NOTE: we likely could just use `meta` instead of `tmp._meta`,\n # but we use it and validate it anyway just to be sure nothing odd has happened.\n metas = tmp._meta\n if nout is None:\n assert not isinstance(\n metas, (list, tuple)\n ), f\"meta changed from single output to multiple output during blockwise: {meta} -> {metas}\"\n metas = (metas,)\n else:\n assert isinstance(\n metas, (list, tuple)\n ), f\"meta changed from multiple output to single output during blockwise: {meta} -> {metas}\"\n assert (\n len(metas) == nout\n ), f\"Number of outputs changed from {nout} to {len(metas)} during blockwise\"\n\n ## Prepare output shapes\n loop_output_shape = tmp.shape\n loop_output_chunks = tmp.chunks\n keys = list(flatten(tmp.__dask_keys__()))\n name, token = keys[0][0].split(\"-\")\n\n ### *) Treat direct output\n if nout is None:\n output_coredimss = [output_coredimss]\n\n ## Split output\n leaf_arrs = []\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.for_i_ocd_oax_meta_i_apply_gufunc._Undo_from_above": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.for_i_ocd_oax_meta_i_apply_gufunc._Undo_from_above", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 486, "end_line": 520, "span_ids": ["apply_gufunc"], "tokens": 434}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(\n func,\n signature,\n *args,\n axes=None,\n axis=None,\n keepdims=False,\n output_dtypes=None,\n output_sizes=None,\n vectorize=None,\n allow_rechunk=False,\n meta=None,\n **kwargs,\n):\n # ... other code\n for i, (ocd, oax, meta) in enumerate(zip(output_coredimss, output_axes, metas)):\n core_output_shape = tuple(core_shapes[d] for d in ocd)\n core_chunkinds = len(ocd) * (0,)\n output_shape = loop_output_shape + core_output_shape\n output_chunks = loop_output_chunks + core_output_shape\n leaf_name = \"%s_%d-%s\" % (name, i, token)\n leaf_dsk = {\n (leaf_name,)\n + key[1:]\n + core_chunkinds: ((getitem, key, i) if nout else key)\n for key in keys\n }\n graph = HighLevelGraph.from_collections(leaf_name, leaf_dsk, dependencies=[tmp])\n meta = meta_from_array(meta, len(output_shape))\n leaf_arr = Array(\n graph, leaf_name, chunks=output_chunks, shape=output_shape, meta=meta\n )\n\n ### Axes:\n if keepdims:\n slices = len(leaf_arr.shape) * (slice(None),) + len(oax) * (np.newaxis,)\n leaf_arr = leaf_arr[slices]\n\n tidcs = [None] * len(leaf_arr.shape)\n for ii, oa in zip(range(-len(oax), 0), oax):\n tidcs[oa] = ii\n j = 0\n for ii in range(len(tidcs)):\n if tidcs[ii] is None:\n tidcs[ii] = j\n j += 1\n leaf_arr = leaf_arr.transpose(tidcs)\n leaf_arrs.append(leaf_arr)\n\n return (*leaf_arrs,) if nout else leaf_arrs[0] # Undo *) from above", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_from_functools_import_wra_masked_outside.return.x_map_blocks_np_ma_masked": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/ma.py_from_functools_import_wra_masked_outside.return.x_map_blocks_np_ma_masked", "embedding": null, "metadata": {"file_path": "dask/array/ma.py", "file_name": "ma.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 68, "span_ids": ["impl", "masked_equal", "imports", "_wrap_masked", "masked_inside", "masked_outside", "normalize_masked_array", "filled", "masked_invalid"], "tokens": 501}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from functools import wraps\n\nimport numpy as np\n\nfrom ..base import normalize_token\nfrom ..utils import derived_from\nfrom .core import asanyarray, blockwise, map_blocks\nfrom .routines import _average\n\n\n@normalize_token.register(np.ma.masked_array)\ndef normalize_masked_array(x):\n data = normalize_token(x.data)\n mask = normalize_token(x.mask)\n fill_value = normalize_token(x.fill_value)\n return (data, mask, fill_value)\n\n\n@derived_from(np.ma)\ndef filled(a, fill_value=None):\n a = asanyarray(a)\n return a.map_blocks(np.ma.filled, fill_value=fill_value)\n\n\ndef _wrap_masked(f):\n @wraps(f)\n def _(a, value):\n a = asanyarray(a)\n value = asanyarray(value)\n ainds = tuple(range(a.ndim))[::-1]\n vinds = tuple(range(value.ndim))[::-1]\n oinds = max(ainds, vinds, key=len)\n return blockwise(f, oinds, a, ainds, value, vinds, dtype=a.dtype)\n\n return _\n\n\nmasked_greater = _wrap_masked(np.ma.masked_greater)\nmasked_greater_equal = _wrap_masked(np.ma.masked_greater_equal)\nmasked_less = _wrap_masked(np.ma.masked_less)\nmasked_less_equal = _wrap_masked(np.ma.masked_less_equal)\nmasked_not_equal = _wrap_masked(np.ma.masked_not_equal)\n\n\n@derived_from(np.ma)\ndef masked_equal(a, value):\n a = asanyarray(a)\n if getattr(value, \"shape\", ()):\n raise ValueError(\"da.ma.masked_equal doesn't support array `value`s\")\n inds = tuple(range(a.ndim))\n return blockwise(np.ma.masked_equal, inds, a, inds, value, (), dtype=a.dtype)\n\n\n@derived_from(np.ma)\ndef masked_invalid(a):\n return asanyarray(a).map_blocks(np.ma.masked_invalid)\n\n\n@derived_from(np.ma)\ndef masked_inside(x, v1, v2):\n x = asanyarray(x)\n return x.map_blocks(np.ma.masked_inside, v1, v2)\n\n\n@derived_from(np.ma)\ndef masked_outside(x, v1, v2):\n x = asanyarray(x)\n return x.map_blocks(np.ma.masked_outside, v1, v2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_warnings__overlap_internal_chunks.return.chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_warnings__overlap_internal_chunks.return.chunks", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 38, "span_ids": ["imports", "_overlap_internal_chunks"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nfrom numbers import Integral, Number\n\nimport numpy as np\nfrom tlz import concat, get, partial\nfrom tlz.curried import map\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..layers import ArrayOverlapLayer\nfrom ..utils import derived_from\nfrom . 
import chunk, numpy_compat\nfrom .core import Array, concatenate, map_blocks, unify_chunks\nfrom .creation import empty_like, full_like\n\n\ndef _overlap_internal_chunks(original_chunks, axes):\n \"\"\"Get new chunks for array with overlap.\"\"\"\n chunks = []\n for i, bds in enumerate(original_chunks):\n depth = axes.get(i, 0)\n if isinstance(depth, tuple):\n left_depth = depth[0]\n right_depth = depth[1]\n else:\n left_depth = depth\n right_depth = depth\n\n if len(bds) == 1:\n chunks.append(bds)\n else:\n left = [bds[0] + right_depth]\n right = [bds[-1] + left_depth]\n mid = []\n for bd in bds[1:-1]:\n mid.append(bd + left_depth + right_depth)\n chunks.append(left + mid + right)\n return chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_internal_trim_overlap.return.trim_internal_x_axes_axe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_overlap_internal_trim_overlap.return.trim_internal_x_axes_axe", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 41, "end_line": 85, "span_ids": ["trim_overlap", "overlap_internal"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def overlap_internal(x, axes):\n \"\"\"Share boundaries between neighboring blocks\n\n Parameters\n ----------\n\n x: da.Array\n A dask array\n axes: dict\n The size of the shared boundary per axis\n\n The axes input informs how many cells to overlap between neighboring blocks\n {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis\n \"\"\"\n token = tokenize(x, axes)\n name = \"overlap-\" + token\n\n graph = ArrayOverlapLayer(\n name=x.name,\n axes=axes,\n chunks=x.chunks,\n numblocks=x.numblocks,\n token=token,\n )\n graph = HighLevelGraph.from_collections(name, graph, dependencies=[x])\n chunks = _overlap_internal_chunks(x.chunks, axes)\n\n return Array(graph, name, chunks, meta=x)\n\n\ndef trim_overlap(x, depth, boundary=None):\n \"\"\"Trim sides from each block.\n\n This couples well with the ``map_overlap`` operation which may leave\n excess data on each block.\n\n See also\n --------\n dask.array.overlap.map_overlap\n\n \"\"\"\n\n # parameter to be passed to trim_internal\n axes = coerce_depth(x.ndim, depth)\n return trim_internal(x, axes=axes, boundary=boundary)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_trim_internal_trim_internal.return.map_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_trim_internal_trim_internal.return.map_blocks_", "embedding": null, "metadata": {"file_path": 
"dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 89, "end_line": 132, "span_ids": ["trim_internal"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def trim_internal(x, axes, boundary=None):\n \"\"\"Trim sides from each block\n\n This couples well with the overlap operation, which may leave excess data on\n each block\n\n See also\n --------\n dask.array.chunk.trim\n dask.array.map_blocks\n \"\"\"\n boundary = coerce_boundary(x.ndim, boundary)\n\n olist = []\n for i, bd in enumerate(x.chunks):\n bdy = boundary.get(i, \"none\")\n overlap = axes.get(i, 0)\n ilist = []\n for j, d in enumerate(bd):\n if bdy != \"none\":\n if isinstance(overlap, tuple):\n d = d - sum(overlap)\n else:\n d = d - overlap * 2\n\n else:\n if isinstance(overlap, tuple):\n d = d - overlap[0] if j != 0 else d\n d = d - overlap[1] if j != len(bd) - 1 else d\n else:\n d = d - overlap if j != 0 else d\n d = d - overlap if j != len(bd) - 1 else d\n\n ilist.append(d)\n olist.append(tuple(ilist))\n chunks = tuple(olist)\n\n return map_blocks(\n partial(_trim, axes=axes, boundary=boundary),\n x,\n chunks=chunks,\n dtype=x.dtype,\n meta=x._meta,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.beta_RandomState.with_contextlib_suppress_.choice.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_RandomState.beta_RandomState.with_contextlib_suppress_.choice.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 192, "end_line": 276, "span_ids": ["RandomState.binomial", "RandomState.beta", "RandomState.chisquare", "RandomState:3"], "tokens": 803}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RandomState:\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def beta(self, a, b, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"beta\", a, b, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def binomial(self, n, p, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"binomial\", n, p, size=size, chunks=chunks, **kwargs)\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def chisquare(self, df, size=None, chunks=\"auto\", **kwargs):\n return self._wrap(\"chisquare\", df, size=size, chunks=chunks, **kwargs)\n\n with contextlib.suppress(AttributeError):\n\n @derived_from(np.random.RandomState, skipblocks=1)\n def choice(self, a, size=None, replace=True, p=None, chunks=\"auto\"):\n 
dependencies = []\n # Normalize and validate `a`\n if isinstance(a, Integral):\n # On windows the output dtype differs if p is provided or\n # absent, see https://github.com/numpy/numpy/issues/9867\n dummy_p = np.array([1]) if p is not None else p\n dtype = np.random.choice(1, size=(), p=dummy_p).dtype\n len_a = a\n if a < 0:\n raise ValueError(\"a must be greater than 0\")\n else:\n a = asarray(a)\n a = a.rechunk(a.shape)\n dtype = a.dtype\n if a.ndim != 1:\n raise ValueError(\"a must be one dimensional\")\n len_a = len(a)\n dependencies.append(a)\n a = a.__dask_keys__()[0]\n\n # Normalize and validate `p`\n if p is not None:\n if not isinstance(p, Array):\n # If p is not a dask array, first check the sum is close\n # to 1 before converting.\n p = np.asarray(p)\n if not np.isclose(p.sum(), 1, rtol=1e-7, atol=0):\n raise ValueError(\"probabilities do not sum to 1\")\n p = asarray(p)\n else:\n p = p.rechunk(p.shape)\n\n if p.ndim != 1:\n raise ValueError(\"p must be one dimensional\")\n if len(p) != len_a:\n raise ValueError(\"a and p must have the same size\")\n\n dependencies.append(p)\n p = p.__dask_keys__()[0]\n\n if size is None:\n size = ()\n elif not isinstance(size, (tuple, list)):\n size = (size,)\n\n chunks = normalize_chunks(chunks, size, dtype=np.float64)\n if not replace and len(chunks[0]) > 1:\n err_msg = (\n \"replace=False is not currently supported for \"\n \"dask.array.choice with multi-chunk output \"\n \"arrays\"\n )\n raise NotImplementedError(err_msg)\n sizes = list(product(*chunks))\n state_data = random_state_data(len(sizes), self._numpy_state)\n\n name = \"da.random.choice-%s\" % tokenize(\n state_data, size, chunks, a, replace, p\n )\n keys = product([name], *(range(len(bd)) for bd in chunks))\n dsk = {\n k: (_choice, state, a, size, replace, p)\n for k, state, size in zip(keys, state_data, sizes)\n }\n\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=dependencies\n )\n return Array(graph, name, chunks, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__tensordot__tensordot.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__tensordot__tensordot.return.x", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 251, "end_line": 262, "span_ids": ["_tensordot"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _tensordot(a, b, axes):\n x = max([a, b], key=lambda x: x.__array_priority__)\n tensordot = tensordot_lookup.dispatch(type(x))\n x = tensordot(a, b, axes=axes)\n\n if len(axes[0]) != 1:\n ind = [slice(None, None)] * x.ndim\n for a in sorted(axes[0]):\n ind.insert(a, None)\n x = x[tuple(ind)]\n\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_digitize__searchsorted_block.return.res_np_newaxis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_digitize__searchsorted_block.return.res_np_newaxis_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 711, "end_line": 724, "span_ids": ["_searchsorted_block", "digitize"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef digitize(a, bins, right=False):\n bins = asarray_safe(bins, like=meta_from_array(a))\n dtype = np.digitize(asarray_safe([0], like=bins), bins, right=False).dtype\n return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)\n\n\ndef _searchsorted_block(x, y, side):\n res = np.searchsorted(x, y, side=side)\n # 0 is only correct for the first block of a, but blockwise doesn't have a way\n # of telling which block is being operated on (unlike map_blocks),\n # so set all 0 values to a special value and set back at the end of searchsorted\n res[res == 0] = -1\n return res[np.newaxis, :]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_searchsorted_searchsorted.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_searchsorted_searchsorted.return.out", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 727, "end_line": 765, "span_ids": ["searchsorted"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef searchsorted(a, v, side=\"left\", sorter=None):\n if a.ndim != 1:\n raise ValueError(\"Input array a must be one dimensional\")\n\n if sorter is not None:\n raise NotImplementedError(\n \"da.searchsorted with a sorter argument is not supported\"\n )\n\n # call np.searchsorted for each pair of blocks in a and v\n meta = np.searchsorted(a._meta, v._meta)\n out = blockwise(\n _searchsorted_block,\n list(range(v.ndim + 1)),\n a,\n [0],\n v,\n list(range(1, v.ndim + 1)),\n side,\n None,\n meta=meta,\n adjust_chunks={0: 1}, # one row for each block in a\n )\n\n # add offsets to take account of the position of each block within the array a\n a_chunk_sizes = array_safe((0, *a.chunks[0]), like=meta_from_array(a))\n a_chunk_offsets = np.cumsum(a_chunk_sizes)[:-1]\n a_chunk_offsets = a_chunk_offsets[(Ellipsis,) + v.ndim * (np.newaxis,)]\n 
a_offsets = asarray(a_chunk_offsets, chunks=1)\n out = where(out < 0, out, out + a_offsets)\n\n # combine the results from each block (of a)\n out = out.max(axis=0)\n\n # fix up any -1 values\n out[out == -1] = 0\n\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__TODO_dask_linspace_doe__block_hist.return.np_histogram_x_bins_ran": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__TODO_dask_linspace_doe__block_hist.return.np_histogram_x_bins_ran", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 768, "end_line": 785, "span_ids": ["searchsorted", "_block_hist", "_linspace_from_delayed"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# TODO: dask linspace doesn't support delayed values\ndef _linspace_from_delayed(start, stop, num=50):\n linspace_name = \"linspace-\" + tokenize(start, stop, num)\n (start_ref, stop_ref, num_ref), deps = unpack_collections([start, stop, num])\n if len(deps) == 0:\n return np.linspace(start, stop, num=num)\n\n linspace_dsk = {(linspace_name, 0): (np.linspace, start_ref, stop_ref, num_ref)}\n linspace_graph = HighLevelGraph.from_collections(\n linspace_name, linspace_dsk, dependencies=deps\n )\n\n chunks = ((np.nan,),) if is_dask_collection(num) else ((num,),)\n return Array(linspace_graph, linspace_name, chunks, dtype=float)\n\n\ndef _block_hist(x, bins, range=None, weights=None):\n return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__block_histogramdd_rect__block_histogramdd_rect.return.np_histogramdd_sample_bi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__block_histogramdd_rect__block_histogramdd_rect.return.np_histogramdd_sample_bi", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 980, "end_line": 993, "span_ids": ["_block_histogramdd_rect"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _block_histogramdd_rect(sample, bins, range, weights):\n \"\"\"Call numpy.histogramdd for a blocked/chunked calculation.\n\n Slurps the result into an additional outer axis; this new 
axis\n will be used to stack chunked calls of the numpy function and add\n them together later.\n\n Returns\n -------\n :py:obj:`np.ndarray`\n NumPy array with an additional outer dimension.\n\n \"\"\"\n return np.histogramdd(sample, bins, range=range, weights=weights)[0:1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__block_histogramdd_multiarg_histogramdd": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__block_histogramdd_multiarg_histogramdd", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 996, "end_line": 1349, "span_ids": ["histogramdd", "_block_histogramdd_multiarg"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _block_histogramdd_multiarg(*args):\n \"\"\"Call numpy.histogramdd for a multi-argument blocked/chunked calculation.\n\n Slurps the result into an additional outer axis; this new axis\n will be used to stack chunked calls of the numpy function and add\n them together later.\n\n The last three arguments _must be_ (bins, range, weights).\n\n The difference between this function and\n _block_histogramdd_rect is that here we expect the sample\n to be composed of multiple arguments (multiple 1D arrays, each one\n representing a coordinate), while _block_histogramdd_rect\n expects a single rectangular (2D array where columns are\n coordinates) sample.\n\n \"\"\"\n bins, range, weights = args[-3:]\n sample = args[:-3]\n return np.histogramdd(sample, bins=bins, range=range, weights=weights)[0:1]\n\n\ndef histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd._logic_used_in_numpy_his_histogramdd._sequence_of_pairs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd._logic_used_in_numpy_his_histogramdd._sequence_of_pairs_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1317, "end_line": 1398, "span_ids": ["histogramdd"], "tokens": 804}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):\n # logic used in numpy.histogramdd to handle normed/density.\n if normed is None:\n if density is None:\n density = False\n elif density is None:\n # an explicit normed argument was passed, alias it to the new name\n density = normed\n else:\n raise TypeError(\"Cannot specify both 'normed' and 'density'\")\n\n # check if any dask collections (dc) were passed to bins= or\n # range= these are unsupported.\n dc_bins = is_dask_collection(bins)\n if isinstance(bins, (list, tuple)):\n dc_bins = dc_bins or any([is_dask_collection(b) for b in bins])\n dc_range = (\n any([is_dask_collection(r) for r in range]) if range is not None else False\n )\n if dc_bins or dc_range:\n raise NotImplementedError(\n \"Passing dask collections to bins=... or range=... is not supported.\"\n )\n\n # generate token and name for task\n token = tokenize(sample, bins, range, weights, density)\n name = f\"histogramdd-sum-{token}\"\n\n # N == total number of samples\n # D == total number of dimensions\n if hasattr(sample, \"shape\"):\n if len(sample.shape) != 2:\n raise ValueError(\"Single array input to histogramdd should be columnar\")\n else:\n _, D = sample.shape\n n_chunks = sample.numblocks[0]\n rectangular_sample = True\n # Require data to be chunked along the first axis only.\n if sample.shape[1:] != sample.chunksize[1:]:\n raise ValueError(\"Input array can only be chunked along the 0th axis.\")\n elif isinstance(sample, (tuple, list)):\n rectangular_sample = False\n D = len(sample)\n n_chunks = sample[0].numblocks[0]\n for i in _range(1, D):\n if sample[i].chunks != sample[0].chunks:\n raise ValueError(\"All coordinate arrays must be chunked identically.\")\n else:\n raise ValueError(\n \"Incompatible sample. Must be a 2D array or a sequence of 1D arrays.\"\n )\n\n # Require only Array or Delayed objects for bins, range, and weights.\n for argname, val in [(\"bins\", bins), (\"range\", range), (\"weights\", weights)]:\n if not isinstance(bins, (Array, Delayed)) and is_dask_collection(bins):\n raise TypeError(\n \"Dask types besides Array and Delayed are not supported \"\n \"for `histogramdd`. 
For argument `{}`, got: {!r}\".format(argname, val)\n )\n\n # Require that the chunking of the sample and weights is compatible.\n if weights is not None:\n if rectangular_sample and weights.chunks[0] != sample.chunks[0]:\n raise ValueError(\n \"Input array and weights must have the same shape \"\n \"and chunk structure along the first dimension.\"\n )\n elif not rectangular_sample and weights.numblocks[0] != n_chunks:\n raise ValueError(\n \"Input arrays and weights must have the same shape \"\n \"and chunk structure.\"\n )\n\n # if bins is a list or tuple, then make sure the length is the same\n # as the number of dimensions.\n if isinstance(bins, (list, tuple)):\n if len(bins) != D:\n raise ValueError(\n \"The dimension of bins must be equal to the dimension of the sample.\"\n )\n\n # if range is defined, check that it's the right length and also a\n # sequence of pairs.\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd.if_range_is_not_None__histogramdd.n.mapped_sum_axis_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd.if_range_is_not_None__histogramdd.n.mapped_sum_axis_0_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1259, "end_line": 1336, "span_ids": ["histogramdd"], "tokens": 822}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):\n # ... other code\n if range is not None:\n if len(range) != D:\n raise ValueError(\n \"range argument requires one entry, a min max pair, per dimension.\"\n )\n if not all(len(r) == 2 for r in range):\n raise ValueError(\"range argument should be a sequence of pairs\")\n\n # If bins is a single int, create a tuple of len `D` containing `bins`.\n if isinstance(bins, int):\n bins = (bins,) * D\n\n # we will return the edges to mimic the NumPy API (we also use the\n # edges later as a way to calculate the total number of bins).\n if all(isinstance(b, int) for b in bins) and all(len(r) == 2 for r in range):\n edges = [np.linspace(r[0], r[1], b + 1) for b, r in zip(bins, range)]\n else:\n edges = [np.asarray(b) for b in bins]\n\n if rectangular_sample:\n deps = (sample,)\n else:\n deps = tuple(sample)\n\n if weights is not None:\n w_keys = flatten(weights.__dask_keys__())\n deps += (weights,)\n dtype = weights.dtype\n else:\n w_keys = (None,) * n_chunks\n dtype = np.histogramdd([])[0].dtype\n\n # This tuple of zeros represents the chunk index along the columns\n # (we only allow chunking along the rows).\n column_zeros = tuple(0 for _ in _range(D))\n\n # With dsk below, we will construct a (D + 1) dimensional array\n # stacked for each chunk. 
For example, if the histogram is going\n # to be 3 dimensions, this creates a stack of cubes (1 cube for\n # each sample chunk) that will be collapsed into a final cube (the\n # result). Depending on the input data, we can do this in two ways\n #\n # 1. The rectangular case: when the sample is a single 2D array\n # where each column in the sample represents a coordinate of\n # the sample).\n #\n # 2. The sequence-of-arrays case, when the sample is a tuple or\n # list of arrays, with each array in that sequence representing\n # the entirety of one coordinate of the complete sample.\n\n if rectangular_sample:\n sample_keys = flatten(sample.__dask_keys__())\n dsk = {\n (name, i, *column_zeros): (_block_histogramdd_rect, k, bins, range, w)\n for i, (k, w) in enumerate(zip(sample_keys, w_keys))\n }\n else:\n sample_keys = [\n list(flatten(sample[i].__dask_keys__())) for i in _range(len(sample))\n ]\n fused_on_chunk_keys = [\n tuple(sample_keys[j][i] for j in _range(D)) for i in _range(n_chunks)\n ]\n dsk = {\n (name, i, *column_zeros): (\n _block_histogramdd_multiarg,\n *(*k, bins, range, w),\n )\n for i, (k, w) in enumerate(zip(fused_on_chunk_keys, w_keys))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)\n all_nbins = tuple((b.size - 1,) for b in edges)\n stacked_chunks = ((1,) * n_chunks, *all_nbins)\n mapped = Array(graph, name, stacked_chunks, dtype=dtype)\n # Finally, sum over chunks providing to get the final D\n # dimensional result array.\n n = mapped.sum(axis=0)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd.if_density__histogramdd.return.n_asarray_entry_for_en": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogramdd.if_density__histogramdd.return.n_asarray_entry_for_en", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1338, "end_line": 1349, "span_ids": ["histogramdd"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):\n # ... 
other code\n\n if density:\n # compute array of values to divide by the bin width along\n # each dimension.\n width_divider = np.ones(n.shape)\n for i in _range(D):\n shape = np.ones(D, int)\n shape[i] = width_divider.shape[i]\n width_divider *= np.diff(edges[i]).reshape(shape)\n width_divider = asarray(width_divider, chunks=n.chunks)\n return n / width_divider / n.sum(), edges\n\n return n, [asarray(entry) for entry in edges]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ravel_multi_index_ravel_multi_index.return.index_stack_map_blocks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_ravel_multi_index_ravel_multi_index.return.index_stack_map_blocks_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1893, "end_line": 1922, "span_ids": ["ravel_multi_index"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(np.ravel_multi_index)\ndef ravel_multi_index(multi_index, dims, mode=\"raise\", order=\"C\"):\n if np.isscalar(dims):\n dims = (dims,)\n if is_dask_collection(dims) or any(is_dask_collection(d) for d in dims):\n raise NotImplementedError(\n f\"Dask types are not supported in the `dims` argument: {dims!r}\"\n )\n\n if is_arraylike(multi_index):\n index_stack = asarray(multi_index)\n else:\n multi_index_arrs = broadcast_arrays(*multi_index)\n index_stack = stack(multi_index_arrs)\n\n if not np.isnan(index_stack.shape).any() and len(index_stack) != len(dims):\n raise ValueError(\n f\"parameter multi_index must be a sequence of length {len(dims)}\"\n )\n if not np.issubdtype(index_stack.dtype, np.signedinteger):\n raise TypeError(\"only int indices permitted\")\n return index_stack.map_blocks(\n np.ravel_multi_index,\n dtype=np.intp,\n chunks=index_stack.chunks[1:],\n drop_axis=0,\n dims=dims,\n mode=mode,\n order=order,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__int_piecewise__select.return.np_select_condlist_choic": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__int_piecewise__select.return.np_select_condlist_choic", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1925, "end_line": 1953, "span_ids": ["_int_piecewise", "_select", "piecewise"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _int_piecewise(x, *condlist, **kwargs):\n return np.piecewise(\n x, list(condlist), kwargs[\"funclist\"], *kwargs[\"func_args\"], **kwargs[\"func_kw\"]\n )\n\n\n@derived_from(np)\ndef piecewise(x, condlist, funclist, *args, **kw):\n return map_blocks(\n _int_piecewise,\n x,\n *condlist,\n dtype=x.dtype,\n name=\"piecewise\",\n funclist=funclist,\n func_args=args,\n func_kw=kw,\n )\n\n\ndef _select(*args, **kwargs):\n \"\"\"\n This is a version of :func:`numpy.select` that acceptes an arbitrary number of arguments and\n splits them in half to create ``condlist`` and ``choicelist`` params.\n \"\"\"\n split_at = len(args) // 2\n condlist = args[:split_at]\n choicelist = args[split_at:]\n return np.select(condlist, choicelist, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_select_select.return.blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_select_select.return.blockwise_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1956, "end_line": 1987, "span_ids": ["select"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef select(condlist, choicelist, default=0):\n # Making the same checks that np.select\n # Check the size of condlist and choicelist are the same, or abort.\n if len(condlist) != len(choicelist):\n raise ValueError(\"list of cases must be same length as list of conditions\")\n\n if len(condlist) == 0:\n raise ValueError(\"select with an empty condition list is not possible\")\n\n choicelist = [asarray(choice) for choice in choicelist]\n\n try:\n intermediate_dtype = result_type(*choicelist)\n except TypeError as e:\n msg = \"Choicelist elements do not have a common dtype.\"\n raise TypeError(msg) from e\n\n blockwise_shape = tuple(range(choicelist[0].ndim))\n\n condargs = [arg for elem in condlist for arg in (elem, blockwise_shape)]\n choiceargs = [arg for elem in choicelist for arg in (elem, blockwise_shape)]\n\n return blockwise(\n _select,\n blockwise_shape,\n *condargs,\n *choiceargs,\n dtype=intermediate_dtype,\n name=\"select\",\n default=default,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_unknown_chunksizes_test_block_simple_column_wise.assert_eq_expected_resul": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_stack_unknown_chunksizes_test_block_simple_column_wise.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 433, "end_line": 666, "span_ids": ["test_stack_unknown_chunksizes", "test_concatenate_rechunk", "test_concatenate_fixlen_strings", "test_block_simple_row_wise", "test_concatenate", "test_concatenate_unknown_axes", "test_concatenate_flatten", "test_concatenate_types", "test_block_simple_column_wise", "test_concatenate_zero_size"], "tokens": 2131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stack_unknown_chunksizes():\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n a_df = pd.DataFrame({\"x\": np.arange(12)})\n b_df = pd.DataFrame({\"y\": np.arange(12) * 10})\n\n a_ddf = dd.from_pandas(a_df, sort=False, npartitions=3)\n b_ddf = dd.from_pandas(b_df, sort=False, npartitions=3)\n\n a_x = a_ddf.values\n b_x = b_ddf.values\n\n assert np.isnan(a_x.shape[0])\n assert np.isnan(b_x.shape[0])\n\n with pytest.raises(ValueError) as exc_info:\n da.stack([a_x, b_x], axis=0)\n\n assert \"shape\" in str(exc_info.value)\n assert \"nan\" in str(exc_info.value)\n\n c_x = da.stack([a_x, b_x], axis=0, allow_unknown_chunksizes=True)\n\n assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=0))\n\n with pytest.raises(ValueError) as exc_info:\n da.stack([a_x, b_x], axis=1)\n\n assert \"shape\" in str(exc_info.value)\n assert \"nan\" in str(exc_info.value)\n\n c_x = da.stack([a_x, b_x], axis=1, allow_unknown_chunksizes=True)\n\n assert_eq(c_x, np.stack([a_df.values, b_df.values], axis=1))\n\n m_df = pd.DataFrame({\"m\": np.arange(12) * 100})\n n_df = pd.DataFrame({\"n\": np.arange(12) * 1000})\n\n m_ddf = dd.from_pandas(m_df, sort=False, npartitions=3)\n n_ddf = dd.from_pandas(n_df, sort=False, npartitions=3)\n\n m_x = m_ddf.values\n n_x = n_ddf.values\n\n assert np.isnan(m_x.shape[0])\n assert np.isnan(n_x.shape[0])\n\n with pytest.raises(ValueError) as exc_info:\n da.stack([[a_x, b_x], [m_x, n_x]])\n\n assert \"shape\" in str(exc_info.value)\n assert \"nan\" in str(exc_info.value)\n\n c_x = da.stack([[a_x, b_x], [m_x, n_x]], allow_unknown_chunksizes=True)\n\n assert_eq(c_x, np.stack([[a_df.values, b_df.values], [m_df.values, n_df.values]]))\n\n\ndef test_concatenate():\n a, b, c = (\n Array(\n graph_from_arraylike(object(), chunks=(2, 3), shape=(4, 6), name=name),\n name,\n chunks=(2, 3),\n dtype=\"f8\",\n shape=(4, 6),\n )\n for name in \"ABC\"\n )\n\n x = concatenate([a, b, c], axis=0)\n\n assert x.shape == (12, 6)\n assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))\n assert x.dask[(x.name, 0, 1)] == (\"A\", 0, 1)\n assert x.dask[(x.name, 5, 0)] == (\"C\", 1, 0)\n assert same_keys(x, concatenate([a, b, c], axis=0))\n\n y = concatenate([a, b, c], axis=1)\n\n assert y.shape == (4, 18)\n assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))\n assert y.dask[(y.name, 1, 0)] == (\"A\", 1, 0)\n assert y.dask[(y.name, 1, 5)] == (\"C\", 1, 1)\n assert same_keys(y, concatenate([a, b, c], axis=1))\n\n assert 
set(b.dask.keys()).issubset(y.dask.keys())\n\n z = concatenate([a], axis=0)\n\n assert z.shape == a.shape\n assert z.chunks == a.chunks\n assert z.dask == a.dask\n assert z is a\n\n assert (\n concatenate([a, b, c], axis=-1).chunks == concatenate([a, b, c], axis=1).chunks\n )\n\n pytest.raises(ValueError, lambda: concatenate([]))\n pytest.raises(ValueError, lambda: concatenate([a, b, c], axis=2))\n\n\n@pytest.mark.parametrize(\n \"dtypes\", [((\">f8\", \">f8\"), \"float64\"), ((\"()\", a, b, 0, output_dtypes=a.dtype)\n\n\ndef test__validate_normalize_axes_01():\n with pytest.raises(ValueError):\n _validate_normalize_axes([(1, 0)], None, False, [(\"i\", \"j\")], (\"j\",))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([0, 0], None, False, [(\"i\", \"j\")], (\"j\",))\n\n with pytest.raises(ValueError):\n _validate_normalize_axes([(0,), 0], None, False, [(\"i\", \"j\")], (\"j\",))\n\n i, o = _validate_normalize_axes([(1, 0), 0], None, False, [(\"i\", \"j\")], (\"j\",))\n assert i == [(1, 0)]\n assert o == [(0,)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_many_outputs_test_apply_gufunc_output_dtypes_string_many_outputs.assert_min_compute_shap": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_output_dtypes_string_many_outputs_test_apply_gufunc_output_dtypes_string_many_outputs.assert_min_compute_shap", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 156, "end_line": 167, "span_ids": ["test_apply_gufunc_output_dtypes_string_many_outputs"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"vectorize\", [False, True])\ndef test_apply_gufunc_output_dtypes_string_many_outputs(vectorize):\n def stats(x):\n return np.mean(x, axis=-1), np.std(x, axis=-1), np.min(x, axis=-1)\n\n a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))\n mean, std, min = apply_gufunc(\n stats, \"(i)->(),(),()\", a, output_dtypes=(\"f\", \"f\", \"f\"), vectorize=vectorize\n )\n assert mean.compute().shape == (10, 20)\n assert std.compute().shape == (10, 20)\n assert min.compute().shape == (10, 20)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_mixed_inputs_test_gufunc_mixed_inputs_vectorize.assert_eq_x_np_full_8_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_mixed_inputs_test_gufunc_mixed_inputs_vectorize.assert_eq_x_np_full_8_", "embedding": null, "metadata": 
{"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 312, "end_line": 330, "span_ids": ["test_gufunc_mixed_inputs_vectorize", "test_gufunc_mixed_inputs"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc_mixed_inputs():\n def foo(x, y):\n return x + y\n\n a = np.ones((2, 1), dtype=int)\n b = da.ones((1, 8), chunks=(2, 3), dtype=int)\n x = apply_gufunc(foo, \"(),()->()\", a, b, output_dtypes=int)\n assert_eq(x, 2 * np.ones((2, 8), dtype=int))\n\n\ndef test_gufunc_mixed_inputs_vectorize():\n def foo(x, y):\n return (x + y).sum(axis=1)\n\n a = da.ones((8, 3, 5), chunks=(2, 3, 5), dtype=int)\n b = np.ones(5, dtype=int)\n x = apply_gufunc(foo, \"(m,n),(n)->(m)\", a, b, vectorize=True)\n\n assert_eq(x, np.full((8, 3), 10, dtype=int))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_test_as_gufunc.assert_valy_shape_10_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_test_as_gufunc.assert_valy_shape_10_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 333, "end_line": 364, "span_ids": ["test_gufunc", "test_as_gufunc"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc():\n x = da.random.normal(size=(10, 5), chunks=(2, 5))\n\n def foo(x):\n return np.mean(x, axis=-1)\n\n gufoo = gufunc(\n foo,\n signature=\"(i)->()\",\n axis=-1,\n keepdims=False,\n output_dtypes=float,\n vectorize=True,\n )\n\n y = gufoo(x)\n valy = y.compute()\n assert isinstance(y, Array)\n assert valy.shape == (10,)\n\n\ndef test_as_gufunc():\n x = da.random.normal(size=(10, 5), chunks=(2, 5))\n\n @as_gufunc(\"(i)->()\", axis=-1, keepdims=False, output_dtypes=float, vectorize=True)\n def foo(x):\n return np.mean(x, axis=-1)\n\n y = foo(x)\n valy = y.compute()\n assert isinstance(y, Array)\n assert valy.shape == (10,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_broadcasting_loopdims_test_apply_gufunc_broadcasting_loopdims.assert_z_compute_shape_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_apply_gufunc_broadcasting_loopdims_test_apply_gufunc_broadcasting_loopdims.assert_z_compute_shape_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 367, "end_line": 383, "span_ids": ["test_apply_gufunc_broadcasting_loopdims"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_gufunc_broadcasting_loopdims():\n def foo(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 3\n x, y = np.broadcast_arrays(x, y)\n return x, y, x * y\n\n a = da.random.normal(size=(10, 30), chunks=(8, 30))\n b = da.random.normal(size=(20, 1, 30), chunks=(3, 1, 30))\n\n x, y, z = apply_gufunc(\n foo, \"(i),(i)->(i),(i),(i)\", a, b, output_dtypes=3 * (float,), vectorize=False\n )\n\n assert x.compute().shape == (20, 10, 30)\n assert y.compute().shape == (20, 10, 30)\n assert z.compute().shape == (20, 10, 30)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_pytest_test_overlap_internal.assert_same_keys_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_pytest_test_overlap_internal.assert_same_keys_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 51, "span_ids": ["imports", "test_overlap_internal"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\n\nimport dask.array as da\nfrom dask.array.overlap import (\n boundaries,\n constant,\n ensure_minimum_chunksize,\n nearest,\n overlap,\n overlap_internal,\n periodic,\n reflect,\n trim_internal,\n)\nfrom dask.array.utils import assert_eq, same_keys\n\nfrom ..lib.stride_tricks import sliding_window_view\n\n\ndef test_overlap_internal():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n\n g = overlap_internal(d, {0: 2, 1: 1})\n result = g.compute(scheduler=\"sync\")\n assert g.chunks == ((6, 6), (5, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 
28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 59, 60, 61, 62, 63],\n ]\n )\n\n assert_eq(result, expected)\n assert same_keys(overlap_internal(d, {0: 2, 1: 1}), g)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_contextlib_test_array.assert_isinstance_y_da_A": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_contextlib_test_array.assert_isinstance_y_da_A", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 26, "span_ids": ["imports", "test_array"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nimport itertools\nimport sys\nimport warnings\nfrom numbers import Number\n\nimport pytest\nfrom numpy import AxisError\n\nfrom dask.delayed import delayed\n\nnp = pytest.importorskip(\"numpy\")\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq, same_keys\n\n\ndef test_array():\n x = np.ones(5, dtype=\"i4\")\n d = da.ones(5, chunks=3, dtype=\"i4\")\n assert_eq(da.array(d, ndmin=3, dtype=\"i8\"), np.array(x, ndmin=3, dtype=\"i8\"))\n\n # regression #1847 this shall not raise an exception.\n x = da.ones((100, 3), chunks=10)\n y = da.array(x)\n assert isinstance(y, da.Array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_searchsorted_test_searchsorted.assert_eq_out_np_searchs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_searchsorted_test_searchsorted.assert_eq_out_np_searchs", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 629, "end_line": 652, "span_ids": ["test_searchsorted"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"a, a_chunks, v, v_chunks\",\n [\n [[], 1, [], 1],\n [[0], 1, [0], 1],\n [[-10, 0, 10, 20, 30], 3, [11, 30], 2],\n [[-10, 0, 10, 20, 30], 3, [11, 30, -20, 1, -10, 10, 37, 11], 5],\n [[-10, 0, 10, 20, 30], 3, [[11, 30, -20, 1, -10, 10, 37, 11]], 5],\n [[-10, 0, 10, 20, 30], 3, [[7, 0], [-10, 10], [11, -1], 
[15, 15]], (2, 2)],\n ],\n)\n@pytest.mark.parametrize(\"side\", [\"left\", \"right\"])\ndef test_searchsorted(a, a_chunks, v, v_chunks, side):\n a = np.array(a)\n v = np.array(v)\n\n ad = da.asarray(a, chunks=a_chunks)\n vd = da.asarray(v, chunks=v_chunks)\n\n out = da.searchsorted(ad, vd, side)\n\n assert out.shape == vd.shape\n assert out.chunks == vd.chunks\n assert_eq(out, np.searchsorted(a, v, side))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_searchsorted_sorter_not_implemented_test_histogram.assert_same_keys_da_histo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_searchsorted_sorter_not_implemented_test_histogram.assert_same_keys_da_histo", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 655, "end_line": 672, "span_ids": ["test_histogram", "test_searchsorted_sorter_not_implemented"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_searchsorted_sorter_not_implemented():\n with pytest.raises(NotImplementedError):\n da.searchsorted(da.asarray([1, 0]), da.asarray([1]), sorter=da.asarray([1, 0]))\n\n\ndef test_histogram():\n # Test for normal, flattened input\n n = 100\n v = da.random.random(n, chunks=10)\n bins = np.arange(0, 1.01, 0.01)\n (a1, b1) = da.histogram(v, bins=bins)\n (a2, b2) = np.histogram(v, bins=bins)\n\n # Check if the sum of the bins equals the number of samples\n assert a2.sum(axis=0) == n\n assert a1.sum(axis=0) == n\n assert_eq(a1, a2)\n assert same_keys(da.histogram(v, bins=bins)[0], a1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_seq_of_arrays_test_histogramdd_seq_of_arrays.assert_eq_a1_a3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_seq_of_arrays_test_histogramdd_seq_of_arrays.assert_eq_a1_a3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 856, "end_line": 866, "span_ids": ["test_histogramdd_seq_of_arrays"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_seq_of_arrays():\n n1 = 800\n x = 
da.random.uniform(size=(n1,), chunks=200)\n y = da.random.uniform(size=(n1,), chunks=200)\n bx = [0.0, 0.25, 0.75, 1.0]\n by = [0.0, 0.30, 0.70, 0.8, 1.0]\n (a1, b1) = da.histogramdd([x, y], bins=[bx, by])\n (a2, b2) = np.histogramdd([x, y], bins=[bx, by])\n (a3, b3) = np.histogramdd((x.compute(), y.compute()), bins=[bx, by])\n assert_eq(a1, a2)\n assert_eq(a1, a3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_weighted_test_histogramdd_weighted.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_weighted_test_histogramdd_weighted.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 890, "end_line": 907, "span_ids": ["test_histogramdd_weighted"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_weighted():\n # test for normal input\n n1, n2 = 600, 3\n x = da.random.uniform(0, 1, size=(n1, n2), chunks=((200, 200, 200), (3,)))\n w = da.random.uniform(0.5, 0.8, size=(n1,), chunks=200)\n bins = (3, 5, 4)\n ranges = ((0, 1),) * len(bins)\n (a1, b1) = da.histogramdd(x, bins=bins, range=ranges, weights=w)\n (a2, b2) = np.histogramdd(x, bins=bins, range=ranges, weights=w)\n (a3, b3) = np.histogramdd(x.compute(), bins=bins, range=ranges, weights=w.compute())\n assert_eq(a1, a2)\n assert_eq(a1, a3)\n bins = 4\n (a1, b1) = da.histogramdd(x, bins=bins, range=ranges, weights=w)\n (a2, b2) = np.histogramdd(x, bins=bins, range=ranges, weights=w)\n (a3, b3) = np.histogramdd(x.compute(), bins=bins, range=ranges, weights=w.compute())\n assert_eq(a1, a2)\n assert_eq(a1, a3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_weighted_density_test_histogramdd_weighted_density.assert_eq_a1_a3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_weighted_density_test_histogramdd_weighted_density.assert_eq_a1_a3_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 924, "end_line": 934, "span_ids": ["test_histogramdd_weighted_density"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_histogramdd_weighted_density():\n n1, n2 = 1200, 4\n x = da.random.standard_normal(size=(n1, n2), chunks=(200, 4))\n w = da.random.uniform(0.5, 1.2, size=(n1,), chunks=200)\n bins = (5, 6, 7, 8)\n ranges = ((-4, 4),) * len(bins)\n (a1, b1) = da.histogramdd(x, bins=bins, range=ranges, weights=w, density=True)\n (a2, b2) = np.histogramdd(x, bins=bins, range=ranges, weights=w, density=True)\n (a3, b3) = da.histogramdd(x, bins=bins, range=ranges, weights=w, normed=True)\n assert_eq(a1, a2)\n assert_eq(a1, a3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_sample_chunks_test_histogramdd_raises_incompat_multiarg_chunks.with_pytest_raises_.da_histogramdd_x_y_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_sample_chunks_test_histogramdd_raises_incompat_multiarg_chunks.with_pytest_raises_.da_histogramdd_x_y_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 937, "end_line": 952, "span_ids": ["test_histogramdd_raises_incompat_sample_chunks", "test_histogramdd_raises_incompat_multiarg_chunks"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_raises_incompat_sample_chunks():\n data = da.random.random(size=(10, 3), chunks=(5, 1))\n with pytest.raises(\n ValueError, match=\"Input array can only be chunked along the 0th axis\"\n ):\n da.histogramdd(data, bins=10, range=((0, 1),) * 3)\n\n\ndef test_histogramdd_raises_incompat_multiarg_chunks():\n x = da.random.random(size=(10,), chunks=2)\n y = da.random.random(size=(10,), chunks=2)\n z = da.random.random(size=(10,), chunks=5)\n with pytest.raises(\n ValueError, match=\"All coordinate arrays must be chunked identically.\"\n ):\n da.histogramdd((x, y, z), bins=(3,) * 3, range=((0, 1),) * 3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_weight_chunks_test_histogramdd_raises_incompat_weight_chunks.None_1.da_histogramdd_z_bins_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raises_incompat_weight_chunks_test_histogramdd_raises_incompat_weight_chunks.None_1.da_histogramdd_z_bins_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 955, "end_line": 969, "span_ids": 
["test_histogramdd_raises_incompat_weight_chunks"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_raises_incompat_weight_chunks():\n x = da.random.random(size=(10,), chunks=2)\n y = da.random.random(size=(10,), chunks=2)\n z = da.atleast_2d((x, y)).T.rechunk((2, 2))\n w = da.random.random(size=(10,), chunks=5)\n with pytest.raises(\n ValueError,\n match=\"Input arrays and weights must have the same shape and chunk structure.\",\n ):\n da.histogramdd((x, y), bins=(3,) * 2, range=((0, 1),) * 2, weights=w)\n with pytest.raises(\n ValueError,\n match=\"Input array and weights must have the same shape and chunk structure along the first dimension.\",\n ):\n da.histogramdd(z, bins=(3,) * 2, range=((0, 1),) * 2, weights=w)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raise_normed_and_density_test_histogramdd_raise_incompat_shape.None_1.da_histogramdd_data_bins": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_raise_normed_and_density_test_histogramdd_raise_incompat_shape.None_1.da_histogramdd_data_bins", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1001, "end_line": 1021, "span_ids": ["test_histogramdd_raise_incompat_shape", "test_histogramdd_raise_normed_and_density"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_raise_normed_and_density():\n data = da.random.random(size=(10, 3), chunks=(5, 3))\n bins = (4, 5, 6)\n ranges = ((0, 1),) * 3\n with pytest.raises(TypeError, match=\"Cannot specify both 'normed' and 'density'\"):\n da.histogramdd(data, bins=bins, range=ranges, normed=True, density=True)\n\n\ndef test_histogramdd_raise_incompat_shape():\n # 1D\n data = da.random.random(size=(10,), chunks=(2,))\n with pytest.raises(\n ValueError, match=\"Single array input to histogramdd should be columnar\"\n ):\n da.histogramdd(data, bins=4, range=((-3, 3),))\n # 3D (not columnar)\n data = da.random.random(size=(4, 4, 4), chunks=(2, 2, 2))\n with pytest.raises(\n ValueError, match=\"Single array input to histogramdd should be columnar\"\n ):\n da.histogramdd(data, bins=4, range=((-3, 3),))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_edges_test_histogramdd_edges.None_1.assert_eq_ib1_ib2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_edges_test_histogramdd_edges.None_1.assert_eq_ib1_ib2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1024, "end_line": 1040, "span_ids": ["test_histogramdd_edges"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd_edges():\n data = da.random.random(size=(10, 3), chunks=(5, 3))\n edges = [\n np.array([0.1, 0.3, 0.8, 1.0]),\n np.array([0.2, 0.3, 0.8, 0.9]),\n np.array([0.1, 0.5, 0.7]),\n ]\n # passing bins as an array of bin edges.\n a1, b1 = da.histogramdd(data, bins=edges)\n a2, b2 = np.histogramdd(data.compute(), bins=edges)\n for ib1, ib2 in zip(b1, b2):\n assert_eq(ib1, ib2)\n # passing bins as an int with range definitions\n a1, b1 = da.histogramdd(data, bins=5, range=((0, 1),) * 3)\n a2, b2 = np.histogramdd(data.compute(), bins=5, range=((0, 1),) * 3)\n for ib1, ib2 in zip(b1, b2):\n assert_eq(ib1, ib2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_test_select.assert_eq_np_select_condi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_test_select.assert_eq_np_select_condi", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1536, "end_line": 1549, "span_ids": ["test_select"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_select():\n conditions = [\n np.array([False, False, False, False]),\n np.array([False, True, False, True]),\n np.array([False, False, True, True]),\n ]\n choices = [\n np.array([1, 2, 3, 4]),\n np.array([5, 6, 7, 8]),\n np.array([9, 10, 11, 12]),\n ]\n d_conditions = da.from_array(conditions, chunks=(3, 2))\n d_choices = da.from_array(choices)\n assert_eq(np.select(conditions, choices), da.select(d_conditions, d_choices))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_multidimension_test_select_multidimension.assert_eq_res_y_res_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_multidimension_test_select_multidimension.assert_eq_res_y_res_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1552, "end_line": 1558, "span_ids": ["test_select_multidimension"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_select_multidimension():\n x = np.random.random((100, 50, 2))\n y = da.from_array(x, chunks=(50, 50, 1))\n res_x = np.select([x < 0, x > 2, x > 1], [x, x * 2, x * 3], default=1)\n res_y = da.select([y < 0, y > 2, y > 1], [y, y * 2, y * 3], default=1)\n assert isinstance(res_y, da.Array)\n assert_eq(res_y, res_x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_return_dtype_test_select_broadcasting.assert_eq_np_select_True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_select_return_dtype_test_select_broadcasting.assert_eq_np_select_True", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1561, "end_line": 1577, "span_ids": ["test_select_return_dtype", "test_select_broadcasting"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_select_return_dtype():\n d = np.array([1, 2, 3, np.nan, 5, 7])\n m = np.isnan(d)\n d_d = da.from_array(d)\n d_m = da.isnan(d_d)\n assert_eq(np.select([m], [d]), da.select([d_m], [d_d]), equal_nan=True)\n\n\n@pytest.mark.xfail(reason=\"broadcasting in da.select() not implemented yet\")\ndef test_select_broadcasting():\n conditions = [np.array(True), np.array([False, True, False])]\n choices = [1, np.arange(12).reshape(4, 3)]\n d_conditions = da.from_array(conditions)\n d_choices = da.from_array(choices)\n assert_eq(np.select(conditions, choices), da.select(d_conditions, d_choices))\n # default can broadcast too:\n assert_eq(np.select([True], [0], default=[0]), da.select([True], [0], default=[0]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_unknown_shape_test_ravel_multi_index_unknown_shape.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_unknown_shape_test_ravel_multi_index_unknown_shape.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1898, "end_line": 1908, "span_ids": ["test_ravel_multi_index_unknown_shape"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ravel_multi_index_unknown_shape():\n multi_index = da.from_array([[3, 6, 6], [4, 5, 1], [-1, -1, -1]])\n multi_index = multi_index[(multi_index > 0).all(axis=1)]\n\n multi_index_np = multi_index.compute()\n\n assert np.isnan(multi_index.shape).any()\n assert_eq(\n np.ravel_multi_index(multi_index_np, dims=(7, 6)),\n da.ravel_multi_index(multi_index, dims=(7, 6)),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_unknown_shape_fails_test_ravel_multi_index_unknown_shape_fails.with_pytest_raises_ValueE.da_ravel_multi_index_mult": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_unknown_shape_fails_test_ravel_multi_index_unknown_shape_fails.with_pytest_raises_ValueE.da_ravel_multi_index_mult", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1911, "end_line": 1925, "span_ids": ["test_ravel_multi_index_unknown_shape_fails"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ravel_multi_index_unknown_shape_fails():\n multi_index1 = da.from_array([2, -1, 3, -1], chunks=2)\n multi_index1 = multi_index1[multi_index1 > 0]\n\n multi_index2 = da.from_array(\n [[1, 2], [-1, -1], [3, 4], [5, 6], [7, 8], [-1, -1]], chunks=(2, 1)\n )\n multi_index2 = multi_index2[(multi_index2 > 0).all(axis=1)]\n\n multi_index = [1, multi_index1, multi_index2]\n\n assert np.isnan(multi_index1.shape).any()\n assert np.isnan(multi_index2.shape).any()\n with pytest.raises(ValueError, match=\"Arrays' chunk sizes\"):\n da.ravel_multi_index(multi_index, dims=(8, 9, 10))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_delayed_dims_test_ravel_multi_index_non_int_dtype.with_pytest_raises_TypeEr.da_ravel_multi_index_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_ravel_multi_index_delayed_dims_test_ravel_multi_index_non_int_dtype.with_pytest_raises_TypeEr.da_ravel_multi_index_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2054, "end_line": 2066, "span_ids": ["test_ravel_multi_index_delayed_dims", "test_ravel_multi_index_non_int_dtype"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dims\", [da.from_array([5, 10]), delayed([5, 10], nout=2)])\n@pytest.mark.parametrize(\"wrap_in_list\", [False, True])\ndef test_ravel_multi_index_delayed_dims(dims, wrap_in_list):\n with pytest.raises(NotImplementedError, match=\"Dask types are not supported\"):\n da.ravel_multi_index((2, 1), [dims[0], dims[1]] if wrap_in_list else dims)\n\n\ndef test_ravel_multi_index_non_int_dtype():\n with pytest.raises(TypeError, match=\"only int indices permitted\"):\n da.ravel_multi_index(\n (1.0, 2),\n (5, 10),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_divmod_test_divmod.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_divmod_test_divmod.None_7", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 451, "end_line": 476, "span_ids": ["test_divmod"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_divmod():\n arr1 = np.random.randint(1, 100, size=(20, 20))\n arr2 = np.random.randint(1, 100, size=(20, 20))\n\n darr1 = da.from_array(arr1, 3)\n darr2 = da.from_array(arr2, 3)\n\n result = np.divmod(darr1, 2.0)\n expected = np.divmod(arr1, 2.0)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = np.divmod(darr1, darr2)\n expected = np.divmod(arr1, arr2)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = divmod(darr1, 2.0)\n expected = divmod(arr1, 2.0)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])\n\n result = divmod(darr1, darr2)\n expected = divmod(arr1, arr2)\n assert_eq(result[0], expected[0])\n assert_eq(result[1], expected[1])", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_pack___Blockwise.__dask_distributed_pack__._All_blockwise_tasks_wil": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_pack___Blockwise.__dask_distributed_pack__._All_blockwise_tasks_wil", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 487, "end_line": 570, "span_ids": ["Blockwise.__dask_distributed_pack__"], "tokens": 836}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def __dask_distributed_pack__(\n self, all_hlg_keys, known_key_dependencies, client, client_keys\n ):\n from distributed.protocol import to_serialize\n from distributed.utils import CancelledError\n from distributed.utils_comm import unpack_remotedata\n from distributed.worker import dumps_function\n\n keys = tuple(map(blockwise_token, range(len(self.indices))))\n dsk, _ = fuse(self.dsk, [self.output])\n\n # Embed literals in `dsk`\n keys2 = []\n indices2 = []\n global_dependencies = set()\n for key, (val, index) in zip(keys, self.indices):\n if index is None:\n try:\n val_is_a_key = val in all_hlg_keys\n except TypeError: # not hashable\n val_is_a_key = False\n if val_is_a_key:\n keys2.append(key)\n indices2.append((val, index))\n global_dependencies.add(stringify(val))\n else:\n dsk[key] = val # Literal\n else:\n keys2.append(key)\n indices2.append((val, index))\n\n dsk = (SubgraphCallable(dsk, self.output, tuple(keys2)),)\n dsk, dsk_unpacked_futures = unpack_remotedata(dsk, byte_keys=True)\n\n # Handle `io_deps` serialization. Assume each element\n # is a `BlockwiseDep`-based object.\n packed_io_deps = {}\n inline_tasks = False\n for name, blockwise_dep in self.io_deps.items():\n packed_io_deps[name] = {\n \"__module__\": blockwise_dep.__module__,\n \"__name__\": type(blockwise_dep).__name__,\n # TODO: Pass a `required_indices` list to __pack__\n \"state\": blockwise_dep.__dask_distributed_pack__(),\n }\n inline_tasks = inline_tasks or blockwise_dep.produces_tasks\n\n # Dump (pickle + cache) the function here if we know `make_blockwise_graph`\n # will NOT be producing \"nested\" tasks (via `__dask_distributed_unpack__`).\n #\n # If `make_blockwise_graph` DOES need to produce nested tasks later on, it\n # will need to call `to_serialize` on the entire task. That will be a\n # problem if the function was already pickled here. 
Therefore, we want to\n # call `to_serialize` on the function if we know there will be nested tasks.\n #\n # We know there will be nested tasks if either:\n # (1) `concatenate=True` # Check `self.concatenate`\n # (2) `inline_tasks=True` # Check `BlockwiseDep.produces_tasks`\n #\n # We do not call `to_serialize` in ALL cases, because that code path does\n # not cache the function on the scheduler or worker (or warn if there are\n # large objects being passed into the graph). However, in the future,\n # single-pass serialization improvements should allow us to remove this\n # special logic altogether.\n func = (\n to_serialize(dsk[0])\n if (self.concatenate or inline_tasks)\n else dumps_function(dsk[0])\n )\n func_future_args = dsk[1:]\n\n indices = list(toolz.concat(indices2))\n indices, indices_unpacked_futures = unpack_remotedata(indices, byte_keys=True)\n\n # Check the legality of the unpacked futures\n for future in itertools.chain(dsk_unpacked_futures, indices_unpacked_futures):\n if future.client is not client:\n raise ValueError(\n \"Inputs contain futures that were created by another client.\"\n )\n if stringify(future.key) not in client.futures:\n raise CancelledError(stringify(future.key))\n\n # All blockwise tasks will depend on the futures in `indices`\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_pack__.global_dependencies_s_Blockwise.__dask_distributed_pack__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__dask_distributed_pack__.global_dependencies_s_Blockwise.__dask_distributed_pack__.return._", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 571, "end_line": 587, "span_ids": ["Blockwise.__dask_distributed_pack__"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def __dask_distributed_pack__(\n self, all_hlg_keys, known_key_dependencies, client, client_keys\n ):\n # ... 
other code\n global_dependencies |= {stringify(f.key) for f in indices_unpacked_futures}\n\n return {\n \"output\": self.output,\n \"output_indices\": self.output_indices,\n \"func\": func,\n \"func_future_args\": func_future_args,\n \"global_dependencies\": global_dependencies,\n \"indices\": indices,\n \"is_list\": [isinstance(x, list) for x in indices],\n \"numblocks\": self.numblocks,\n \"concatenate\": self.concatenate,\n \"new_axes\": self.new_axes,\n \"output_blocks\": self.output_blocks,\n \"dims\": self.dims,\n \"io_deps\": packed_io_deps,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py__nonempty_series__nonempty_series.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py__nonempty_series__nonempty_series.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 247, "end_line": 289, "span_ids": ["_nonempty_series"], "tokens": 435}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@meta_nonempty.register(pd.Series)\ndef _nonempty_series(s, idx=None):\n # TODO: Use register dtypes with make_array_nonempty\n if idx is None:\n idx = _nonempty_index(s.index)\n dtype = s.dtype\n if len(s) > 0:\n # use value from meta if provided\n data = [s.iloc[0]] * 2\n elif is_datetime64tz_dtype(dtype):\n entry = pd.Timestamp(\"1970-01-01\", tz=dtype.tz)\n data = [entry, entry]\n elif is_categorical_dtype(dtype):\n if len(s.cat.categories):\n data = [s.cat.categories[0]] * 2\n cats = s.cat.categories\n else:\n data = _nonempty_index(s.cat.categories)\n cats = s.cat.categories[:0]\n data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)\n elif is_integer_na_dtype(dtype):\n data = pd.array([1, None], dtype=dtype)\n elif is_float_na_dtype(dtype):\n data = pd.array([1.0, None], dtype=dtype)\n elif is_period_dtype(dtype):\n # pandas 0.24.0+ should infer this to be Series[Period[freq]]\n freq = dtype.freq\n data = [pd.Period(\"2000\", freq), pd.Period(\"2001\", freq)]\n elif is_sparse(dtype):\n entry = _scalar_from_dtype(dtype.subtype)\n data = pd.array([entry, entry], dtype=dtype)\n elif is_interval_dtype(dtype):\n entry = _scalar_from_dtype(dtype.subtype)\n data = pd.array([entry, entry], dtype=dtype)\n elif type(dtype) in make_array_nonempty._lookup:\n data = make_array_nonempty(dtype)\n else:\n entry = _scalar_from_dtype(dtype)\n data = np.array([entry, entry], dtype=dtype)\n\n out = pd.Series(data, name=s.name, index=idx)\n out.attrs = s.attrs\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_union_categoricals_pandas_hash_object_pandas.return.pd_util_hash_pandas_objec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_union_categoricals_pandas_hash_object_pandas.return.pd_util_hash_pandas_objec", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 303, "end_line": 343, "span_ids": ["union_categoricals_pandas", "get_parallel_type_series", "get_parallel_type_dataframe", "get_parallel_type_object", "get_parallel_type_index", "get_parallel_type_frame", "hash_object_pandas"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@union_categoricals_dispatch.register(\n (pd.DataFrame, pd.Series, pd.Index, pd.Categorical)\n)\ndef union_categoricals_pandas(to_union, sort_categories=False, ignore_order=False):\n return pd.api.types.union_categoricals(\n to_union, sort_categories=sort_categories, ignore_order=ignore_order\n )\n\n\n@get_parallel_type.register(pd.Series)\ndef get_parallel_type_series(_):\n return Series\n\n\n@get_parallel_type.register(pd.DataFrame)\ndef get_parallel_type_dataframe(_):\n return DataFrame\n\n\n@get_parallel_type.register(pd.Index)\ndef get_parallel_type_index(_):\n return Index\n\n\n@get_parallel_type.register(_Frame)\ndef get_parallel_type_frame(o):\n return get_parallel_type(o._meta)\n\n\n@get_parallel_type.register(object)\ndef get_parallel_type_object(_):\n return Scalar\n\n\n@hash_object_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef hash_object_pandas(\n obj, index=True, encoding=\"utf8\", hash_key=None, categorize=True\n):\n return pd.util.hash_pandas_object(\n obj, index=index, encoding=encoding, hash_key=hash_key, categorize=categorize\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_ShuffleGroupResult_ShuffleGroupResult.__sizeof__.return.total_size": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_ShuffleGroupResult_ShuffleGroupResult.__sizeof__.return.total_size", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 346, "end_line": 360, "span_ids": ["ShuffleGroupResult.__sizeof__", "ShuffleGroupResult"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ShuffleGroupResult(SimpleSizeof, dict):\n def __sizeof__(self) -> int:\n \"\"\"\n The result of the shuffle split are 
typically small dictionaries\n (#keys << 100; typically <= 32) The splits are often non-uniformly\n distributed. Some of the splits may even be empty. Sampling the\n dictionary for size estimation can cause severe errors.\n\n See also https://github.com/dask/distributed/issues/4962\n \"\"\"\n total_size = super().__sizeof__()\n for k, df in self.items():\n total_size += sizeof(k)\n total_size += sizeof(df)\n return total_size", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_group_split_pandas_group_split_pandas.return.ShuffleGroupResult_zip_ra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_group_split_pandas_group_split_pandas.return.ShuffleGroupResult_zip_ra", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 363, "end_line": 374, "span_ids": ["group_split_pandas"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@group_split_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef group_split_pandas(df, c, k, ignore_index=False):\n indexer, locations = pd._libs.algos.groupsort_indexer(\n c.astype(np.int64, copy=False), k\n )\n df2 = df.take(indexer)\n locations = locations.cumsum()\n parts = [\n df2.iloc[a:b].reset_index(drop=True) if ignore_index else df2.iloc[a:b]\n for a, b in zip(locations[:-1], locations[1:])\n ]\n return ShuffleGroupResult(zip(range(k), parts))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_pandas_concat_pandas._Concatenate_the_partiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_pandas_concat_pandas._Concatenate_the_partiti", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 366, "end_line": 425, "span_ids": ["concat_pandas"], "tokens": 494}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef concat_pandas(\n dfs,\n axis=0,\n join=\"outer\",\n uniform=False,\n filter_warning=True,\n ignore_index=False,\n **kwargs,\n):\n ignore_order = kwargs.pop(\"ignore_order\", False)\n\n if axis == 1:\n return pd.concat(dfs, axis=axis, join=join, **kwargs)\n\n # Support concatenating 
indices along axis 0\n if isinstance(dfs[0], pd.Index):\n if isinstance(dfs[0], pd.CategoricalIndex):\n for i in range(1, len(dfs)):\n if not isinstance(dfs[i], pd.CategoricalIndex):\n dfs[i] = dfs[i].astype(\"category\")\n return pd.CategoricalIndex(\n union_categoricals(dfs, ignore_order=ignore_order), name=dfs[0].name\n )\n elif isinstance(dfs[0], pd.MultiIndex):\n first, rest = dfs[0], dfs[1:]\n if all(\n (isinstance(o, pd.MultiIndex) and o.nlevels >= first.nlevels)\n for o in rest\n ):\n arrays = [\n concat([i._get_level_values(n) for i in dfs])\n for n in range(first.nlevels)\n ]\n return pd.MultiIndex.from_arrays(arrays, names=first.names)\n\n to_concat = (first.values,) + tuple(k._values for k in rest)\n new_tuples = np.concatenate(to_concat)\n try:\n return pd.MultiIndex.from_tuples(new_tuples, names=first.names)\n except Exception:\n return pd.Index(new_tuples)\n return dfs[0].append(dfs[1:])\n\n # Handle categorical index separately\n dfs0_index = dfs[0].index\n\n has_categoricalindex = isinstance(dfs0_index, pd.CategoricalIndex) or (\n isinstance(dfs0_index, pd.MultiIndex)\n and any(isinstance(i, pd.CategoricalIndex) for i in dfs0_index.levels)\n )\n\n if has_categoricalindex:\n dfs2 = [df.reset_index(drop=True) for df in dfs]\n ind = concat([df.index for df in dfs])\n else:\n dfs2 = dfs\n ind = None\n\n # Concatenate the partitions together, handling categories as needed\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_pandas.if__concat_pandas.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_concat_pandas.if__concat_pandas.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 426, "end_line": 511, "span_ids": ["concat_pandas"], "tokens": 757}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef concat_pandas(\n dfs,\n axis=0,\n join=\"outer\",\n uniform=False,\n filter_warning=True,\n ignore_index=False,\n **kwargs,\n):\n # ... 
other code\n if (\n isinstance(dfs2[0], pd.DataFrame)\n if uniform\n else any(isinstance(df, pd.DataFrame) for df in dfs2)\n ):\n if uniform:\n dfs3 = dfs2\n cat_mask = dfs2[0].dtypes == \"category\"\n else:\n # When concatenating mixed dataframes and series on axis 1, Pandas\n # converts series to dataframes with a single column named 0, then\n # concatenates.\n dfs3 = [\n df\n if isinstance(df, pd.DataFrame)\n else df.to_frame().rename(columns={df.name: 0})\n for df in dfs2\n ]\n # pandas may raise a RuntimeWarning for comparing ints and strs\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n if filter_warning:\n warnings.simplefilter(\"ignore\", FutureWarning)\n cat_mask = pd.concat(\n [(df.dtypes == \"category\").to_frame().T for df in dfs3],\n join=join,\n **kwargs,\n ).any()\n\n if cat_mask.any():\n not_cat = cat_mask[~cat_mask].index\n # this should be aligned, so no need to filter warning\n out = pd.concat(\n [df[df.columns.intersection(not_cat)] for df in dfs3],\n join=join,\n **kwargs,\n )\n temp_ind = out.index\n for col in cat_mask.index.difference(not_cat):\n # Find an example of categoricals in this column\n for df in dfs3:\n sample = df.get(col)\n if sample is not None:\n break\n # Extract partitions, subbing in missing if needed\n parts = []\n for df in dfs3:\n if col in df.columns:\n parts.append(df[col])\n else:\n codes = np.full(len(df), -1, dtype=\"i8\")\n data = pd.Categorical.from_codes(\n codes, sample.cat.categories, sample.cat.ordered\n )\n parts.append(data)\n out[col] = union_categoricals(parts, ignore_order=ignore_order)\n # Pandas resets index type on assignment if frame is empty\n # https://github.com/pandas-dev/pandas/issues/17101\n if not len(temp_ind):\n out.index = temp_ind\n out = out.reindex(columns=cat_mask.index)\n else:\n # pandas may raise a RuntimeWarning for comparing ints and strs\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n if filter_warning:\n warnings.simplefilter(\"ignore\", FutureWarning)\n out = pd.concat(dfs3, join=join, sort=False)\n else:\n if is_categorical_dtype(dfs2[0].dtype):\n if ind is None:\n ind = concat([df.index for df in dfs2])\n return pd.Series(\n union_categoricals(dfs2, ignore_order=ignore_order),\n index=ind,\n name=dfs2[0].name,\n )\n with warnings.catch_warnings():\n if filter_warning:\n warnings.simplefilter(\"ignore\", FutureWarning)\n\n out = pd.concat(dfs2, join=join, **kwargs)\n # Re-add the index if needed\n if ind is not None:\n out.index = ind\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.add_prefix__Frame.any.return.self__reduction_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.add_prefix__Frame.any.return.self__reduction_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1682, "end_line": 1712, "span_ids": ["_Frame.all", "_Frame.abs", "_Frame.add_prefix", "_Frame.add_suffix", "_Frame.any"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.DataFrame)\n def add_prefix(self, prefix):\n res = self.map_partitions(M.add_prefix, prefix)\n if self.known_divisions and is_series_like(self):\n res.divisions = tuple(prefix + str(division) for division in self.divisions)\n return res\n\n @derived_from(pd.DataFrame)\n def add_suffix(self, suffix):\n res = self.map_partitions(M.add_suffix, suffix)\n if self.known_divisions and is_series_like(self):\n res.divisions = tuple(str(division) + suffix for division in self.divisions)\n return res\n\n @derived_from(pd.DataFrame)\n def abs(self):\n _raise_if_object_series(self, \"abs\")\n meta = self._meta_nonempty.abs()\n return self.map_partitions(M.abs, meta=meta, enforce_metadata=False)\n\n @derived_from(pd.DataFrame)\n def all(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"all\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )\n\n @derived_from(pd.DataFrame)\n def any(self, axis=None, skipna=True, split_every=False, out=None):\n return self._reduction_agg(\n \"any\", axis=axis, skipna=skipna, split_every=split_every, out=out\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py___union_categoricals_dispatch.Dispatch_union_categoric": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py___union_categoricals_dispatch.Dispatch_union_categoric", "embedding": null, "metadata": {"file_path": "dask/dataframe/dispatch.py", "file_name": "dispatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["docstring"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nDispatch in dask.dataframe.\n\nAlso see extension.py\n\"\"\"\n\nimport pandas as pd\n\nimport dask.array as da\nimport dask.dataframe as dd\n\nfrom ..utils import Dispatch\n\nmake_meta_dispatch = Dispatch(\"make_meta_dispatch\")\nmake_meta_obj = Dispatch(\"make_meta_obj\")\nmeta_nonempty = Dispatch(\"meta_nonempty\")\nhash_object_dispatch = Dispatch(\"hash_object_dispatch\")\ngroup_split_dispatch = Dispatch(\"group_split_dispatch\")\nget_parallel_type = Dispatch(\"get_parallel_type\")\ncategorical_dtype_dispatch = Dispatch(\"CategoricalDtype\")\nconcat_dispatch = Dispatch(\"concat\")\ntolist_dispatch = Dispatch(\"tolist\")\nis_categorical_dtype_dispatch = Dispatch(\"is_categorical_dtype\")\nunion_categoricals_dispatch = Dispatch(\"union_categoricals\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py_concat_tolist.return.func_obj_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py_concat_tolist.return.func_obj_", "embedding": null, "metadata": {"file_path": "dask/dataframe/dispatch.py", "file_name": "dispatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 27, "end_line": 85, "span_ids": ["concat", "is_categorical_dtype", "categorical_dtype", "tolist"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concat(\n dfs,\n axis=0,\n join=\"outer\",\n uniform=False,\n filter_warning=True,\n ignore_index=False,\n **kwargs,\n):\n \"\"\"Concatenate, handling some edge cases:\n\n - Unions categoricals between partitions\n - Ignores empty partitions\n\n Parameters\n ----------\n dfs : list of DataFrame, Series, or Index\n axis : int or str, optional\n join : str, optional\n uniform : bool, optional\n Whether to treat ``dfs[0]`` as representative of ``dfs[1:]``. Set to\n True if all arguments have the same columns and dtypes (but not\n necessarily categories). Default is False.\n ignore_index : bool, optional\n Whether to allow index values to be ignored/dropped during\n concatenation. Default is False.\n ignore_order : bool, optional\n Whether to ignore the order when doing the union of categoricals.\n Default is False.\n \"\"\"\n if len(dfs) == 1:\n return dfs[0]\n else:\n func = concat_dispatch.dispatch(type(dfs[0]))\n return func(\n dfs,\n axis=axis,\n join=join,\n uniform=uniform,\n filter_warning=filter_warning,\n ignore_index=ignore_index,\n **kwargs,\n )\n\n\ndef is_categorical_dtype(obj):\n obj = getattr(obj, \"dtype\", obj)\n func = is_categorical_dtype_dispatch.dispatch(type(obj))\n return func(obj)\n\n\ndef categorical_dtype(meta, categories=None, ordered=False):\n func = categorical_dtype_dispatch.dispatch(type(meta))\n return func(categories=categories, ordered=ordered)\n\n\ndef tolist(obj):\n func = tolist_dispatch.dispatch(type(obj))\n return func(obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py_make_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/dispatch.py_make_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/dispatch.py", "file_name": "dispatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 142, "span_ids": ["union_categoricals", "make_meta"], "tokens": 394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_meta(x, index=None, parent_meta=None):\n \"\"\"\n This method 
creates meta-data based on the type of ``x``,\n    and ``parent_meta`` if supplied.\n\n    Parameters\n    ----------\n    x : Object of any type.\n        Object to construct meta-data from.\n    index : Index, optional\n        Any index to use in the metadata. This is a pass-through\n        parameter to the registered dispatch functions.\n    parent_meta : Object, default None\n        If ``x`` is of an arbitrary type and Dask cannot determine\n        which back-end should be used to generate the meta-data for\n        this object type, ``parent_meta`` will be used to\n        determine which back-end to select and dispatch to. To\n        utilize this parameter, ``make_meta_obj`` has to be dispatched.\n        If ``parent_meta`` is ``None``, a pandas DataFrame is used for\n        ``parent_meta``, which chooses pandas as the backend.\n\n    Returns\n    -------\n    A valid meta-data\n    \"\"\"\n\n    if isinstance(\n        x,\n        (\n            dd._Frame,\n            dd.core.Scalar,\n            dd.groupby._GroupBy,\n            dd.accessor.Accessor,\n            da.Array,\n        ),\n    ):\n        return x._meta\n\n    try:\n        return make_meta_dispatch(x, index=index)\n    except TypeError:\n        if parent_meta is not None:\n            func = make_meta_obj.dispatch(type(parent_meta))\n            return func(x, index=index)\n        else:\n            # Default to using the pandas backend\n            # if ``parent_meta`` is not specified\n            func = make_meta_obj.dispatch(pd.DataFrame)\n            return func(x, index=index)\n\n\ndef union_categoricals(to_union, sort_categories=False, ignore_order=False):\n    func = union_categoricals_dispatch.dispatch(type(to_union[0]))\n    return func(to_union, sort_categories=sort_categories, ignore_order=ignore_order)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper_CSVFunctionWrapper.__init__.self.kwargs.kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper_CSVFunctionWrapper.__init__.self.kwargs.kwargs", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 64, "span_ids": ["CSVFunctionWrapper.__init__", "CSVFunctionWrapper"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CSVFunctionWrapper:\n    \"\"\"\n    CSV Function-Wrapper Class\n    Reads CSV data from disk to produce a partition (given a key).\n    \"\"\"\n\n    def __init__(\n        self,\n        full_columns,\n        columns,\n        colname,\n        head,\n        header,\n        reader,\n        dtypes,\n        enforce,\n        kwargs,\n    ):\n        self.full_columns = full_columns\n        self.columns = columns\n        self.colname = colname\n        self.head = head\n        self.header = header\n        self.reader = reader\n        self.dtypes = dtypes\n        self.enforce = enforce\n        self.kwargs = kwargs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper.project_columns_CSVFunctionWrapper.project_columns.return.CSVFunctionWrapper_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_CSVFunctionWrapper.project_columns_CSVFunctionWrapper.project_columns.return.CSVFunctionWrapper_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 84, "span_ids": ["CSVFunctionWrapper.project_columns"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CSVFunctionWrapper:\n\n def project_columns(self, columns):\n \"\"\"Return a new CSVFunctionWrapper object with\n a sub-column projection.\n \"\"\"\n # Make sure columns is ordered correctly\n columns = [c for c in self.head.columns if c in columns]\n if columns == self.columns:\n return self\n return CSVFunctionWrapper(\n self.full_columns,\n columns,\n self.colname,\n self.head[columns],\n self.header,\n self.reader,\n {c: self.dtypes[c] for c in columns},\n self.enforce,\n self.kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_np_make._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_np_make._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 64, "span_ids": ["make_float", "impl:3", "imports", "make_string", "impl:5", "make_int", "make_categorical"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\n\nfrom ...highlevelgraph import HighLevelGraph\nfrom ...layers import DataFrameIOLayer\nfrom ...utils import random_state_data\nfrom ..core import DataFrame, tokenize\n\n__all__ = [\"make_timeseries\"]\n\n\ndef make_float(n, rstate):\n return rstate.rand(n) * 2 - 1\n\n\ndef make_int(n, rstate, lam=1000):\n return rstate.poisson(lam, size=n)\n\n\nnames = [\n \"Alice\",\n \"Bob\",\n \"Charlie\",\n \"Dan\",\n \"Edith\",\n \"Frank\",\n \"George\",\n \"Hannah\",\n \"Ingrid\",\n \"Jerry\",\n \"Kevin\",\n \"Laura\",\n \"Michael\",\n \"Norbert\",\n \"Oliver\",\n \"Patricia\",\n \"Quinn\",\n \"Ray\",\n \"Sarah\",\n \"Tim\",\n \"Ursula\",\n \"Victor\",\n \"Wendy\",\n \"Xavier\",\n \"Yvonne\",\n \"Zelda\",\n]\n\n\ndef make_string(n, rstate):\n return rstate.choice(names, size=n)\n\n\ndef make_categorical(n, rstate):\n return pd.Categorical.from_codes(rstate.randint(0, 
len(names), size=n), names)\n\n\nmake = {\n float: make_float,\n int: make_int,\n str: make_string,\n object: make_string,\n \"category\": make_categorical,\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_HDFFunctionWrapper_HDFFunctionWrapper.__call__.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_HDFFunctionWrapper_HDFFunctionWrapper.__call__.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 273, "end_line": 307, "span_ids": ["HDFFunctionWrapper.project_columns", "HDFFunctionWrapper.__init__", "HDFFunctionWrapper.__call__", "HDFFunctionWrapper"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HDFFunctionWrapper:\n \"\"\"\n HDF5 Function-Wrapper Class\n\n Reads HDF5 data from disk to produce a partition (given a key).\n \"\"\"\n\n def __init__(self, columns, dim, lock, common_kwargs):\n self.columns = columns\n self.lock = lock\n self.common_kwargs = common_kwargs\n self.dim = dim\n if columns and dim > 1:\n self.common_kwargs = merge(common_kwargs, {\"columns\": columns})\n\n def project_columns(self, columns):\n \"\"\"Return a new HDFFunctionWrapper object with\n a sub-column projection.\n \"\"\"\n if columns == self.columns:\n return self\n return HDFFunctionWrapper(columns, self.dim, self.lock, self.common_kwargs)\n\n def __call__(self, part):\n \"\"\"Read from hdf5 file with a lock\"\"\"\n\n path, key, kwargs = part\n if self.lock:\n self.lock.acquire()\n try:\n result = pd.read_hdf(path, key, **merge(self.common_kwargs, kwargs))\n finally:\n if self.lock:\n self.lock.release()\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf.None_7_read_hdf.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py_read_hdf.None_7_read_hdf.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 405, "end_line": 440, "span_ids": ["read_hdf"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_hdf(\n pattern,\n key,\n start=0,\n stop=None,\n columns=None,\n chunksize=1000000,\n 
sorted_index=False,\n lock=True,\n mode=\"r\",\n):\n # ... other code\n if (start != 0 or stop is not None) and sorted_index:\n raise ValueError(\n \"When assuming pre-partitioned data, data must be \"\n \"read in its entirety using the same chunksizes\"\n )\n\n # Build metadata\n with pd.HDFStore(paths[0], mode=mode) as hdf:\n meta_key = _expand_key(key, hdf)[0]\n meta = pd.read_hdf(paths[0], meta_key, mode=mode, stop=0)\n if columns is not None:\n meta = meta[columns]\n\n # Common kwargs\n if meta.ndim == 1:\n common_kwargs = {\"name\": meta.name, \"mode\": mode}\n else:\n common_kwargs = {\"mode\": mode}\n\n # Build parts\n parts, divisions = _build_parts(\n paths, key, start, stop, chunksize, sorted_index, mode\n )\n\n # Construct Layer and Collection\n label = \"read-hdf-\"\n name = label + tokenize(paths, key, start, stop, sorted_index, chunksize, mode)\n layer = DataFrameIOLayer(\n name,\n columns,\n parts,\n HDFFunctionWrapper(columns, meta.ndim, lock, common_kwargs),\n label=label,\n )\n graph = HighLevelGraph({name: layer}, {name: set()})\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__build_parts__build_parts.return.parts_global_divisions_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__build_parts__build_parts.return.parts_global_divisions_o", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 462, "span_ids": ["_build_parts"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_parts(paths, key, start, stop, chunksize, sorted_index, mode):\n \"\"\"\n Build the list of partition inputs and divisions for read_hdf\n \"\"\"\n parts = []\n global_divisions = []\n for path in paths:\n\n keys, stops, divisions = _get_keys_stops_divisions(\n path, key, stop, sorted_index, chunksize, mode\n )\n\n for k, s, d in zip(keys, stops, divisions):\n\n if d and global_divisions:\n global_divisions = global_divisions[:-1] + d\n elif d:\n global_divisions = d\n\n parts.extend(_one_path_one_key(path, k, start, s, chunksize))\n\n return parts, global_divisions or [None] * (len(parts) + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__one_path_one_key__one_path_one_key.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__one_path_one_key__one_path_one_key.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 467, "end_line": 482, "span_ids": ["_one_path_one_key"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _one_path_one_key(path, key, start, stop, chunksize):\n \"\"\"\n Get the DataFrame corresponding to one path and one key (which\n should not contain any wildcards).\n \"\"\"\n\n if start >= stop:\n raise ValueError(\n \"Start row number ({}) is above or equal to stop \"\n \"row number ({})\".format(start, stop)\n )\n\n return [\n (path, key, {\"start\": s, \"stop\": s + chunksize})\n for i, s in enumerate(range(start, stop, chunksize))\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__expand_key__expand_key.return.keys": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__expand_key__expand_key.return.keys", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 485, "end_line": 501, "span_ids": ["_expand_key"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _expand_key(key, hdf):\n import glob\n\n if not glob.has_magic(key):\n keys = [key]\n else:\n keys = [k for k in hdf.keys() if fnmatch(k, key)]\n # https://github.com/dask/dask/issues/5934\n # TODO: remove this part if/when pandas copes with all keys\n keys.extend(\n n._v_pathname\n for n in hdf._handle.walk_nodes(\"/\", classname=\"Table\")\n if fnmatch(n._v_pathname, key)\n and n._v_name != \"table\"\n and n._v_pathname not in keys\n )\n return keys", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__get_keys_stops_divisions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/hdf.py__get_keys_stops_divisions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/hdf.py", "file_name": "hdf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 504, "end_line": 548, "span_ids": ["_get_keys_stops_divisions", "impl:5"], "tokens": 341}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_keys_stops_divisions(path, key, stop, 
sorted_index, chunksize, mode):\n    \"\"\"\n    Get the \"keys\" or group identifiers which match the given key, which\n    can contain wildcards (see _expand_key). This uses the hdf file\n    identified by the given path. Also get the index of the last row of\n    data for each matched key.\n    \"\"\"\n    with pd.HDFStore(path, mode=mode) as hdf:\n        stops = []\n        divisions = []\n        keys = _expand_key(key, hdf)\n        for k in keys:\n            storer = hdf.get_storer(k)\n            if storer.format_type != \"table\":\n                raise TypeError(dont_use_fixed_error_message)\n            if stop is None:\n                stops.append(storer.nrows)\n            elif stop > storer.nrows:\n                raise ValueError(\n                    \"Stop keyword exceeds dataset number \"\n                    \"of rows ({})\".format(storer.nrows)\n                )\n            else:\n                stops.append(stop)\n            if sorted_index:\n                division = [\n                    storer.read_column(\"index\", start=start, stop=start + 1)[0]\n                    for start in range(0, storer.nrows, chunksize)\n                ]\n                division_end = storer.read_column(\n                    \"index\", start=storer.nrows - 1, stop=storer.nrows\n                )[0]\n\n                division.append(division_end)\n                divisions.append(division)\n            else:\n                divisions.append(None)\n\n    return keys, stops, divisions\n\n\nfrom ..core import _Frame\n\n_Frame.to_hdf.__doc__ = to_hdf.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_json_del__pa_version": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_json_del__pa_version", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 44, "span_ids": ["imports"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import json\nimport warnings\nfrom collections import defaultdict\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom packaging.version import parse as parse_version\n\nfrom dask import delayed\n\nfrom ....base import tokenize\nfrom ....core import flatten\nfrom ....delayed import Delayed\nfrom ....utils import getargspec, natural_sort_key\nfrom ...utils import clear_known_categories\nfrom ..utils import (\n    _get_pyarrow_dtypes,\n    _is_local_fs,\n    _meta_from_dtypes,\n    _open_input_files,\n)\nfrom .core import create_metadata_file\nfrom .utils import (\n    Engine,\n    _flatten_filters,\n    _get_aggregation_depth,\n    _normalize_index_columns,\n    _parse_pandas_metadata,\n    _process_open_file_options,\n    _row_groups_to_parts,\n    _set_metadata_task_size,\n    _sort_and_analyze_paths,\n    _split_user_options,\n)\n\n# Check PyArrow version for feature support\n_pa_version = parse_version(pa.__version__)\nfrom pyarrow import dataset as pa_ds\n\nsubset_stats_supported = _pa_version > parse_version(\"2.0.0\")\ndel _pa_version", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._get_dataset_offset_ArrowDatasetEngine._get_dataset_offset.return.fmd_i_offset_append": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._get_dataset_offset_ArrowDatasetEngine._get_dataset_offset.return.fmd_i_offset_append", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 490, "end_line": 517, "span_ids": ["ArrowDatasetEngine._get_dataset_offset"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _get_dataset_offset(cls, path, fs, append, ignore_divisions):\n fmd = None\n i_offset = 0\n if append:\n # Make sure there are existing file fragments.\n # Otherwise there is no need to set `append=True`\n i_offset = len(\n list(\n pa_ds.dataset(path, filesystem=fs, format=\"parquet\").get_fragments()\n )\n )\n if i_offset == 0:\n # No dataset to append to\n return fmd, i_offset, False\n try:\n with fs.open(fs.sep.join([path, \"_metadata\"]), mode=\"rb\") as fil:\n fmd = pq.read_metadata(fil)\n except OSError:\n # No _metadata file present - No appending allowed (for now)\n if not ignore_divisions:\n # TODO: Be more flexible about existing metadata.\n raise NotImplementedError(\n \"_metadata file needed to `append` \"\n \"with `engine='pyarrow-dataset'` \"\n \"unless `ignore_divisions` is `True`\"\n )\n return fmd, i_offset, append", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.initialize_write_ArrowDatasetEngine.initialize_write._Inspect_the_intial_meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.initialize_write_ArrowDatasetEngine.initialize_write._Inspect_the_intial_meta", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 519, "end_line": 584, "span_ids": ["ArrowDatasetEngine.initialize_write"], "tokens": 501}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n schema=None,\n 
index_cols=None,\n        **kwargs,\n    ):\n        # Infer schema if \"infer\"\n        # (also start with inferred schema if user passes a dict)\n        if schema == \"infer\" or isinstance(schema, dict):\n\n            # Start with schema from _meta_nonempty\n            _schema = pa.Schema.from_pandas(\n                df._meta_nonempty.set_index(index_cols)\n                if index_cols\n                else df._meta_nonempty\n            )\n\n            # Use dict to update our inferred schema\n            if isinstance(schema, dict):\n                schema = pa.schema(schema)\n                for name in schema.names:\n                    i = _schema.get_field_index(name)\n                    j = schema.get_field_index(name)\n                    _schema = _schema.set(i, schema.field(j))\n\n            # If we have object columns, we need to sample partitions\n            # until we find non-null data for each column in `sample`\n            sample = [col for col in df.columns if df[col].dtype == \"object\"]\n            if sample and schema == \"infer\":\n                delayed_schema_from_pandas = delayed(pa.Schema.from_pandas)\n                for i in range(df.npartitions):\n                    # Keep data on worker\n                    _s = delayed_schema_from_pandas(\n                        df[sample].to_delayed()[i]\n                    ).compute()\n                    for name, typ in zip(_s.names, _s.types):\n                        if typ != \"null\":\n                            i = _schema.get_field_index(name)\n                            j = _s.get_field_index(name)\n                            _schema = _schema.set(i, _s.field(j))\n                            sample.remove(name)\n                    if not sample:\n                        break\n\n            # Final (inferred) schema\n            schema = _schema\n\n        # Check that target directory exists\n        fs.mkdirs(path, exist_ok=True)\n        if append and division_info is None:\n            ignore_divisions = True\n\n        # Extract metadata and get file offset if appending\n        fmd, i_offset, append = cls._get_dataset_offset(\n            path, fs, append, ignore_divisions\n        )\n\n        # Inspect the initial metadata if appending\n        # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.initialize_write.if_append__ArrowDatasetEngine.initialize_write.return.fmd_schema_i_offset": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine.initialize_write.if_append__ArrowDatasetEngine.initialize_write.return.fmd_schema_i_offset", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 623, "end_line": 680, "span_ids": ["ArrowDatasetEngine.initialize_write"], "tokens": 539}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n    #\n    # Public Class Methods\n\n    @classmethod\n    def initialize_write(\n        cls,\n        df,\n        fs,\n        path,\n        append=False,\n        partition_on=None,\n        ignore_divisions=False,\n        division_info=None,\n        schema=None,\n        index_cols=None,\n        **kwargs,\n    ):\n        # ... 
other code\n if append:\n arrow_schema = fmd.schema.to_arrow_schema()\n names = arrow_schema.names\n has_pandas_metadata = (\n arrow_schema.metadata is not None and b\"pandas\" in arrow_schema.metadata\n )\n if has_pandas_metadata:\n pandas_metadata = json.loads(\n arrow_schema.metadata[b\"pandas\"].decode(\"utf8\")\n )\n categories = [\n c[\"name\"]\n for c in pandas_metadata[\"columns\"]\n if c[\"pandas_type\"] == \"categorical\"\n ]\n else:\n categories = None\n dtypes = _get_pyarrow_dtypes(arrow_schema, categories)\n if set(names) != set(df.columns) - set(partition_on):\n raise ValueError(\n \"Appended columns not the same.\\n\"\n \"Previous: {} | New: {}\".format(names, list(df.columns))\n )\n elif (pd.Series(dtypes).loc[names] != df[names].dtypes).any():\n # TODO Coerce values for compatible but different dtypes\n raise ValueError(\n \"Appended dtypes differ.\\n{}\".format(\n set(dtypes.items()) ^ set(df.dtypes.items())\n )\n )\n\n # Check divisions if necessary\n if division_info[\"name\"] not in names:\n ignore_divisions = True\n if not ignore_divisions:\n old_end = None\n row_groups = [fmd.row_group(i) for i in range(fmd.num_row_groups)]\n for row_group in row_groups:\n for i, name in enumerate(names):\n if name != division_info[\"name\"]:\n continue\n column = row_group.column(i)\n if column.statistics:\n if not old_end:\n old_end = column.statistics.max\n else:\n old_end = max(old_end, column.statistics.max)\n break\n\n divisions = division_info[\"divisions\"]\n if divisions[0] < old_end:\n raise ValueError(\n \"Appended divisions overlapping with the previous ones\"\n \" (set ignore_divisions=True to append anyway).\\n\"\n \"Previous: {} | New: {}\".format(old_end, divisions[0])\n )\n\n return fmd, schema, i_offset", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._get_dataset_offset_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._get_dataset_offset_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2308, "end_line": 2336, "span_ids": ["impl:6", "ArrowLegacyEngine._get_dataset_offset"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _get_dataset_offset(cls, path, fs, append, ignore_divisions):\n dataset = fmd = None\n i_offset = 0\n if append:\n try:\n # Allow append if the dataset exists.\n # Also need dataset.metadata object if\n # ignore_divisions is False (to check divisions)\n dataset = pq.ParquetDataset(path, filesystem=fs)\n if not dataset.metadata and not ignore_divisions:\n # TODO: Be more flexible about existing metadata.\n raise NotImplementedError(\n \"_metadata file needed to `append` \"\n \"with `engine='pyarrow-legacy'` \"\n \"unless 
`ignore_divisions` is `True`\"\n                    )\n                fmd = dataset.metadata\n                i_offset = len(dataset.pieces)\n            except (OSError, ValueError, IndexError):\n                # Original dataset does not exist - cannot append\n                append = False\n        return fmd, i_offset, append\n\n\n# Compatibility access to legacy ArrowEngine\n# (now called `ArrowLegacyEngine`)\nArrowEngine = ArrowLegacyEngine", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__read_sql_chunk__read_sql_chunk.if_len_df_0_.else_.return.df_astype_meta_dtypes_to_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__read_sql_chunk__read_sql_chunk.if_len_df_0_.else_.return.df_astype_meta_dtypes_to_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 220, "end_line": 234, "span_ids": ["_read_sql_chunk"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _read_sql_chunk(q, uri, meta, engine_kwargs=None, **kwargs):\n    import sqlalchemy as sa\n\n    engine_kwargs = engine_kwargs or {}\n    engine = sa.create_engine(uri, **engine_kwargs)\n    df = pd.read_sql(q, engine, **kwargs)\n    engine.dispose()\n    if len(df) == 0:\n        return meta\n    elif len(meta.dtypes.to_dict()) == 0:\n        # only the index column is loaded\n        # required only for pandas < 1.0.0\n        return df\n    else:\n        return df.astype(meta.dtypes.to_dict(), copy=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_raises_on_no_files_test_csv_with_integer_names.with_filetext_alice_1_nb.assert_list_df_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_raises_on_no_files_test_csv_with_integer_names.with_filetext_alice_1_nb.assert_list_df_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 879, "end_line": 918, "span_ids": ["test_multiple_read_csv_has_deterministic_name", "test_csv_with_integer_names", "test_read_csv_raises_on_no_files", "test_read_csv_has_deterministic_name", "test_read_csv_has_different_names_based_on_blocksize"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_raises_on_no_files():\n    fn = \".not.a.real.file.csv\"\n    try:\n        
dd.read_csv(fn)\n assert False\n except OSError as e:\n assert fn in str(e)\n\n\ndef test_read_csv_has_deterministic_name():\n with filetext(csv_text) as fn:\n a = dd.read_csv(fn)\n b = dd.read_csv(fn)\n assert a._name == b._name\n assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)\n assert isinstance(a._name, str)\n\n c = dd.read_csv(fn, skiprows=1, na_values=[0])\n assert a._name != c._name\n\n\ndef test_multiple_read_csv_has_deterministic_name():\n with filetexts({\"_foo.1.csv\": csv_text, \"_foo.2.csv\": csv_text}):\n a = dd.read_csv(\"_foo.*.csv\")\n b = dd.read_csv(\"_foo.*.csv\")\n\n assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)\n\n\ndef test_read_csv_has_different_names_based_on_blocksize():\n with filetext(csv_text) as fn:\n a = dd.read_csv(fn, blocksize=\"10kB\")\n b = dd.read_csv(fn, blocksize=\"20kB\")\n assert a._name != b._name\n\n\ndef test_csv_with_integer_names():\n with filetext(\"alice,1\\nbob,2\") as fn:\n df = dd.read_csv(fn, header=None)\n assert list(df.columns) == [0, 1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_line_ending_test_to_csv_line_ending.assert_raw_in_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_to_csv_line_ending_test_to_csv_line_ending.assert_raw_in_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1573, "end_line": 1589, "span_ids": ["test_to_csv_line_ending"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_csv_line_ending():\n df = pd.DataFrame({\"x\": [0]})\n ddf = dd.from_pandas(df, npartitions=1)\n expected = {b\"0\\r\\n\", b\"0\\n\"} # either/or\n # For comparison...\n # unexpected = {b'0\\r\\r\\n'}\n # This test addresses GH4809, and checks that only (at most) one\n # '\\r' character is written per line when writing to csv.\n # In case it's correct (on UNIX) to have no '\\r' at all, this test\n # considers either '\\r\\n' or '\\n' as appropriate line endings,\n # but not '\\r\\r\\n'.\n with tmpdir() as dn:\n ddf.to_csv(os.path.join(dn, \"foo*.csv\"), header=False, index=False)\n filename = os.path.join(dn, \"foo0.csv\")\n with open(filename, \"rb\") as f:\n raw = f.read()\n assert raw in expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_block_mask_test_reading_empty_csv_files_with_path.with_tmpdir_as_tdir_.assert_eq_result_df_che": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_block_mask_test_reading_empty_csv_files_with_path.with_tmpdir_as_tdir_.assert_eq_result_df_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1592, "end_line": 1626, "span_ids": ["test_block_mask", "test_reading_empty_csv_files_with_path"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"block_lists\",\n [\n [[1, 2], [3], [4, 5, 6]],\n [],\n [[], [], [1], [], [1]],\n [list(range(i)) for i in range(10)],\n ],\n)\ndef test_block_mask(block_lists):\n mask = list(block_mask(block_lists))\n assert len(mask) == len(list(flatten(block_lists)))\n\n\ndef test_reading_empty_csv_files_with_path():\n with tmpdir() as tdir:\n for k, content in enumerate([\"0, 1, 2\", \"\", \"6, 7, 8\"]):\n with open(os.path.join(tdir, str(k) + \".csv\"), \"w\") as file:\n file.write(content)\n result = dd.read_csv(\n os.path.join(tdir, \"*.csv\"),\n include_path_column=True,\n converters={\"path\": parse_filename},\n names=[\"A\", \"B\", \"C\"],\n ).compute()\n df = pd.DataFrame(\n {\n \"A\": [0, 6],\n \"B\": [1, 7],\n \"C\": [2, 8],\n \"path\": [\"0.csv\", \"2.csv\"],\n }\n )\n df[\"path\"] = df[\"path\"].astype(\"category\")\n assert_eq(result, df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_groupby_get_group_test_csv_getitem_column_order.assert_eq_df1_columns_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_groupby_get_group_test_csv_getitem_column_order.assert_eq_df1_columns_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1629, "end_line": 1655, "span_ids": ["test_read_csv_groupby_get_group", "test_csv_getitem_column_order"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_groupby_get_group(tmpdir):\n # https://github.com/dask/dask/issues/7005\n\n path = os.path.join(str(tmpdir), \"test.csv\")\n df1 = pd.DataFrame([{\"foo\": 10, \"bar\": 4}])\n df1.to_csv(path, index=False)\n\n ddf1 = dd.read_csv(path)\n ddfs = ddf1.groupby(\"foo\")\n\n assert_eq(df1, ddfs.get_group(10).compute())\n\n\ndef test_csv_getitem_column_order(tmpdir):\n # See: https://github.com/dask/dask/issues/7759\n\n path = os.path.join(str(tmpdir), \"test.csv\")\n columns = list(\"abcdefghijklmnopqrstuvwxyz\")\n values = 
list(range(len(columns)))\n\n df1 = pd.DataFrame([{c: v for c, v in zip(columns, values)}])\n df1.to_csv(path)\n\n # Use disordered and duplicated column selection\n columns = list(\"hczzkylaape\")\n df2 = dd.read_csv(path)[columns].head(1)\n assert_eq(df1[columns], df2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_nodes_test_to_hdf_multiple_nodes.None_4.assert_subgraph_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_multiple_nodes_test_to_hdf_multiple_nodes.None_4.assert_subgraph_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 136, "span_ids": ["test_to_hdf_multiple_nodes"], "tokens": 742}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_multiple_nodes():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n a = dd.from_pandas(df, 2)\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n b = dd.from_pandas(df16, 16)\n\n # saving to multiple nodes\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)\n\n # saving to multiple nodes making sure order is kept\n with tmpfile(\"h5\") as fn:\n b.to_hdf(fn, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df16, out)\n\n # saving to multiple datasets with custom name_function\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data_*\", name_function=lambda i: \"a\" * (i + 1))\n out = dd.read_hdf(fn, \"/data_*\")\n assert_eq(df, out)\n\n out = pd.read_hdf(fn, \"/data_a\")\n tm.assert_frame_equal(out, df.iloc[:2])\n out = pd.read_hdf(fn, \"/data_aa\")\n tm.assert_frame_equal(out, df.iloc[2:])\n\n # test multiple nodes with hdf object\n with tmpfile(\"h5\") as fn:\n with pd.HDFStore(fn) as hdf:\n b.to_hdf(hdf, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df16, out)\n\n # Test getitem optimization\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data*\")\n out = dd.read_hdf(fn, \"/data*\")[[\"x\"]]\n dsk = optimize_dataframe_getitem(out.dask, keys=out.__dask_keys__())\n read = [key for key in dsk.layers if key.startswith(\"read-hdf\")][0]\n subgraph = dsk.layers[read]\n assert isinstance(subgraph, DataFrameIOLayer)\n assert subgraph.columns == [\"x\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_test_roundtrip.if_.pytest_xfail_reason_fast": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_test_roundtrip.if_.pytest_xfail_reason_fast", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 889, "end_line": 936, "span_ids": ["test_roundtrip"], "tokens": 818}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"df,write_kwargs,read_kwargs\",\n [\n (pd.DataFrame({\"x\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}), {}, {}),\n (pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}), {}, {}),\n (pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}), {\"object_encoding\": \"bytes\"}, {}),\n (\n pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])}),\n {},\n {\"categories\": [\"x\"]},\n ),\n (pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])}), {}, {\"categories\": [\"x\"]}),\n (pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"), {}, {}),\n pytest.param(\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ns]\"),\n {},\n {},\n ),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, UTC]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, CET]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 5]}, index=pd.Index([1, 2, 3], name=\"foo\")), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]), {}, {}),\n (pd.DataFrame({\"0\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, None]}), {}, {}),\n (pd.DataFrame({\"-\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\".\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\" \": [3.0, 2.0, None]}), {}, {}),\n ],\n)\ndef test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):\n if \"x\" in df and df.x.dtype == \"M8[ns]\" and \"arrow\" in engine:\n pytest.xfail(reason=\"Parquet pyarrow v1 doesn't support nanosecond precision\")\n if (\n \"x\" in df\n and df.x.dtype == \"M8[ns]\"\n and engine == \"fastparquet\"\n and fastparquet_version <= parse_version(\"0.6.3\")\n ):\n pytest.xfail(reason=\"fastparquet doesn't support nanosecond precision yet\")\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip.None_2_test_roundtrip.oe.write_kwargs_pop_object_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip.None_2_test_roundtrip.oe.write_kwargs_pop_object_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 937, "end_line": 950, "span_ids": ["test_roundtrip"], "tokens": 829}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"df,write_kwargs,read_kwargs\",\n [\n (pd.DataFrame({\"x\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}), {}, {}),\n (pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}), {}, {}),\n (pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}), {\"object_encoding\": \"bytes\"}, {}),\n (\n pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])}),\n {},\n {\"categories\": [\"x\"]},\n ),\n (pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])}), {}, {\"categories\": [\"x\"]}),\n (pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"), {}, {}),\n pytest.param(\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ns]\"),\n {},\n {},\n ),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, UTC]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, CET]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 5]}, index=pd.Index([1, 2, 3], name=\"foo\")), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]), {}, {}),\n (pd.DataFrame({\"0\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, None]}), {}, {}),\n (pd.DataFrame({\"-\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\".\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\" \": [3.0, 2.0, None]}), {}, {}),\n ],\n)\ndef test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):\n # ... other code\n if (\n PANDAS_GT_130\n and read_kwargs.get(\"categories\", None)\n and engine == \"fastparquet\"\n and fastparquet_version <= parse_version(\"0.6.3\")\n ):\n pytest.xfail(\"https://github.com/dask/fastparquet/issues/577\")\n\n tmp = str(tmpdir)\n if df.index.name is None:\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n\n oe = write_kwargs.pop(\"object_encoding\", None)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip.if_oe_and_engine_fast_test_roundtrip.if_str_ddf2_dtypes_get_x.else_.assert_eq_ddf_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip.if_oe_and_engine_fast_test_roundtrip.if_str_ddf2_dtypes_get_x.else_.assert_eq_ddf_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 951, "end_line": 961, "span_ids": ["test_roundtrip"], "tokens": 866}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"df,write_kwargs,read_kwargs\",\n [\n (pd.DataFrame({\"x\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [\"c\", \"a\", \"b\"]}), {}, {}),\n (pd.DataFrame({\"x\": [\"cc\", \"a\", \"bbb\"]}), {}, {}),\n (pd.DataFrame({\"x\": [b\"a\", b\"b\", b\"c\"]}), {\"object_encoding\": \"bytes\"}, {}),\n (\n pd.DataFrame({\"x\": pd.Categorical([\"a\", \"b\", \"a\"])}),\n {},\n {\"categories\": [\"x\"]},\n ),\n (pd.DataFrame({\"x\": pd.Categorical([1, 2, 1])}), {}, {\"categories\": [\"x\"]}),\n (pd.DataFrame({\"x\": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"M8[ns]\"), {}, {}),\n pytest.param(\n pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ns]\"),\n {},\n {},\n ),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[us]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"M8[ms]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, UTC]\"), {}, {}),\n (pd.DataFrame({\"x\": [3000, 2000, 1000]}).astype(\"datetime64[ns, CET]\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"uint16\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, 1]}).astype(\"float32\"), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),\n (pd.DataFrame({\"x\": [3, 1, 5]}, index=pd.Index([1, 2, 3], name=\"foo\")), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [1, 2, 3], \"y\": [3, 2, 1]}, columns=[\"y\", \"x\"]), {}, {}),\n (pd.DataFrame({\"0\": [3, 2, 1]}), {}, {}),\n (pd.DataFrame({\"x\": [3, 2, None]}), {}, {}),\n (pd.DataFrame({\"-\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\".\": [3.0, 2.0, None]}), {}, {}),\n (pd.DataFrame({\" \": [3.0, 2.0, None]}), {}, {}),\n ],\n)\ndef test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):\n # ... 
other code\n if oe and engine == \"fastparquet\":\n dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)\n else:\n dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)\n ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)\n if str(ddf2.dtypes.get(\"x\")) == \"UInt16\" and engine == \"fastparquet\":\n # fastparquet chooses to use masked type to be able to get true repr of\n # 16-bit int\n assert_eq(ddf.astype(\"UInt16\"), ddf2)\n else:\n assert_eq(ddf, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_inconsistent_schema_pyarrow_test_read_inconsistent_schema_pyarrow.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_inconsistent_schema_pyarrow_test_read_inconsistent_schema_pyarrow.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2392, "end_line": 2426, "span_ids": ["test_read_inconsistent_schema_pyarrow"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_LE_MARK\ndef test_read_inconsistent_schema_pyarrow(tmpdir):\n # Note: This is a proxy test for a cudf-related issue fix\n # (see cudf#5062 github issue). 
The cause of that issue is\n # schema inconsistencies that do not actually correspond to\n # different types, but whether or not the file/column contains\n # null values.\n\n df1 = pd.DataFrame({\"id\": [0, 1], \"val\": [10, 20]})\n df2 = pd.DataFrame({\"id\": [2, 3], \"val\": [30, 40]})\n\n desired_type = \"int64\"\n other_type = \"int32\"\n df1.val = df1.val.astype(desired_type)\n df2.val = df2.val.astype(other_type)\n\n df_expect = pd.concat([df1, df2], ignore_index=True)\n df_expect[\"val\"] = df_expect.val.astype(desired_type)\n\n df1.to_parquet(os.path.join(tmpdir, \"0.parquet\"), engine=\"pyarrow\")\n df2.to_parquet(os.path.join(tmpdir, \"1.parquet\"), engine=\"pyarrow\")\n\n # Read Directory\n check = dd.read_parquet(\n str(tmpdir), dataset={\"validate_schema\": False}, engine=\"pyarrow-legacy\"\n )\n assert_eq(check.compute(), df_expect, check_index=False)\n\n # Read List\n check = dd.read_parquet(\n os.path.join(tmpdir, \"*.parquet\"),\n dataset={\"validate_schema\": False},\n engine=\"pyarrow-legacy\",\n )\n assert_eq(check.compute(), df_expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_blockwise_parquet_test_optimize_blockwise_parquet.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_optimize_blockwise_parquet_test_optimize_blockwise_parquet.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2498, "end_line": 2539, "span_ids": ["test_optimize_blockwise_parquet"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@ANY_ENGINE_MARK\ndef test_optimize_blockwise_parquet(tmpdir):\n size = 40\n npartitions = 2\n tmp = str(tmpdir)\n df = pd.DataFrame({\"a\": np.arange(size, dtype=np.int32)})\n expect = dd.from_pandas(df, npartitions=npartitions)\n expect.to_parquet(tmp)\n ddf = dd.read_parquet(tmp)\n\n # `ddf` should now have ONE Blockwise layer\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 1\n assert isinstance(list(layers.values())[0], Blockwise)\n\n # Check single-layer result\n assert_eq(ddf, expect)\n\n # Increment by 1\n ddf += 1\n expect += 1\n\n # Increment by 10\n ddf += 10\n expect += 10\n\n # `ddf` should now have THREE Blockwise layers\n layers = ddf.__dask_graph__().layers\n assert len(layers) == 3\n assert all(isinstance(layer, Blockwise) for layer in layers.values())\n\n # Check that `optimize_blockwise` fuses all three\n # `Blockwise` layers together into a single `Blockwise` layer\n keys = [(ddf._name, i) for i in range(npartitions)]\n graph = optimize_blockwise(ddf.__dask_graph__(), keys)\n layers = graph.layers\n name = list(layers.keys())[0]\n assert len(layers) == 1\n assert isinstance(layers[name], Blockwise)\n\n # Check final result\n assert_eq(ddf, expect)", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_single_column_test_single_column.with_tmpfile_as_f_.assert_eq_test_data_pd_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_single_column_test_single_column.with_tmpfile_as_f_.assert_eq_test_data_pd_d", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 96, "span_ids": ["test_single_column"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\n \"ignore:The default dtype for empty Series \" \"will be 'object' instead of 'float64'\"\n)\n@pytest.mark.parametrize(\"use_head\", [True, False])\ndef test_single_column(db, use_head):\n from sqlalchemy import Column, Integer, MetaData, Table, create_engine\n\n with tmpfile() as f:\n uri = \"sqlite:///%s\" % f\n metadata = MetaData()\n engine = create_engine(uri)\n table = Table(\n \"single_column\",\n metadata,\n Column(\"id\", Integer, primary_key=True),\n )\n metadata.create_all(engine)\n test_data = pd.DataFrame({\"id\": list(range(50))}).set_index(\"id\")\n test_data.to_sql(table.name, uri, index=True, if_exists=\"replace\")\n\n if use_head:\n dask_df = read_sql_table(table.name, uri, index_col=\"id\", npartitions=2)\n else:\n dask_df = read_sql_table(\n table.name,\n uri,\n head_rows=0,\n npartitions=2,\n meta=test_data.iloc[:0],\n index_col=\"id\",\n )\n assert dask_df.index.name == \"id\"\n assert dask_df.npartitions == 2\n pd_dataframe = dask_df.compute()\n assert_eq(test_data, pd_dataframe)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_broadcast_join.if_lhs_npartitions_rhs__broadcast_join.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_broadcast_join.if_lhs_npartitions_rhs__broadcast_join.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1468, "end_line": 1500, "span_ids": ["broadcast_join"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_join(\n lhs,\n left_on,\n rhs,\n right_on,\n how=\"inner\",\n npartitions=None,\n suffixes=(\"_x\", \"_y\"),\n shuffle=None,\n 
indicator=False,\n parts_out=None,\n):\n # ... other code\n if lhs.npartitions < rhs.npartitions:\n npartitions = rhs.npartitions\n divisions = rhs.divisions\n _index_names = set(rhs._meta_nonempty.index.names)\n else:\n npartitions = lhs.npartitions\n divisions = lhs.divisions\n _index_names = set(lhs._meta_nonempty.index.names)\n\n # Cannot preserve divisions if the index is lost\n if _index_names != set(meta.index.names):\n divisions = [None] * (npartitions + 1)\n\n token = tokenize(lhs, rhs, npartitions, **merge_kwargs)\n name = \"bcast-join-\" + token\n broadcast_join_layer = BroadcastJoinLayer(\n name,\n npartitions,\n lhs_name,\n lhs.npartitions,\n rhs_name,\n rhs.npartitions,\n parts_out=parts_out,\n **merge_kwargs,\n )\n\n graph = HighLevelGraph.from_collections(\n name,\n broadcast_join_layer,\n dependencies=[lhs_dep, rhs_dep],\n )\n\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__recursive_pairwise_outer_join_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py__recursive_pairwise_outer_join_", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1503, "end_line": 1553, "span_ids": ["_recursive_pairwise_outer_join"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _recursive_pairwise_outer_join(\n dataframes_to_merge, on, lsuffix, rsuffix, npartitions, shuffle\n):\n \"\"\"\n Schedule the merging of a list of dataframes in a pairwise method. 
This is a recursive function that results\n in a much more efficient scheduling of merges than a simple loop\n from:\n [A] [B] [C] [D] -> [AB] [C] [D] -> [ABC] [D] -> [ABCD]\n to:\n [A] [B] [C] [D] -> [AB] [CD] -> [ABCD]\n Note that either way, n-1 merges are still required, but using a pairwise reduction it can be completed in parallel.\n :param dataframes_to_merge: A list of Dask dataframes to be merged together on their index\n :return: A single Dask Dataframe, comprised of the pairwise-merges of all provided dataframes\n \"\"\"\n number_of_dataframes_to_merge = len(dataframes_to_merge)\n\n merge_options = {\n \"on\": on,\n \"lsuffix\": lsuffix,\n \"rsuffix\": rsuffix,\n \"npartitions\": npartitions,\n \"shuffle\": shuffle,\n }\n\n # Base case 1: just return the provided dataframe and merge with `left`\n if number_of_dataframes_to_merge == 1:\n return dataframes_to_merge[0]\n\n # Base case 2: merge the two provided dataframes with `left`\n if number_of_dataframes_to_merge == 2:\n merged_ddf = dataframes_to_merge[0].join(\n dataframes_to_merge[1], how=\"outer\", **merge_options\n )\n return merged_ddf\n\n # Recursive case: split the list of dfs into two ~even sizes and continue down\n else:\n middle_index = number_of_dataframes_to_merge // 2\n merged_ddf = _recursive_pairwise_outer_join(\n [\n _recursive_pairwise_outer_join(\n dataframes_to_merge[:middle_index], **merge_options\n ),\n _recursive_pairwise_outer_join(\n dataframes_to_merge[middle_index:], **merge_options\n ),\n ],\n **merge_options,\n )\n return merged_ddf", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_with_empty_test_merge_asof_with_empty.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_asof_with_empty_test_merge_asof_with_empty.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 585, "end_line": 634, "span_ids": ["test_merge_asof_with_empty"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_asof_with_empty():\n good_df = pd.DataFrame({\"index_col\": list(range(10)), \"good_val\": list(range(10))})\n good_dd = dd.from_pandas(good_df, npartitions=2)\n empty_df = (\n good_df[good_df.index_col < 0].copy().rename(columns={\"good_val\": \"empty_val\"})\n )\n empty_dd = dd.from_pandas(empty_df, npartitions=2)\n\n # left good, right empty\n result_dd = dd.merge_asof(\n good_dd.set_index(\"index_col\"),\n empty_dd.set_index(\"index_col\"),\n left_index=True,\n right_index=True,\n )\n result_df = pd.merge_asof(\n good_df.set_index(\"index_col\"),\n empty_df.set_index(\"index_col\"),\n left_index=True,\n right_index=True,\n )\n assert_eq(result_dd, result_df, check_index=False)\n # left empty, right good\n result_dd = dd.merge_asof(\n empty_dd.set_index(\"index_col\"),\n 
good_dd.set_index(\"index_col\"),\n left_index=True,\n right_index=True,\n )\n result_df = pd.merge_asof(\n empty_df.set_index(\"index_col\"),\n good_df.set_index(\"index_col\"),\n left_index=True,\n right_index=True,\n )\n assert_eq(result_dd, result_df, check_index=False)\n # left/right both empty\n result_dd = dd.merge_asof(\n empty_dd.set_index(\"index_col\"),\n empty_dd.set_index(\"index_col\"),\n left_index=True,\n right_index=True,\n )\n result_df = pd.merge_asof(\n empty_df.set_index(\"index_col\"),\n empty_df.set_index(\"index_col\"),\n left_index=True,\n right_index=True,\n )\n assert_eq(result_dd, result_df, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_large_to_small_test_merge_tasks_large_to_small.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_tasks_large_to_small_test_merge_tasks_large_to_small.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2319, "end_line": 2379, "span_ids": ["test_merge_tasks_large_to_small"], "tokens": 447}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"left\", \"right\"])\n@pytest.mark.parametrize(\"npartitions\", [28, 32])\n@pytest.mark.parametrize(\"base\", [\"lg\", \"sm\"])\ndef test_merge_tasks_large_to_small(how, npartitions, base):\n\n size_lg = 3000\n size_sm = 300\n npartitions_lg = 30\n npartitions_sm = 3\n broadcast_bias = 1.0 # Prioritize broadcast\n\n lg = pd.DataFrame(\n {\n \"x\": np.random.choice(np.arange(100), size_lg),\n \"y\": np.arange(size_lg),\n }\n )\n ddf_lg = dd.from_pandas(lg, npartitions=npartitions_lg)\n\n sm = pd.DataFrame(\n {\n \"x\": np.random.choice(np.arange(100), size_sm),\n \"y\": np.arange(size_sm),\n }\n )\n ddf_sm = dd.from_pandas(sm, npartitions=npartitions_sm)\n\n if base == \"lg\":\n left = lg\n ddf_left = ddf_lg\n right = sm\n ddf_right = ddf_sm\n else:\n left = sm\n ddf_left = ddf_sm\n right = lg\n ddf_right = ddf_lg\n\n dd_result = dd.merge(\n ddf_left,\n ddf_right,\n on=\"y\",\n how=how,\n npartitions=npartitions,\n broadcast=broadcast_bias,\n shuffle=\"tasks\",\n )\n pd_result = pd.merge(left, right, on=\"y\", how=how)\n\n # Make sure `on` dtypes match\n dd_result[\"y\"] = dd_result[\"y\"].astype(np.int32)\n pd_result[\"y\"] = pd_result[\"y\"].astype(np.int32)\n\n if npartitions:\n assert dd_result.npartitions == npartitions\n\n assert_eq(\n dd_result.compute().sort_values(\"y\"),\n pd_result.sort_values(\"y\"),\n check_index=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_pairwise_rejects_unsupported_join_types_test_pairwise_rejects_unsupported_join_types.e_match_merge_multi_only": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_pairwise_rejects_unsupported_join_types_test_pairwise_rejects_unsupported_join_types.e_match_merge_multi_only", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2382, "end_line": 2398, "span_ids": ["test_pairwise_rejects_unsupported_join_types"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"right\", \"inner\"])\ndef test_pairwise_rejects_unsupported_join_types(how):\n base_df = dd.from_pandas(\n pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]), 3\n )\n dfs = [\n dd.from_pandas(\n pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]), 3\n ),\n dd.from_pandas(\n pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]), 3\n ),\n ]\n\n with pytest.raises(ValueError) as e:\n base_df.join(dfs, how=how)\n e.match(\"merge_multi only supports left or outer joins\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_pairwise_merge_results_in_identical_output_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_pairwise_merge_results_in_identical_output_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2401, "end_line": 2428, "span_ids": ["test_pairwise_merge_results_in_identical_output_df"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"left\", \"outer\"])\n@pytest.mark.parametrize(\"npartitions_base\", [1, 2, 3])\n@pytest.mark.parametrize(\"npartitions_other\", [1, 2, 3])\ndef test_pairwise_merge_results_in_identical_output_df(\n how, npartitions_base, npartitions_other\n):\n dfs_to_merge = []\n for i in range(10):\n df = pd.DataFrame(\n {\n f\"{i}A\": [5, 6, 7, 8],\n f\"{i}B\": [4, 3, 2, 1],\n },\n index=[0, 1, 2, 3],\n )\n ddf = dd.from_pandas(df, npartitions_other)\n dfs_to_merge.append(ddf)\n\n ddf_loop = dd.from_pandas(pd.DataFrame(index=[0, 1, 3]), npartitions_base)\n for ddf in dfs_to_merge:\n ddf_loop = ddf_loop.join(ddf, how=how)\n\n ddf_pairwise = dd.from_pandas(pd.DataFrame(index=[0, 1, 
3]), npartitions_base)\n\n ddf_pairwise = ddf_pairwise.join(dfs_to_merge, how=how)\n\n assert_eq(ddf_pairwise, ddf_loop)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_np_shifted_sum.return.df_a_b_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_np_shifted_sum.return.df_a_b_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 43, "span_ids": ["imports", "shifted_sum"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nimport pytest\nfrom packaging.version import parse as parse_version\n\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\n\nN = 40\ndf = pd.DataFrame(\n {\n \"a\": np.random.randn(N).cumsum(),\n \"b\": np.random.randint(100, size=(N,)),\n \"c\": np.random.randint(100, size=(N,)),\n \"d\": np.random.randint(100, size=(N,)),\n \"e\": np.random.randint(100, size=(N,)),\n }\n)\nddf = dd.from_pandas(df, 3)\n\nidx = (\n pd.date_range(\"2016-01-01\", freq=\"3s\", periods=100).union(\n pd.date_range(\"2016-01-01\", freq=\"5s\", periods=100)\n )\n)[:N]\n\nts = pd.DataFrame(\n {\n \"a\": np.random.randn(N).cumsum(),\n \"b\": np.random.randint(100, size=(N,)),\n \"c\": np.random.randint(100, size=(N,)),\n \"d\": np.random.randint(100, size=(N,)),\n \"e\": np.random.randint(100, size=(N,)),\n },\n index=idx,\n)\ndts = dd.from_pandas(ts, 3)\n\n\ndef shifted_sum(df, before, after, c=0):\n a = df.shift(before)\n b = df.shift(-after)\n return df + a + b + c", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_uint64index_test_meta_nonempty_scalar.assert_meta_pd_Timesta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_uint64index_test_meta_nonempty_scalar.assert_meta_pd_Timesta", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 285, "end_line": 304, "span_ids": ["test_meta_nonempty_uint64index", "test_meta_nonempty_scalar"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_meta_nonempty_uint64index():\n idx = pd.Index([1], name=\"foo\", dtype=\"uint64\")\n res = meta_nonempty(idx)\n assert type(res) is type(idx)\n assert res.dtype == \"uint64\"\n assert res.name == idx.name\n\n\ndef test_meta_nonempty_scalar():\n meta = meta_nonempty(np.float64(1.0))\n assert isinstance(meta, np.float64)\n\n x = pd.Timestamp(2000, 1, 1)\n meta = meta_nonempty(x)\n assert meta is x\n\n # DatetimeTZDtype\n x = pd.DatetimeTZDtype(tz=\"UTC\")\n meta = meta_nonempty(x)\n assert meta == pd.Timestamp(1, tz=x.tz, unit=x.unit)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_raise_on_meta_error_test_raise_on_meta_error.None_1.else_.assert_False_should_hav": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_raise_on_meta_error_test_raise_on_meta_error.None_1.else_.assert_False_should_hav", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 316, "span_ids": ["test_raise_on_meta_error"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_raise_on_meta_error():\n try:\n with raise_on_meta_error():\n raise RuntimeError(\"Bad stuff\")\n except Exception as e:\n assert e.args[0].startswith(\"Metadata inference failed.\\n\")\n assert \"RuntimeError\" in e.args[0]\n else:\n assert False, \"should have errored\"\n\n try:\n with raise_on_meta_error(\"myfunc\"):\n raise RuntimeError(\"Bad stuff\")\n except Exception as e:\n assert e.args[0].startswith(\"Metadata inference failed in `myfunc`.\\n\")\n assert \"RuntimeError\" in e.args[0]\n else:\n assert False, \"should have errored\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__empty_series_is_index_like.return.dask_is_index_like_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__empty_series_is_index_like.return.dask_is_index_like_s_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 279, "end_line": 331, "span_ids": ["is_series_like", "_scalar_from_dtype", "impl:16", "is_dataframe_like", "_empty_series", "_nonempty_scalar", "is_index_like"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def _empty_series(name, dtype, index=None):\n if isinstance(dtype, str) and dtype == \"category\":\n return pd.Series(\n pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index\n ).iloc[:0]\n return pd.Series([], dtype=dtype, name=name, index=index)\n\n\n_simple_fake_mapping = {\n \"b\": np.bool_(True),\n \"V\": np.void(b\" \"),\n \"M\": np.datetime64(\"1970-01-01\"),\n \"m\": np.timedelta64(1),\n \"S\": np.str_(\"foo\"),\n \"a\": np.str_(\"foo\"),\n \"U\": np.unicode_(\"foo\"),\n \"O\": \"foo\",\n}\n\n\ndef _scalar_from_dtype(dtype):\n if dtype.kind in (\"i\", \"f\", \"u\"):\n return dtype.type(1)\n elif dtype.kind == \"c\":\n return dtype.type(complex(1, 0))\n elif dtype.kind in _simple_fake_mapping:\n o = _simple_fake_mapping[dtype.kind]\n return o.astype(dtype) if dtype.kind in (\"m\", \"M\") else o\n else:\n raise TypeError(f\"Can't handle dtype: {dtype}\")\n\n\ndef _nonempty_scalar(x):\n if type(x) in make_scalar._lookup:\n return make_scalar(x)\n\n if np.isscalar(x):\n dtype = x.dtype if hasattr(x, \"dtype\") else np.dtype(type(x))\n return make_scalar(dtype)\n\n raise TypeError(f\"Can't handle meta of type '{typename(type(x))}'\")\n\n\ndef is_dataframe_like(df):\n return dask_is_dataframe_like(df)\n\n\ndef is_series_like(s):\n return dask_is_series_like(s)\n\n\ndef is_index_like(s):\n return dask_is_index_like(s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_works_under_error_test_profiler_works_under_error.assert_len_prof_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_works_under_error_test_profiler_works_under_error.assert_len_prof_results_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 53, "span_ids": ["test_profiler_works_under_error"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_profiler_works_under_error():\n div = lambda x, y: x / y\n dsk = {\"x\": (div, 1, 1), \"y\": (div, \"x\", 2), \"z\": (div, \"y\", 0)}\n\n with contextlib.suppress(ZeroDivisionError):\n with prof:\n get(dsk, \"z\")\n\n assert all(len(v) == 5 for v in prof.results)\n assert len(prof.results) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_two_gets_test_two_gets.assert_len_prof_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_two_gets_test_two_gets.assert_len_prof_results_", "embedding": 
null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 56, "end_line": 72, "span_ids": ["test_two_gets"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_two_gets():\n with prof:\n get(dsk, \"e\")\n n = len(prof.results)\n\n dsk2 = {\"x\": (add, 1, 2), \"y\": (add, \"x\", \"x\")}\n\n with prof:\n get(dsk2, \"y\")\n m = len(prof.results)\n\n with prof:\n get(dsk, \"e\")\n get(dsk2, \"y\")\n get(dsk, \"e\")\n\n assert len(prof.results) == n + m + n", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer_Layer.get_output_keys._this_implementation_wil": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer_Layer.get_output_keys._this_implementation_wil", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 36, "end_line": 101, "span_ids": ["Layer", "Layer.is_materialized", "Layer.get_output_keys", "Layer.__init__"], "tokens": 484}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n \"\"\"High level graph layer\n\n This abstract class establish a protocol for high level graph layers.\n\n The main motivation of a layer is to represent a collection of tasks\n symbolically in order to speedup a series of operations significantly.\n Ideally, a layer should stay in this symbolic state until execution\n but in practice some operations will force the layer to generate all\n its internal tasks. We say that the layer has been materialized.\n\n Most of the default implementations in this class will materialize the\n layer. It is up to derived classes to implement non-materializing\n implementations.\n \"\"\"\n\n annotations: Mapping[str, Any] | None\n collection_annotations: Mapping[str, Any] | None\n\n def __init__(\n self,\n annotations: Mapping[str, Any] = None,\n collection_annotations: Mapping[str, Any] = None,\n ):\n \"\"\"Initialize Layer object.\n\n Parameters\n ----------\n annotations : Mapping[str, Any], optional\n By default, None.\n Annotations are metadata or soft constraints associated with tasks\n that dask schedulers may choose to respect:\n They signal intent without enforcing hard constraints.\n As such, they are primarily designed for use with the distributed\n scheduler. See the dask.annotate function for more information.\n collection_annotations : Mapping[str, Any], optional. 
By default, None.\n Experimental, intended to assist with visualizing the performance\n characteristics of Dask computations.\n These annotations are *not* passed to the distributed scheduler.\n \"\"\"\n self.annotations = annotations or copy.copy(config.get(\"annotations\", None))\n self.collection_annotations = collection_annotations or copy.copy(\n config.get(\"collection_annotations\", None)\n )\n\n @abc.abstractmethod\n def is_materialized(self) -> bool:\n \"\"\"Return whether the layer is materialized or not\"\"\"\n return True\n\n @abc.abstractmethod\n def get_output_keys(self) -> Set:\n \"\"\"Return a set of all output keys\n\n Output keys are all keys in the layer that might be referenced by\n other layers.\n\n Classes overriding this implementation should not cause the layer\n to be materialized.\n\n Returns\n -------\n keys: Set\n All output keys\n \"\"\"\n return self.keys() # this implementation will materialize the graph", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_MaterializedLayer_MaterializedLayer.get_output_keys.return.self_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_MaterializedLayer_MaterializedLayer.get_output_keys.return.self_keys_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 534, "end_line": 563, "span_ids": ["MaterializedLayer.is_materialized", "MaterializedLayer.__len__", "MaterializedLayer.__getitem__", "MaterializedLayer.get_output_keys", "MaterializedLayer.__contains__", "MaterializedLayer.__init__", "MaterializedLayer", "MaterializedLayer.__iter__"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MaterializedLayer(Layer):\n \"\"\"Fully materialized layer of `Layer`\n\n Parameters\n ----------\n mapping: Mapping\n The mapping between keys and tasks, typically a dask graph.\n \"\"\"\n\n def __init__(self, mapping: Mapping, annotations=None):\n super().__init__(annotations=annotations)\n self.mapping = mapping\n\n def __contains__(self, k):\n return k in self.mapping\n\n def __getitem__(self, k):\n return self.mapping[k]\n\n def __iter__(self):\n return iter(self.mapping)\n\n def __len__(self):\n return len(self.mapping)\n\n def is_materialized(self):\n return True\n\n def get_output_keys(self):\n return self.keys()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayOverlapLayer_ArrayOverlapLayer.get_output_keys._FIXME_this_implementat": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayOverlapLayer_ArrayOverlapLayer.get_output_keys._FIXME_this_implementat", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 106, "end_line": 163, "span_ids": ["ArrayOverlapLayer.__repr__", "ArrayOverlapLayer", "ArrayOverlapLayer.__getitem__", "ArrayOverlapLayer.__iter__", "ArrayOverlapLayer.__init__", "ArrayOverlapLayer.__len__", "ArrayOverlapLayer._dict", "ArrayOverlapLayer.is_materialized", "ArrayOverlapLayer.get_output_keys"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrayOverlapLayer(Layer):\n \"\"\"Simple HighLevelGraph array overlap layer.\n\n Lazily computed High-level graph layer for a array overlap operations.\n\n Parameters\n ----------\n name : str\n Name of new output overlap array.\n array : Dask array\n axes: Mapping\n Axes dictionary indicating overlap in each dimension,\n e.g. ``{'0': 1, '1': 1}``\n \"\"\"\n\n def __init__(\n self,\n name,\n axes,\n chunks,\n numblocks,\n token,\n ):\n super().__init__()\n self.name = name\n self.axes = axes\n self.chunks = chunks\n self.numblocks = numblocks\n self.token = token\n self._cached_keys = None\n\n def __repr__(self):\n return f\"ArrayOverlapLayer>> _expand_keys_around_center(('x', 2, 3), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # noqa: E501 # doctest: +NORMALIZE_WHITESPACE\n [[('y', 1.1, 2.1), ('y', 1.1, 3), ('y', 1.1, 3.9)],\n [('y', 2, 2.1), ('y', 2, 3), ('y', 2, 3.9)],\n [('y', 2.9, 2.1), ('y', 2.9, 3), ('y', 2.9, 3.9)]]\n\n >>> _expand_keys_around_center(('x', 0, 4), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # noqa: E501 # doctest: +NORMALIZE_WHITESPACE\n [[('y', 0, 3.1), ('y', 0, 4)],\n [('y', 0.9, 3.1), ('y', 0.9, 4)]]\n \"\"\"\n\n def inds(i, ind):\n rv = []\n if ind - 0.9 > 0:\n rv.append(ind - 0.9)\n rv.append(ind)\n if ind + 0.9 < dims[i] - 1:\n rv.append(ind + 0.9)\n return rv\n\n shape = []\n for i, ind in enumerate(k[1:]):\n num = 1\n if ind > 0:\n num += 1\n if ind < dims[i] - 1:\n num += 1\n shape.append(num)\n\n args = [\n inds(i, ind) if any((axes.get(i, 0),)) else [ind] for i, ind in enumerate(k[1:])\n ]\n if name is not None:\n args = [[name]] + args\n seq = list(product(*args))\n shape2 = [d if any((axes.get(i, 0),)) else 1 for i, d in enumerate(shape)]\n result = reshapelist(shape2, seq)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_reshapelist_fractional_slice.if_all_ind_slice_None_.else_.return._operator_getitem_rounde": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_reshapelist_fractional_slice.if_all_ind_slice_None_.else_.return._operator_getitem_rounde", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 285, "end_line": 335, "span_ids": 
["reshapelist", "fractional_slice"], "tokens": 486}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reshapelist(shape, seq):\n \"\"\"Reshape iterator to nested shape\n\n >>> reshapelist((2, 3), range(6))\n [[0, 1, 2], [3, 4, 5]]\n \"\"\"\n if len(shape) == 1:\n return list(seq)\n else:\n n = int(len(seq) / shape[0])\n return [reshapelist(shape[1:], part) for part in toolz.partition(n, seq)]\n\n\ndef fractional_slice(task, axes):\n \"\"\"\n\n >>> fractional_slice(('x', 5.1), {0: 2})\n (, ('x', 5), (slice(-2, None, None),))\n\n >>> fractional_slice(('x', 3, 5.1), {0: 2, 1: 3})\n (, ('x', 3, 5), (slice(None, None, None), slice(-3, None, None)))\n\n >>> fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3})\n (, ('x', 3, 5), (slice(0, 2, None), slice(-3, None, None)))\n \"\"\"\n rounded = (task[0],) + tuple(int(round(i)) for i in task[1:])\n\n index = []\n for i, (t, r) in enumerate(zip(task[1:], rounded[1:])):\n depth = axes.get(i, 0)\n if isinstance(depth, tuple):\n left_depth = depth[0]\n right_depth = depth[1]\n else:\n left_depth = depth\n right_depth = depth\n\n if t == r:\n index.append(slice(None, None, None))\n elif t < r and right_depth:\n index.append(slice(0, right_depth))\n elif t > r and left_depth:\n index.append(slice(-left_depth, None))\n else:\n index.append(slice(0, 0))\n index = tuple(index)\n\n if all(ind == slice(None, None, None) for ind in index):\n return task\n else:\n return (operator.getitem, rounded, index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._keys_to_parts_SimpleShuffleLayer._cull_dependencies.return.deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._keys_to_parts_SimpleShuffleLayer._cull_dependencies.return.deps", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 455, "end_line": 481, "span_ids": ["SimpleShuffleLayer._cull_dependencies", "SimpleShuffleLayer._keys_to_parts"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n def _keys_to_parts(self, keys):\n \"\"\"Simple utility to convert keys to partition indices.\"\"\"\n parts = set()\n for key in keys:\n try:\n _name, _part = key\n except ValueError:\n continue\n if _name != self.name:\n continue\n parts.add(_part)\n return parts\n\n def _cull_dependencies(self, keys, parts_out=None):\n \"\"\"Determine the necessary dependencies to produce `keys`.\n\n For a simple shuffle, output partitions always depend on\n all input partitions. 
This method does not require graph\n materialization.\n \"\"\"\n deps = defaultdict(set)\n parts_out = parts_out or self._keys_to_parts(keys)\n for part in parts_out:\n deps[(self.name, part)] |= {\n (self.name_input, i) for i in range(self.npartitions_input)\n }\n return deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._cull_SimpleShuffleLayer.cull.if_parts_out_set_self_.else_.return.self_culled_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_SimpleShuffleLayer._cull_SimpleShuffleLayer.cull.if_parts_out_set_self_.else_.return.self_culled_deps", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 483, "end_line": 509, "span_ids": ["SimpleShuffleLayer._cull", "SimpleShuffleLayer.cull"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleShuffleLayer(Layer):\n\n def _cull(self, parts_out):\n return SimpleShuffleLayer(\n self.name,\n self.column,\n self.npartitions,\n self.npartitions_input,\n self.ignore_index,\n self.name_input,\n self.meta_input,\n parts_out=parts_out,\n )\n\n def cull(self, keys, all_keys):\n \"\"\"Cull a SimpleShuffleLayer HighLevelGraph layer.\n\n The underlying graph will only include the necessary\n tasks to produce the keys (indices) included in `parts_out`.\n Therefore, \"culling\" the layer only requires us to reset this\n parameter.\n \"\"\"\n parts_out = self._keys_to_parts(keys)\n culled_deps = self._cull_dependencies(keys, parts_out=parts_out)\n if parts_out != set(self.parts_out):\n culled_layer = self._cull(parts_out)\n return culled_layer, culled_deps\n else:\n return self, culled_deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_itertools_sizeof_array.return.o_itemsize_len_o_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_itertools_sizeof_array.return.o_itemsize_len_o_", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 38, "span_ids": ["sizeof_bytes", "imports", "sizeof_default", "sizeof_memoryview", "sizeof_array"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nimport random\nimport sys\nfrom array import array\n\nfrom .utils 
import Dispatch\n\ntry: # PyPy does not support sys.getsizeof\n sys.getsizeof(1)\n getsizeof = sys.getsizeof\nexcept (AttributeError, TypeError): # Monkey patch\n\n def getsizeof(x): # type: ignore\n return 100\n\n\nsizeof = Dispatch(name=\"sizeof\")\n\n\n@sizeof.register(object)\ndef sizeof_default(o):\n return getsizeof(o)\n\n\n@sizeof.register(bytes)\n@sizeof.register(bytearray)\ndef sizeof_bytes(o):\n return len(o)\n\n\n@sizeof.register(memoryview)\ndef sizeof_memoryview(o):\n return o.nbytes\n\n\n@sizeof.register(array)\ndef sizeof_array(o):\n return o.itemsize * len(o)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_sizeof_python_collection_sizeof_python_collection.if_num_items_num_sample.else_.return.getsizeof_seq_sum_map_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_sizeof_python_collection_sizeof_python_collection.if_num_items_num_sample.else_.return.getsizeof_seq_sum_map_", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 60, "span_ids": ["sizeof_python_collection"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@sizeof.register(list)\n@sizeof.register(tuple)\n@sizeof.register(set)\n@sizeof.register(frozenset)\ndef sizeof_python_collection(seq):\n num_items = len(seq)\n num_samples = 10\n if num_items > num_samples:\n if isinstance(seq, (set, frozenset)):\n # As of Python v3.9, it is deprecated to call random.sample() on\n # sets but since sets are unordered anyways we can simply pick\n # the first `num_samples` items.\n samples = itertools.islice(seq, num_samples)\n else:\n samples = random.sample(seq, num_samples)\n return getsizeof(seq) + int(num_items / num_samples * sum(map(sizeof, samples)))\n else:\n return getsizeof(seq) + sum(map(sizeof, seq))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_SimpleSizeof_register_numpy.sizeof_numpy_ndarray.return.int_x_nbytes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/sizeof.py_SimpleSizeof_register_numpy.sizeof_numpy_ndarray.return.int_x_nbytes_", "embedding": null, "metadata": {"file_path": "dask/sizeof.py", "file_name": "sizeof.py", "file_type": "text/x-python", "category": "implementation", "start_line": 63, "end_line": 135, "span_ids": ["sizeof_blocked", "register_numba", "register_rmm", "SimpleSizeof", "register_numpy", "sizeof_python_dict", "register_cupy"], "tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
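The sampling branch above trades accuracy for speed on large containers; a quick demonstration (container overhead is interpreter-dependent, so only coarse bounds are asserted):

from dask.sizeof import sizeof

# Fixed-size protocols are exact: bytes report their payload length,
# memoryviews their nbytes.
assert sizeof(b"abc") == 3
assert sizeof(memoryview(b"abcd")) == 4

# A large list is estimated from a 10-element random sample, so the
# result is approximate but cheap to compute.
big = list(range(100_000))
assert sizeof(big) > 100_000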
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SimpleSizeof:\n \"\"\"Sentinel class to mark a class to be skipped by the dispatcher. This only\n works if this sentinel mixin is first in the mro.\n\n Examples\n --------\n\n >>> class TheAnswer(SimpleSizeof):\n ... def __sizeof__(self):\n ... # Sizeof always add overhead of an object for GC\n ... return 42 - sizeof(object())\n\n >>> sizeof(TheAnswer())\n 42\n\n \"\"\"\n\n\n@sizeof.register(SimpleSizeof)\ndef sizeof_blocked(d):\n return getsizeof(d)\n\n\n@sizeof.register(dict)\ndef sizeof_python_dict(d):\n return (\n getsizeof(d)\n + sizeof(list(d.keys()))\n + sizeof(list(d.values()))\n - 2 * sizeof(list())\n )\n\n\n@sizeof.register_lazy(\"cupy\")\ndef register_cupy():\n import cupy\n\n @sizeof.register(cupy.ndarray)\n def sizeof_cupy_ndarray(x):\n return int(x.nbytes)\n\n\n@sizeof.register_lazy(\"numba\")\ndef register_numba():\n import numba.cuda\n\n @sizeof.register(numba.cuda.cudadrv.devicearray.DeviceNDArray)\n def sizeof_numba_devicendarray(x):\n return int(x.nbytes)\n\n\n@sizeof.register_lazy(\"rmm\")\ndef register_rmm():\n import rmm\n\n # Only included in 0.11.0+\n if hasattr(rmm, \"DeviceBuffer\"):\n\n @sizeof.register(rmm.DeviceBuffer)\n def sizeof_rmm_devicebuffer(x):\n return int(x.nbytes)\n\n\n@sizeof.register_lazy(\"numpy\")\ndef register_numpy():\n import numpy as np\n\n @sizeof.register(np.ndarray)\n def sizeof_numpy_ndarray(x):\n if 0 in x.strides:\n xs = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)]\n return xs.nbytes\n return int(x.nbytes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_test_visualize.with_tmpdir_as_d_.x_visualize_filename_None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_test_visualize.with_tmpdir_as_d_.x_visualize_filename_None", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1005, "end_line": 1037, "span_ids": ["test_visualize"], "tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da\")\n@pytest.mark.skipif(\n sys.flags.optimize, reason=\"graphviz exception with Python -OO flag\"\n)\n@pytest.mark.xfail(\n sys.platform == \"win32\",\n reason=\"graphviz/pango on conda-forge currently broken for windows\",\n strict=False,\n)\ndef test_visualize():\n pytest.importorskip(\"graphviz\")\n with tmpdir() as d:\n x = da.arange(5, chunks=2)\n x.visualize(filename=os.path.join(d, \"mydask\"))\n assert os.path.exists(os.path.join(d, \"mydask.png\"))\n\n x.visualize(filename=os.path.join(d, \"mydask.pdf\"))\n assert os.path.exists(os.path.join(d, \"mydask.pdf\"))\n\n visualize(x, 1, 2, filename=os.path.join(d, \"mydask.png\"))\n assert os.path.exists(os.path.join(d, 
\"mydask.png\"))\n\n dsk = {\"a\": 1, \"b\": (add, \"a\", 2), \"c\": (mul, \"a\", 1)}\n visualize(x, dsk, filename=os.path.join(d, \"mydask.png\"))\n assert os.path.exists(os.path.join(d, \"mydask.png\"))\n\n x = Tuple(dsk, [\"a\", \"b\", \"c\"])\n visualize(x, filename=os.path.join(d, \"mydask.png\"))\n assert os.path.exists(os.path.join(d, \"mydask.png\"))\n\n # To see if visualize() works when the filename parameter is set to None\n # If the function raises an error, the test will fail\n x.visualize(filename=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_order_test_visualize_order.with_tmpfile_extension_d.assert_color_in_text": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_order_test_visualize_order.with_tmpfile_extension_d.assert_color_in_text", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 999, "end_line": 1011, "span_ids": ["test_visualize_order"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da\")\n@pytest.mark.skipif(\n sys.flags.optimize, reason=\"graphviz exception with Python -OO flag\"\n)\ndef test_visualize_order():\n pytest.importorskip(\"graphviz\")\n pytest.importorskip(\"matplotlib.pyplot\")\n x = da.arange(5, chunks=2)\n with tmpfile(extension=\"dot\") as fn:\n x.visualize(color=\"order\", filename=fn, cmap=\"RdBu\")\n with open(fn) as f:\n text = f.read()\n assert 'color=\"#' in text", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_fusion_after_compute_test_blockwise_fusion_after_compute.assert_df_x_result_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_fusion_after_compute_test_blockwise_fusion_after_compute.assert_df_x_result_comput", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 360, "end_line": 376, "span_ids": ["test_blockwise_fusion_after_compute"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_fusion_after_compute(c):\n # See: https://github.com/dask/dask/issues/7720\n\n pd = pytest.importorskip(\"pandas\")\n dd = 
pytest.importorskip(\"dask.dataframe\")\n\n # Simple sequence of Dask-Dataframe manipulations\n df = pd.DataFrame({\"x\": [1, 2, 3] * 5})\n series = dd.from_pandas(df, npartitions=2)[\"x\"]\n result = series < 3\n\n # Trigger an optimization of the `series` graph\n # (which `result` depends on), then compute `result`.\n # This is essentially a test of `rewrite_blockwise`.\n series_len = len(series)\n assert series_len == 15\n assert df.x[result.compute()].sum() == 15", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_different_optimization_test_blockwise_different_optimization.np_testing_assert_equal_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_different_optimization_test_blockwise_different_optimization.np_testing_assert_equal_y", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 411, "end_line": 430, "span_ids": ["test_blockwise_different_optimization"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_different_optimization(c):\n # Regression test for incorrect results due to SubgraphCallable.__eq__\n # not correctly handling subgraphs with the same outputs and arity but\n # different internals (GH-7632). 
The bug is triggered by distributed\n # because it uses a function cache.\n da = pytest.importorskip(\"dask.array\")\n np = pytest.importorskip(\"numpy\")\n\n u = da.from_array(np.arange(3))\n v = da.from_array(np.array([10 + 2j, 7 - 3j, 8 + 1j]))\n cv = v.conj()\n x = u * cv\n (cv,) = dask.optimize(cv)\n y = u * cv\n expected = np.array([0 + 0j, 7 + 3j, 16 - 2j])\n with dask.config.set({\"optimization.fuse.active\": False}):\n x_value = x.compute()\n y_value = y.compute()\n np.testing.assert_equal(x_value, expected)\n np.testing.assert_equal(y_value, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_shuffle_priority_test_shuffle_priority.assert_late_split_early": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_shuffle_priority_test_shuffle_priority.assert_late_split_early", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 534, "end_line": 566, "span_ids": ["test_shuffle_priority"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.flaky(reruns=5, reruns_delay=5)\n@gen_cluster(client=True)\nasync def test_shuffle_priority(c, s, a, b):\n pd = pytest.importorskip(\"pandas\")\n np = pytest.importorskip(\"numpy\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n # Test marked as \"flaky\" since the scheduling behavior\n # is not deterministic. 
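The regression test above leans on dask.optimize returning an equivalent collection with a rewritten graph; a minimal sketch of that round-trip:

import dask
import dask.array as da
import numpy as np

u = da.from_array(np.arange(3))
v = da.from_array(np.array([10 + 2j, 7 - 3j, 8 + 1j]))

# Optimizing early must not change results: cv evaluates to the same
# values as v.conj(), just through a pre-optimized graph.
(cv,) = dask.optimize(v.conj())
np.testing.assert_equal((u * cv).compute(), (u * v.conj()).compute())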
Note that the test is still\n # very likely to fail every time if the \"split\" tasks\n # are not prioritized correctly\n\n df = pd.DataFrame({\"a\": range(1000)})\n ddf = dd.from_pandas(df, npartitions=10)\n ddf2 = ddf.shuffle(\"a\", shuffle=\"tasks\", max_branch=32)\n await c.compute(ddf2)\n\n # Parse transition log for processing tasks\n log = [\n eval(l[0])[0]\n for l in s.transition_log\n if l[1] == \"processing\" and \"simple-shuffle-\" in l[0]\n ]\n\n # Make sure most \"split\" tasks are processing before\n # any \"combine\" tasks begin\n late_split = np.quantile(\n [i for i, st in enumerate(log) if st.startswith(\"split\")], 0.75\n )\n early_combine = np.quantile(\n [i for i, st in enumerate(log) if st.startswith(\"simple\")], 0.25\n )\n assert late_split < early_combine", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_da_input_test_map_partitions_da_input.await_c_compute_df_map_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_da_input_test_map_partitions_da_input.await_c_compute_df_map_pa", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 569, "end_line": 584, "span_ids": ["test_map_partitions_da_input"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_map_partitions_da_input(c, s, a, b):\n \"\"\"Check that map_partitions can handle a dask array input\"\"\"\n np = pytest.importorskip(\"numpy\")\n pd = pytest.importorskip(\"pandas\")\n da = pytest.importorskip(\"dask.array\")\n datasets = pytest.importorskip(\"dask.datasets\")\n\n def f(d, a):\n assert isinstance(d, pd.DataFrame)\n assert isinstance(a, np.ndarray)\n return d\n\n df = datasets.timeseries(freq=\"1d\").persist()\n arr = da.ones((1,), chunks=1).persist()\n await c.compute(df.map_partitions(f, arr, meta=df._meta))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_df_input_test_map_partitions_df_input.with_distributed_LocalClu.with_distributed_Client_c.main_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_map_partitions_df_input_test_map_partitions_df_input.with_distributed_LocalClu.with_distributed_Client_c.main_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 592, "end_line": 626, "span_ids": ["test_map_partitions_df_input"], "tokens": 254}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_partitions_df_input():\n \"\"\"\n Check that map_partitions can handle a delayed\n partition of a dataframe input\n \"\"\"\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n def f(d, a):\n assert isinstance(d, pd.DataFrame)\n assert isinstance(a, pd.DataFrame)\n return d\n\n def main():\n item_df = dd.from_pandas(pd.DataFrame({\"a\": range(10)}), npartitions=1)\n ddf = item_df.to_delayed()[0].persist()\n merged_df = dd.from_pandas(pd.DataFrame({\"b\": range(10)}), npartitions=1)\n\n # Notice, we include a shuffle in order to trigger a complex culling\n merged_df = merged_df.shuffle(on=\"b\")\n\n merged_df.map_partitions(\n f, ddf, meta=merged_df, enforce_metadata=False\n ).compute()\n\n with distributed.LocalCluster(\n scheduler_port=0,\n dashboard_address=\":0\",\n asynchronous=False,\n n_workers=1,\n nthreads=1,\n processes=False,\n ) as cluster:\n with distributed.Client(cluster, asynchronous=False):\n main()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_repr_html_hlg_layers_test_repr_html_hlg_layers.for_layer_in_hg_layers_va.assert_xml_etree_ElementT": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_repr_html_hlg_layers_test_repr_html_hlg_layers.for_layer_in_hg_layers_va.assert_xml_etree_ElementT", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 145, "end_line": 153, "span_ids": ["test_repr_html_hlg_layers"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_repr_html_hlg_layers():\n pytest.importorskip(\"jinja2\")\n hg = HighLevelGraph(\n {\"a\": {\"a\": 1, (\"a\", 0): 2, \"b\": 3}, \"b\": {\"c\": 4}},\n {\"a\": set(), \"b\": set()},\n )\n assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None\n for layer in hg.layers.values():\n assert xml.etree.ElementTree.fromstring(layer._repr_html_()) is not None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__dataframe_broadcast_join__array_map_overlap.return.array_map_overlap_lambda_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__dataframe_broadcast_join__array_map_overlap.return.array_map_overlap_lambda_", "embedding": 
null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 65, "span_ids": ["_array_creation", "_array_map_overlap", "_dataframe_broadcast_join"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _dataframe_broadcast_join(tmpdir):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n # Perform a computation using an HLG-based broadcast join\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10, 20)})\n ddf1 = dd.from_pandas(df, npartitions=4)\n ddf2 = dd.from_pandas(df, npartitions=1)\n return ddf1.merge(ddf2, how=\"left\", broadcast=True, shuffle=\"tasks\")\n\n\ndef _array_creation(tmpdir):\n da = pytest.importorskip(\"dask.array\")\n\n # Perform a computation using HLG-based array creation\n return da.ones((100,)) + da.zeros((100,))\n\n\ndef _array_map_overlap(tmpdir):\n da = pytest.importorskip(\"dask.array\")\n array = da.ones((100,))\n return array.map_overlap(lambda x: x, depth=1, boundary=\"none\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_fractional_slice_test_fractional_slice.assert_isinstance_fs_1_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_fractional_slice_test_fractional_slice.assert_isinstance_fs_1_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 92, "span_ids": ["test_fractional_slice"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fractional_slice():\n assert fractional_slice((\"x\", 4.9), {0: 2}) == (getitem, (\"x\", 5), (slice(0, 2),))\n\n assert fractional_slice((\"x\", 3, 5.1), {0: 2, 1: 3}) == (\n getitem,\n (\"x\", 3, 5),\n (slice(None, None, None), slice(-3, None)),\n )\n\n assert fractional_slice((\"x\", 2.9, 5.1), {0: 2, 1: 3}) == (\n getitem,\n (\"x\", 3, 5),\n (slice(0, 2), slice(-3, None)),\n )\n\n fs = fractional_slice((\"x\", 4.9), {0: 2})\n assert isinstance(fs[1][1], int)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_itertools_double.return.x_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_itertools_double.return.x_2", "embedding": null, "metadata": 
{"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 45, "span_ids": ["imports", "double", "_subgraph_callables_eq", "compare_subgraph_callables"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nimport pickle\nfrom functools import partial\n\nimport pytest\n\nimport dask\nfrom dask.base import tokenize\nfrom dask.core import get_dependencies\nfrom dask.local import get_sync\nfrom dask.optimization import (\n SubgraphCallable,\n cull,\n functions_of,\n fuse,\n fuse_linear,\n inline,\n inline_functions,\n)\nfrom dask.utils import apply, partial_by_order\nfrom dask.utils_test import add, inc\n\n\ndef _subgraph_callables_eq(self, other):\n return (\n type(self) is type(other)\n and self.outkey == other.outkey\n and set(self.inkeys) == set(other.inkeys)\n and tokenize(self.dsk) == tokenize(other.dsk)\n )\n\n\n@pytest.fixture\ndef compare_subgraph_callables(monkeypatch):\n \"\"\"Ignore name when comparing instances of :class:`SubgraphCallable`.\n\n They have UUID-generated names which prevents instances generated by the\n test from comparing equal to reference values. Instead, compare the\n embedded graph using ``tokenize``.\n \"\"\"\n monkeypatch.setattr(SubgraphCallable, \"__eq__\", _subgraph_callables_eq)\n\n\ndef double(x):\n return x * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_cull_with_deps.return.dsk_k_get_dependencies": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_cull_with_deps.return.dsk_k_get_dependencies", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 48, "end_line": 72, "span_ids": ["with_deps", "fuse2", "test_cull"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cull():\n # 'out' depends on 'x' and 'y', but not 'z'\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (inc, \"x\"), \"out\": (add, \"y\", 10)}\n culled, dependencies = cull(d, \"out\")\n assert culled == {\"x\": 1, \"y\": (inc, \"x\"), \"out\": (add, \"y\", 10)}\n assert dependencies == {\"x\": [], \"y\": [\"x\"], \"out\": [\"y\"]}\n\n assert cull(d, \"out\") == cull(d, [\"out\"])\n assert cull(d, [\"out\", \"z\"])[0] == d\n assert cull(d, [[\"out\"], [\"z\"]]) == cull(d, [\"out\", \"z\"])\n pytest.raises(KeyError, lambda: cull(d, \"badkey\"))\n\n\ndef fuse2(*args, **kwargs):\n \"\"\"Run both ``fuse`` and ``fuse_linear`` and compare results\"\"\"\n rv1 = fuse_linear(*args, **kwargs)\n if 
kwargs.get(\"rename_keys\") is not False:\n return rv1\n rv2 = fuse(*args, **kwargs)\n assert rv1 == rv2\n return rv1\n\n\ndef with_deps(dsk):\n return dsk, {k: get_dependencies(dsk, k) for k in dsk}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_eq_test_SubgraphCallable_eq.assert_unnamed1_unname": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_optimization.py_test_SubgraphCallable_eq_test_SubgraphCallable_eq.assert_unnamed1_unname", "embedding": null, "metadata": {"file_path": "dask/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 1182, "end_line": 1206, "span_ids": ["test_SubgraphCallable_eq"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SubgraphCallable_eq():\n dsk1 = {\"a\": 1, \"b\": 2, \"c\": (add, \"d\", \"e\")}\n dsk2 = {\"a\": (inc, 0), \"b\": (inc, \"a\"), \"c\": (add, \"d\", \"e\")}\n f1 = SubgraphCallable(dsk1, \"c\", [\"d\", \"e\"])\n f2 = SubgraphCallable(dsk2, \"c\", [\"d\", \"e\"])\n # Different graphs must compare unequal (when no name given)\n assert f1 != f2\n\n # Different inputs must compare unequal\n f3 = SubgraphCallable(dsk2, \"c\", [\"d\", \"f\"], name=f1.name)\n assert f3 != f1\n\n # Different outputs must compare unequal\n f4 = SubgraphCallable(dsk2, \"a\", [\"d\", \"e\"], name=f1.name)\n assert f4 != f1\n\n # Reordering the inputs must not prevent equality\n f5 = SubgraphCallable(dsk1, \"c\", [\"e\", \"d\"], name=f1.name)\n assert f1 == f5\n assert hash(f1) == hash(f5)\n\n # Explicitly named graphs with different names must be unequal\n unnamed1 = SubgraphCallable(dsk1, \"c\", [\"d\", \"e\"], name=\"first\")\n unnamed2 = SubgraphCallable(dsk1, \"c\", [\"d\", \"e\"], name=\"second\")\n assert unnamed1 != unnamed2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_stringify_collection_keys_test_format_bytes.assert_format_bytes_int_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_stringify_collection_keys_test_format_bytes.assert_format_bytes_int_n", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 673, "end_line": 702, "span_ids": ["test_format_bytes", "test_stringify_collection_keys"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_stringify_collection_keys():\n obj = \"Hello\"\n assert stringify_collection_keys(obj) is obj\n\n obj = [(\"a\", 0), (b\"a\", 0), (1, 1)]\n res = stringify_collection_keys(obj)\n assert res[0] == str(obj[0])\n assert res[1] == str(obj[1])\n assert res[2] == obj[2]\n\n\n@pytest.mark.parametrize(\n \"n,expect\",\n [\n (0, \"0 B\"),\n (920, \"920 B\"),\n (930, \"0.91 kiB\"),\n (921.23 * 2**10, \"921.23 kiB\"),\n (931.23 * 2**10, \"0.91 MiB\"),\n (921.23 * 2**20, \"921.23 MiB\"),\n (931.23 * 2**20, \"0.91 GiB\"),\n (921.23 * 2**30, \"921.23 GiB\"),\n (931.23 * 2**30, \"0.91 TiB\"),\n (921.23 * 2**40, \"921.23 TiB\"),\n (931.23 * 2**40, \"0.91 PiB\"),\n (2**60, \"1024.00 PiB\"),\n ],\n)\ndef test_format_bytes(n, expect):\n assert format_bytes(int(n)) == expect", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_deprecated_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_deprecated_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 705, "end_line": 794, "span_ids": ["test_deprecated_version", "test_typename_on_instances", "test_cached_cumsum", "test_cached_cumsum_nan", "test_deprecated", "test_deprecated_message", "test_deprecated_category", "test_cached_cumsum_non_tuple", "MyType", "test_typename", "test_deprecated_after_version"], "tokens": 599}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deprecated():\n @_deprecated()\n def foo():\n return \"bar\"\n\n with pytest.warns(FutureWarning) as record:\n assert foo() == \"bar\"\n\n assert len(record) == 1\n msg = str(record[0].message)\n assert \"foo is deprecated\" in msg\n assert \"removed in a future release\" in msg\n\n\ndef test_deprecated_version():\n @_deprecated(version=\"1.2.3\")\n def foo():\n return \"bar\"\n\n with pytest.warns(FutureWarning, match=\"deprecated in version 1.2.3\"):\n assert foo() == \"bar\"\n\n\ndef test_deprecated_after_version():\n @_deprecated(after_version=\"1.2.3\")\n def foo():\n return \"bar\"\n\n with pytest.warns(FutureWarning, match=\"deprecated after version 1.2.3\"):\n assert foo() == \"bar\"\n\n\ndef test_deprecated_category():\n @_deprecated(category=DeprecationWarning)\n def foo():\n return \"bar\"\n\n with pytest.warns(DeprecationWarning):\n assert foo() == \"bar\"\n\n\ndef test_deprecated_message():\n @_deprecated(message=\"woohoo\")\n def foo():\n return \"bar\"\n\n with pytest.warns(FutureWarning) as record:\n assert foo() == \"bar\"\n\n assert len(record) == 1\n assert str(record[0].message) == \"woohoo\"\n\n\ndef test_typename():\n assert typename(HighLevelGraph) == \"dask.highlevelgraph.HighLevelGraph\"\n assert typename(HighLevelGraph, short=True) == \"dask.HighLevelGraph\"\n\n\nclass MyType:\n pass\n\n\ndef test_typename_on_instances():\n instance = MyType()\n 
assert typename(instance) == typename(MyType)\n\n\ndef test_cached_cumsum():\n a = (1, 2, 3, 4)\n x = cached_cumsum(a)\n y = cached_cumsum(a, initial_zero=True)\n assert x == (1, 3, 6, 10)\n assert y == (0, 1, 3, 6, 10)\n\n\ndef test_cached_cumsum_nan():\n np = pytest.importorskip(\"numpy\")\n a = (1, np.nan, 3)\n x = cached_cumsum(a)\n y = cached_cumsum(a, initial_zero=True)\n np.testing.assert_equal(x, (1, np.nan, np.nan))\n np.testing.assert_equal(y, (0, 1, np.nan, np.nan))\n\n\ndef test_cached_cumsum_non_tuple():\n a = [1, 2, 3]\n assert cached_cumsum(a) == (1, 3, 6)\n a[1] = 4\n assert cached_cumsum(a) == (1, 5, 8)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__deprecated__deprecated._Decorator_to_mark_a_fu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__deprecated__deprecated._Decorator_to_mark_a_fu", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 44, "end_line": 79, "span_ids": ["_deprecated"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _deprecated(\n *,\n version: str | None = None,\n after_version: str | None = None,\n message: str | None = None,\n use_instead: str | None = None,\n category: type[Warning] = FutureWarning,\n):\n \"\"\"Decorator to mark a function as deprecated\n\n Parameters\n ----------\n version : str, optional\n Version of Dask in which the function was deprecated. If specified, the version\n will be included in the default warning message. This should no longer be used\n after the introduction of automated versioning system.\n after_version : str, optional\n Version of Dask after which the function was deprecated. If specified, the\n version will be included in the default warning message.\n message : str, optional\n Custom warning message to raise.\n use_instead : str, optional\n Name of function to use in place of the deprecated function.\n If specified, this will be included in the default warning\n message.\n category : type[Warning], optional\n Type of warning to raise. Defaults to ``FutureWarning``.\n\n Examples\n --------\n\n >>> from dask.utils import _deprecated\n >>> @_deprecated(after_version=\"X.Y.Z\", use_instead=\"bar\")\n ... def foo():\n ... return \"baz\"\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__deprecated.decorator__deprecated.return.decorator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py__deprecated.decorator__deprecated.return.decorator", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 81, "end_line": 104, "span_ids": ["_deprecated"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _deprecated(\n *,\n version: str | None = None,\n after_version: str | None = None,\n message: str | None = None,\n use_instead: str | None = None,\n category: type[Warning] = FutureWarning,\n):\n\n def decorator(func):\n if message is None:\n msg = f\"{func.__name__} \"\n if after_version is not None:\n msg += f\"was deprecated after version {after_version} \"\n elif version is not None:\n msg += f\"was deprecated in version {version} \"\n else:\n msg += \"is deprecated \"\n msg += \"and will be removed in a future release.\"\n\n if use_instead is not None:\n msg += f\" Please use {use_instead} instead.\"\n else:\n msg = message\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(msg, category=category, stacklevel=2)\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_np_percentile.return._percentile_a_q_method_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/backends.py_np_percentile.return._percentile_a_q_method_", "embedding": null, "metadata": {"file_path": "dask/array/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 26, "span_ids": ["imports", "percentile"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\n\nfrom .dispatch import (\n concatenate_lookup,\n divide_lookup,\n einsum_lookup,\n empty_lookup,\n percentile_lookup,\n tensordot_lookup,\n)\nfrom .numpy_compat import divide as np_divide\nfrom .numpy_compat import ma_divide\nfrom .percentile import _percentile\n\nconcatenate_lookup.register((object, np.ndarray), np.concatenate)\ntensordot_lookup.register((object, np.ndarray), np.tensordot)\neinsum_lookup.register((object, np.ndarray), np.einsum)\nempty_lookup.register((object, np.ndarray), 
np.empty)\nempty_lookup.register(np.ma.masked_array, np.ma.empty)\ndivide_lookup.register((object, np.ndarray), np_divide)\ndivide_lookup.register(np.ma.masked_array, ma_divide)\n\n\n@percentile_lookup.register(np.ndarray)\ndef percentile(a, q, method=\"linear\"):\n return _percentile(a, q, method)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_aggregate_slice_with_int_dask_array_aggregate.return.chunk_outputs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_slice_with_int_dask_array_aggregate_slice_with_int_dask_array_aggregate.return.chunk_outputs_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 345, "end_line": 398, "span_ids": ["slice_with_int_dask_array_aggregate"], "tokens": 492}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def slice_with_int_dask_array_aggregate(idx, chunk_outputs, x_chunks, axis):\n \"\"\"Final aggregation function of `slice_with_int_dask_array_on_axis`.\n Aggregate all chunks of x by one chunk of idx, reordering the output of\n `slice_with_int_dask_array`.\n\n Note that there is no combine function, as a recursive aggregation (e.g.\n with split_every) would not give any benefit.\n\n Parameters\n ----------\n idx: ndarray, ndim=1, dtype=any integer\n j-th chunk of idx\n chunk_outputs: ndarray\n concatenation along axis of the outputs of `slice_with_int_dask_array`\n for all chunks of x and the j-th chunk of idx\n x_chunks: tuple\n dask chunks of the x da.Array along axis, e.g. 
``(3, 3, 2)``\n axis: int\n normalized axis to take elements from (0 <= axis < x.ndim)\n\n Returns\n -------\n Selection from all chunks of x for the j-th chunk of idx, in the correct\n order\n \"\"\"\n # Needed when idx is unsigned\n idx = idx.astype(np.int64)\n\n # Normalize negative indices\n idx = np.where(idx < 0, idx + sum(x_chunks), idx)\n\n x_chunk_offset = 0\n chunk_output_offset = 0\n\n # Assemble the final index that picks from the output of the previous\n # kernel by adding together one layer per chunk of x\n # FIXME: this could probably be reimplemented with a faster search-based\n # algorithm\n idx_final = np.zeros_like(idx)\n for x_chunk in x_chunks:\n idx_filter = (idx >= x_chunk_offset) & (idx < x_chunk_offset + x_chunk)\n idx_cum = np.cumsum(idx_filter)\n idx_final += np.where(idx_filter, idx_cum - 1 + chunk_output_offset, 0)\n x_chunk_offset += x_chunk\n if idx_cum.size > 0:\n chunk_output_offset += idx_cum[-1]\n\n # np.take does not support slice indices\n # return np.take(chunk_outputs, idx_final, axis)\n return chunk_outputs[\n tuple(\n idx_final if i == axis else slice(None) for i in range(chunk_outputs.ndim)\n )\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_getitem_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_getitem_", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 401, "end_line": 429, "span_ids": ["getitem"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getitem(obj, index):\n \"\"\"Getitem function\n\n This function creates a copy of the desired selection for array-like\n inputs when the selection is smaller than half of the original array. 
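A quick check of the copy-on-small-selection rule this docstring describes (the docstring continues below):

import numpy as np

from dask.array.chunk import getitem

big = np.zeros(1_000_000)

# Small basic-slice selections are copied so the large source buffer
# can be freed independently of the result.
small = getitem(big, slice(0, 10))
assert small.flags.owndata

# Selections covering more than half of the array remain views.
whole = getitem(big, slice(None))
assert not whole.flags.owndata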
This\n avoids excess memory usage when extracting a small portion from a large array.\n For more information, see\n https://numpy.org/doc/stable/reference/arrays.indexing.html#basic-slicing-and-indexing.\n\n Parameters\n ----------\n obj: ndarray, string, tuple, list\n Object to get item from.\n index: int, list[int], slice()\n Desired selection to extract from obj.\n\n Returns\n -------\n Selection obj[index]\n\n \"\"\"\n result = obj[index]\n try:\n if not result.flags.owndata and obj.size >= 2 * result.size:\n result = result.copy()\n except AttributeError:\n pass\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from___future___import_an_PerformanceWarning._A_warning_given_when_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_from___future___import_an_PerformanceWarning._A_warning_given_when_b", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 97, "span_ids": ["imports", "PerformanceWarning"], "tokens": 607}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport contextlib\nimport math\nimport operator\nimport os\nimport pickle\nimport re\nimport sys\nimport traceback\nimport uuid\nimport warnings\nfrom bisect import bisect\nfrom collections.abc import (\n Collection,\n Hashable,\n Iterable,\n Iterator,\n Mapping,\n MutableMapping,\n Sequence,\n)\nfrom functools import partial, reduce, wraps\nfrom itertools import product, zip_longest\nfrom numbers import Integral, Number\nfrom operator import add, mul\nfrom threading import Lock\nfrom typing import Any\n\nimport numpy as np\nfrom fsspec import get_mapper\nfrom tlz import accumulate, concat, first, frequencies, groupby, partition\nfrom tlz.curried import pluck\n\nfrom .. import compute, config, core, threaded\nfrom ..base import (\n DaskMethodsMixin,\n compute_as_if_collection,\n dont_optimize,\n is_dask_collection,\n persist,\n tokenize,\n)\nfrom ..blockwise import blockwise as core_blockwise\nfrom ..blockwise import broadcast_dimensions\nfrom ..context import globalmethod\nfrom ..core import quote\nfrom ..delayed import Delayed, delayed\nfrom ..highlevelgraph import HighLevelGraph, MaterializedLayer\nfrom ..layers import ArraySliceDep, reshapelist\nfrom ..sizeof import sizeof\nfrom ..utils import (\n IndexCallable,\n M,\n SerializableLock,\n cached_cumsum,\n cached_property,\n concrete,\n derived_from,\n factors,\n format_bytes,\n funcname,\n has_keyword,\n is_arraylike,\n is_dataframe_like,\n is_index_like,\n is_integer,\n is_series_like,\n ndeepmap,\n ndimlist,\n parse_bytes,\n typename,\n)\nfrom ..widgets import get_template\nfrom . 
import chunk\nfrom .chunk import getitem\nfrom .chunk_types import is_valid_array_chunk, is_valid_chunk_type\n\n# Keep einsum_lookup and tensordot_lookup here for backwards compatibility\nfrom .dispatch import concatenate_lookup, einsum_lookup, tensordot_lookup # noqa: F401\nfrom .numpy_compat import _numpy_120, _Recurser\nfrom .slicing import replace_ellipsis, setitem_array, slice_array\n\nconfig.update_defaults({\"array\": {\"chunk-size\": \"128MiB\", \"rechunk-threshold\": 4}})\n\nunknown_chunk_message = (\n \"\\n\\n\"\n \"A possible solution: \"\n \"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\\n\"\n \"Summary: to compute chunks sizes, use\\n\\n\"\n \" x.compute_chunk_sizes() # for Dask Array `x`\\n\"\n \" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`\"\n)\n\n\nclass PerformanceWarning(Warning):\n \"\"\"A warning given when bad chunking may cause poor performance\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__concatenate2__concatenate2.if_isinstance_arrays_0_.else_.return.concatenate_arrays_axis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__concatenate2__concatenate2.if_isinstance_arrays_0_.else_.return.concatenate_arrays_axis_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 359, "span_ids": ["_concatenate2"], "tokens": 583}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _concatenate2(arrays, axes=[]):\n \"\"\"Recursively concatenate nested lists of arrays along axes\n\n Each entry in axes corresponds to each level of the nested list. 
The\n length of axes should correspond to the level of nesting of arrays.\n If axes is an empty list or tuple, return arrays, or arrays[0] if\n arrays is a list.\n\n >>> x = np.array([[1, 2], [3, 4]])\n >>> _concatenate2([x, x], axes=[0])\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> _concatenate2([x, x], axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Supports Iterators\n >>> _concatenate2(iter([x, x]), axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Special Case\n >>> _concatenate2([x, x], axes=())\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if axes == ():\n if isinstance(arrays, list):\n return arrays[0]\n else:\n return arrays\n\n if isinstance(arrays, Iterator):\n arrays = list(arrays)\n if not isinstance(arrays, (list, tuple)):\n return arrays\n if len(axes) > 1:\n arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]\n concatenate = concatenate_lookup.dispatch(\n type(max(arrays, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n if isinstance(arrays[0], dict):\n # Handle concatenation of `dict`s, used as a replacement for structured\n # arrays when that's not supported by the array library (e.g., CuPy).\n keys = list(arrays[0].keys())\n assert all(list(a.keys()) == keys for a in arrays)\n ret = dict()\n for k in keys:\n ret[k] = concatenate(list(a[k] for a in arrays), axis=axes[0])\n return ret\n else:\n return concatenate(arrays, axis=axes[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_not_callable_func__map_blocks.extra_names._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks.if_not_callable_func__map_blocks.extra_names._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 696, "end_line": 790, "span_ids": ["map_blocks"], "tokens": 809}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n if not callable(func):\n msg = (\n \"First argument must be callable function, not %s\\n\"\n \"Usage: da.map_blocks(function, x)\\n\"\n \" or: da.map_blocks(function, x, y, z)\"\n )\n raise TypeError(msg % type(func).__name__)\n if token:\n warnings.warn(\n \"The `token=` keyword to `map_blocks` has been moved to `name=`. 
\"\n \"Please use `name=` instead as the `token=` keyword will be removed \"\n \"in a future release.\",\n category=FutureWarning,\n )\n name = token\n\n name = f\"{name or funcname(func)}-{tokenize(func, dtype, chunks, drop_axis, new_axis, *args, **kwargs)}\"\n new_axes = {}\n\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n if isinstance(new_axis, Number):\n new_axis = [new_axis] # TODO: handle new_axis\n\n arrs = [a for a in args if isinstance(a, Array)]\n\n argpairs = [\n (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)\n for a in args\n ]\n if arrs:\n out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]\n else:\n out_ind = ()\n\n original_kwargs = kwargs\n\n if dtype is None and meta is None:\n try:\n meta = compute_meta(func, dtype, *args, **kwargs)\n except Exception:\n pass\n\n dtype = apply_infer_dtype(func, args, original_kwargs, \"map_blocks\")\n\n if drop_axis:\n ndim_out = len(out_ind)\n if any(i < -ndim_out or i >= ndim_out for i in drop_axis):\n raise ValueError(\n f\"drop_axis out of range (drop_axis={drop_axis}, \"\n f\"but output is {ndim_out}d).\"\n )\n drop_axis = [i % ndim_out for i in drop_axis]\n out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)\n if new_axis is None and chunks is not None and len(out_ind) < len(chunks):\n new_axis = range(len(chunks) - len(out_ind))\n if new_axis:\n # new_axis = [x + len(drop_axis) for x in new_axis]\n out_ind = list(out_ind)\n for ax in sorted(new_axis):\n n = len(out_ind) + len(drop_axis)\n out_ind.insert(ax, n)\n if chunks is not None:\n new_axes[n] = chunks[ax]\n else:\n new_axes[n] = 1\n out_ind = tuple(out_ind)\n if max(new_axis) > max(out_ind):\n raise ValueError(\"New_axis values do not fill in all dimensions\")\n\n if chunks is not None:\n if len(chunks) != len(out_ind):\n raise ValueError(\n f\"Provided chunks have {len(chunks)} dims; expected {len(out_ind)} dims\"\n )\n adjust_chunks = dict(zip(out_ind, chunks))\n else:\n adjust_chunks = None\n\n out = blockwise(\n func,\n out_ind,\n *concat(argpairs),\n name=name,\n new_axes=new_axes,\n dtype=dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=adjust_chunks,\n meta=meta,\n **kwargs,\n )\n\n extra_argpairs = []\n extra_names = []\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._If_func_has_block_id_as_map_blocks._objects_and_prepare_to_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_map_blocks._If_func_has_block_id_as_map_blocks._objects_and_prepare_to_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 791, "end_line": 809, "span_ids": ["map_blocks"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n # If func has block_id as an argument, construct an array of block IDs and\n # prepare to inject it.\n if has_keyword(func, \"block_id\"):\n block_id_name = \"block-id-\" + out.name\n block_id_dsk = {\n (block_id_name,) + block_id: block_id\n for block_id in product(*(range(len(c)) for c in out.chunks))\n }\n block_id_array = Array(\n block_id_dsk,\n block_id_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_id_array, out_ind))\n extra_names.append(\"block_id\")\n\n # If func has block_info as an argument, construct an array of block info\n # objects and prepare to inject it.\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store_store.if_len_sources_1_and_l.regions_len_sources_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store_store.if_len_sources_1_and_l.regions_len_sources_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 956, "end_line": 1041, "span_ids": ["store"], "tokens": 744}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def store(\n sources: Array | Collection[Array],\n targets,\n lock: bool | Lock = True,\n regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,\n compute: bool = True,\n return_stored: bool = False,\n **kwargs,\n):\n \"\"\"Store dask arrays in array-like objects, overwrite data in target\n\n This stores dask arrays into object that supports numpy-style setitem\n indexing. It stores values chunk by chunk so that it does not have to\n fill up memory. 
For best performance you can align the block size of\n the storage target with the block size of your array.\n\n If your data fits in memory then you may prefer calling\n ``np.array(myarray)`` instead.\n\n Parameters\n ----------\n\n sources: Array or collection of Arrays\n targets: array-like or Delayed or collection of array-likes and/or Delayeds\n These should support setitem syntax ``target[10:20] = ...``\n lock: boolean or threading.Lock, optional\n Whether or not to lock the data stores while storing.\n Pass True (lock each file individually), False (don't lock) or a\n particular :class:`threading.Lock` object to be shared among all writes.\n regions: tuple of slices or collection of tuples of slices\n Each ``region`` tuple in ``regions`` should be such that\n ``target[region].shape = source.shape``\n for the corresponding source and target in sources and targets,\n respectively. If this is a tuple, the contents will be assumed to be\n slices, so do not provide a tuple of tuples.\n compute: boolean, optional\n If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.\n return_stored: boolean, optional\n Optionally return the stored result (default False).\n kwargs:\n Parameters passed to compute/persist (only used if compute=True)\n\n Returns\n -------\n\n If return_stored=True\n tuple of Arrays\n If return_stored=False and compute=True\n None\n If return_stored=False and compute=False\n Delayed\n\n Examples\n --------\n\n >>> import h5py # doctest: +SKIP\n >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP\n >>> dset = f.create_dataset('/data', shape=x.shape,\n ... chunks=x.chunks,\n ... dtype='f8') # doctest: +SKIP\n\n >>> store(x, dset) # doctest: +SKIP\n\n Alternatively store many arrays at the same time\n\n >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP\n \"\"\"\n\n if isinstance(sources, Array):\n sources = [sources]\n targets = [targets]\n\n if any(not isinstance(s, Array) for s in sources):\n raise ValueError(\"All sources must be dask array objects\")\n\n if len(sources) != len(targets):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d]\"\n % (len(sources), len(targets))\n )\n\n if isinstance(regions, tuple) or regions is None:\n regions = [regions]\n\n if len(sources) > 1 and len(regions) == 1:\n regions *= len(sources)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.if_len_sources_len_re_store.for_s_t_n_r_in_zip_sou.map_keys_map_layer_key": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.if_len_sources_len_re_store.for_s_t_n_r_in_zip_sou.map_keys_map_layer_key", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1043, "end_line": 1099, "span_ids": ["store"], "tokens": 577}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def store(\n sources: Array | Collection[Array],\n targets,\n lock: bool | Lock = True,\n regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,\n compute: bool = True,\n return_stored: bool = False,\n **kwargs,\n):\n # ... other code\n\n if len(sources) != len(regions):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d] than regions [%d]\"\n % (len(sources), len(targets), len(regions))\n )\n\n # Optimize all sources together\n sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])\n sources_layer = Array.__dask_optimize__(\n sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))\n )\n sources_name = \"store-sources-\" + tokenize(sources)\n layers = {sources_name: sources_layer}\n dependencies = {sources_name: set()}\n\n # Optimize all targets together\n targets_keys = []\n targets_dsks = []\n for t in targets:\n if isinstance(t, Delayed):\n targets_keys.append(t.key)\n targets_dsks.append(t.__dask_graph__())\n elif is_dask_collection(t):\n raise TypeError(\"Targets must be either Delayed objects or array-likes\")\n\n if targets_dsks:\n targets_hlg = HighLevelGraph.merge(*targets_dsks)\n targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)\n targets_name = \"store-targets-\" + tokenize(targets_keys)\n layers[targets_name] = targets_layer\n dependencies[targets_name] = set()\n\n load_stored = return_stored and not compute\n\n map_names = [\n \"store-map-\" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)\n for s, t, r in zip(sources, targets, regions)\n ]\n map_keys = []\n\n for s, t, n, r in zip(sources, targets, map_names, regions):\n map_layer = insert_to_ooc(\n keys=s.__dask_keys__(),\n chunks=s.chunks,\n out=t.key if isinstance(t, Delayed) else t,\n name=n,\n lock=lock,\n region=r,\n return_stored=return_stored,\n load_stored=load_stored,\n )\n layers[n] = map_layer\n if isinstance(t, Delayed):\n dependencies[n] = {sources_name, targets_name}\n else:\n dependencies[n] = {sources_name}\n map_keys += map_layer.keys()\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.if_return_stored__store.if_return_stored_.else_.return.Delayed_key_store_dsk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_store.if_return_stored__store.if_return_stored_.else_.return.Delayed_key_store_dsk_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1101, "end_line": 1126, "span_ids": ["store"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def store(\n sources: Array | Collection[Array],\n targets,\n lock: bool | Lock = True,\n regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,\n compute: bool = True,\n return_stored: bool = False,\n **kwargs,\n):\n # ... other code\n\n if return_stored:\n store_dsk = HighLevelGraph(layers, dependencies)\n load_store_dsk = store_dsk\n if compute:\n store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]\n store_dlyds = persist(*store_dlyds, **kwargs)\n store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])\n load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)\n map_names = [\"load-\" + n for n in map_names]\n\n return tuple(\n Array(load_store_dsk, n, s.chunks, meta=s)\n for s, n in zip(sources, map_names)\n )\n\n elif compute:\n store_dsk = HighLevelGraph(layers, dependencies)\n compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)\n return None\n\n else:\n key = \"store-\" + tokenize(map_names)\n layers[key] = {key: map_keys}\n dependencies[key] = set(map_names)\n store_dsk = HighLevelGraph(layers, dependencies)\n return Delayed(key, store_dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.shape_Array.__len__.return.int_sum_self_chunks_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.shape_Array.__len__.return.int_sum_self_chunks_0_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1393, "end_line": 1447, "span_ids": ["Array._chunks_18", "Array.chunks", "Array.chunks_20", "Array.dtype", "Array.chunksize", "Array._chunks", "Array.__len__", "Array.shape"], "tokens": 383}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
Array(DaskMethodsMixin):\n\n @cached_property\n def shape(self):\n return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)\n\n @property\n def chunksize(self):\n return tuple(max(c) for c in self.chunks)\n\n @property\n def dtype(self):\n if isinstance(self._meta, tuple):\n dtype = self._meta[0].dtype\n else:\n dtype = self._meta.dtype\n return dtype\n\n @property\n def _chunks(self):\n \"\"\"Non-public chunks property. Allows setting a chunk value.\"\"\"\n return self.__chunks\n\n @_chunks.setter\n def _chunks(self, chunks):\n self.__chunks = chunks\n\n # When the chunks change, the cached properties that were\n # dependent on them need to be deleted:\n for key in [\"numblocks\", \"npartitions\", \"shape\", \"ndim\", \"size\", \"_key_array\"]:\n self._reset_cache(key)\n\n @property\n def chunks(self):\n \"\"\"Chunks property.\"\"\"\n return self.__chunks\n\n @chunks.setter\n def chunks(self, chunks):\n raise TypeError(\n \"Cannot set chunks directly\\n\\n\"\n \"Please use the rechunk method instead:\\n\"\n f\" x.rechunk({chunks})\\n\\n\"\n \"If trying to avoid unknown chunks, use\\n\"\n \" x.compute_chunk_sizes()\"\n )\n\n def __len__(self):\n if not self.chunks:\n raise TypeError(\"len() of unsized object\")\n if np.isnan(self.chunks[0]).any():\n msg = (\n \"Cannot call len() on object with unknown chunk size.\"\n f\"{unknown_chunk_message}\"\n )\n raise ValueError(msg)\n return int(sum(self.chunks[0]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__repr___Array.__repr__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.__repr___Array.__repr__.return._", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1488, "end_line": 1506, "span_ids": ["Array.__repr__"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def __repr__(self):\n \"\"\"\n\n >>> import dask.array as da\n >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')\n dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>\n \"\"\"\n chunksize = str(self.chunksize)\n name = self.name.rsplit(\"-\", 1)[0]\n return (\n \"dask.array<{}, shape={}, dtype={}, chunksize={}, chunktype={}.{}>\".format(\n name,\n self.shape,\n self.dtype,\n chunksize,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._repr_html__Array._repr_html_.return.get_template_array_html_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array._repr_html__Array._repr_html_.return.get_template_array_html_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1508, "end_line": 1529, "span_ids": ["Array._repr_html_"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n def _repr_html_(self):\n try:\n grid = self.to_svg(size=config.get(\"array.svg.size\", 120))\n except NotImplementedError:\n grid = \"\"\n\n if \"sparse\" in typename(type(self._meta)):\n nbytes = None\n cbytes = None\n elif not math.isnan(self.nbytes):\n nbytes = format_bytes(self.nbytes)\n cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)\n else:\n nbytes = \"unknown\"\n cbytes = \"unknown\"\n\n return get_template(\"array.html.j2\").render(\n array=self,\n grid=grid,\n nbytes=nbytes,\n cbytes=cbytes,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.blocks_Array.blocks.return.BlockView_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_Array.blocks_Array.blocks.return.BlockView_self_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1901, "end_line": 1939, "span_ids": ["Array.blocks"], "tokens": 442}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Array(DaskMethodsMixin):\n\n @property\n def blocks(self):\n \"\"\"An array-like interface to the blocks of an array.\n\n This returns a ``Blockview`` object that provides an array-like interface\n to the blocks of a dask array. Numpy-style indexing of a ``Blockview`` object\n returns a selection of blocks as a new dask array.\n\n You can index ``array.blocks`` like a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.blocks.size). 
The dimensionality of the output array matches\n the dimension of this array, even if integer indices are passed.\n Slicing with ``np.newaxis`` or multiple lists is not supported.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(8, chunks=2)\n >>> x.blocks.shape # aliases x.numblocks\n (4,)\n >>> x.blocks[0].compute()\n array([0, 1])\n >>> x.blocks[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.blocks[::2].compute()\n array([0, 1, 4, 5])\n >>> x.blocks[[-1, 0]].compute()\n array([6, 7, 0, 1])\n >>> x.blocks.ravel() # doctest: +NORMALIZE_WHITESPACE\n [dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,\n dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,\n dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,\n dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>]\n\n Returns\n -------\n An instance of ``dask.array.BlockView``\n \"\"\"\n return BlockView(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray_asarray._Convert_the_input_to_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray_asarray._Convert_the_input_to_a", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4208, "end_line": 4257, "span_ids": ["asarray"], "tokens": 559}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asarray(\n a, allow_unknown_chunksizes=False, dtype=None, order=None, *, like=None, **kwargs\n):\n \"\"\"Convert the input to a dask array.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array. This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples of\n lists and ndarrays.\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as those that come from converting\n from dask dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n dtype : data-type, optional\n By default, the data-type is inferred from the input data.\n order : {\u2018C\u2019, \u2018F\u2019, \u2018A\u2019, \u2018K\u2019}, optional\n Memory layout. \u2018A\u2019 and \u2018K\u2019 depend on the order of input array a.\n \u2018C\u2019 row-major (C-style), \u2018F\u2019 column-major (Fortran-style) memory\n representation. \u2018A\u2019 (any) means \u2018F\u2019 if a is Fortran contiguous, \u2018C\u2019\n otherwise. \u2018K\u2019 (keep) preserve input order. Defaults to \u2018C\u2019.\n like: array-like\n Reference object to allow the creation of Dask arrays with chunks\n that are not NumPy arrays. If an array-like passed in as ``like``\n supports the ``__array_function__`` protocol, the chunk type of the\n resulting array will be defined by it. In this case, it ensures the\n creation of a Dask array compatible with that passed in via this\n argument. 
If ``like`` is a Dask array, the chunk type of the\n resulting array will be defined by the chunk type of ``like``.\n Requires NumPy 1.20.0 or higher.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray.if_like_is_None__asarray.return.from_array_a_getitem_get": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asarray.if_like_is_None__asarray.return.from_array_a_getitem_get", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4258, "end_line": 4278, "span_ids": ["asarray"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asarray(\n a, allow_unknown_chunksizes=False, dtype=None, order=None, *, like=None, **kwargs\n):\n if like is None:\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.split(\".\")[0] == \"xarray\" and hasattr(a, \"data\"):\n return asarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a, allow_unknown_chunksizes=allow_unknown_chunksizes)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asarray(a, dtype=dtype, order=order)\n else:\n if not _numpy_120:\n raise RuntimeError(\"The use of ``like`` requires NumPy >= 1.20\")\n\n like_meta = meta_from_array(like)\n if isinstance(a, Array):\n return a.map_blocks(np.asarray, like=like_meta, dtype=dtype, order=order)\n else:\n a = np.asarray(a, like=like_meta, dtype=dtype, order=order)\n return from_array(a, getitem=getter_inline, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise_elemwise.expr_inds.tuple_range_out_ndim_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise_elemwise.expr_inds.tuple_range_out_ndim_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4419, "end_line": 4485, "span_ids": ["elemwise"], "tokens": 590}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def elemwise(op, *args, out=None, where=True, dtype=None, name=None, **kwargs):\n \"\"\"Apply an elementwise ufunc-like function blockwise across arguments.\n\n Like numpy ufuncs, broadcasting rules are respected.\n\n Parameters\n ----------\n op : callable\n The function to apply. Should be numpy ufunc-like in the parameters\n that it accepts.\n *args : Any\n Arguments to pass to `op`. Non-dask array-like objects are first\n converted to dask arrays, then all arrays are broadcast together before\n applying the function blockwise across all arguments. Any scalar\n arguments are passed as-is following normal numpy ufunc behavior.\n out : dask array, optional\n If out is a dask.array then this overwrites the contents of that array\n with the result.\n where : array_like, optional\n An optional boolean mask marking locations where the ufunc should be\n applied. Can be a scalar, dask array, or any other array-like object.\n Mirrors the ``where`` argument to numpy ufuncs, see e.g. ``numpy.add``\n for more information.\n dtype : dtype, optional\n If provided, overrides the output array dtype.\n name : str, optional\n A unique key name to use when building the backing dask graph. If not\n provided, one will be automatically generated based on the input\n arguments.\n\n Examples\n --------\n >>> elemwise(add, x, y) # doctest: +SKIP\n >>> elemwise(sin, x) # doctest: +SKIP\n >>> elemwise(sin, x, out=dask_array) # doctest: +SKIP\n\n See Also\n --------\n blockwise\n \"\"\"\n if kwargs:\n raise TypeError(\n f\"{op.__name__} does not take the following keyword arguments \"\n f\"{sorted(kwargs)}\"\n )\n\n out = _elemwise_normalize_out(out)\n where = _elemwise_normalize_where(where)\n args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]\n\n shapes = []\n for arg in args:\n shape = getattr(arg, \"shape\", ())\n if any(is_dask_collection(x) for x in shape):\n # Want to exclude Delayed shapes and dd.Scalar\n shape = ()\n shapes.append(shape)\n if isinstance(where, Array):\n shapes.append(where.shape)\n if isinstance(out, Array):\n shapes.append(out.shape)\n\n shapes = [s if isinstance(s, Iterable) else () for s in shapes]\n out_ndim = len(\n broadcast_shapes(*shapes)\n ) # Raises ValueError if dimensions mismatch\n expr_inds = tuple(range(out_ndim))[::-1]\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise.if_dtype_is_not_None__elemwise.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_elemwise.if_dtype_is_not_None__elemwise.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4487, "end_line": 4537, "span_ids": ["elemwise"], "tokens": 458}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def elemwise(op, *args, out=None, where=True, dtype=None, name=None, **kwargs):\n # ... other code\n\n if dtype is not None:\n need_enforce_dtype = True\n else:\n # We follow NumPy's rules for dtype promotion, which special cases\n # scalars and 0d ndarrays (which it considers equivalent) by using\n # their values to compute the result dtype:\n # https://github.com/numpy/numpy/issues/6240\n # We don't inspect the values of 0d dask arrays, because these could\n # hold potentially very expensive calculations. Instead, we treat\n # them just like other arrays, and if necessary cast the result of op\n # to match.\n vals = [\n np.empty((1,) * max(1, a.ndim), dtype=a.dtype)\n if not is_scalar_for_elemwise(a)\n else a\n for a in args\n ]\n try:\n dtype = apply_infer_dtype(op, vals, {}, \"elemwise\", suggest_dtype=False)\n except Exception:\n return NotImplemented\n need_enforce_dtype = any(\n not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args\n )\n\n if not name:\n name = f\"{funcname(op)}-{tokenize(op, dtype, *args, where)}\"\n\n blockwise_kwargs = dict(dtype=dtype, name=name, token=funcname(op).strip(\"_\"))\n\n if where is not True:\n blockwise_kwargs[\"elemwise_where_function\"] = op\n op = _elemwise_handle_where\n args.extend([where, out])\n\n if need_enforce_dtype:\n blockwise_kwargs[\"enforce_dtype\"] = dtype\n blockwise_kwargs[\"enforce_dtype_function\"] = op\n op = _enforce_dtype\n\n result = blockwise(\n op,\n expr_inds,\n *concat(\n (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)\n for a in args\n ),\n **blockwise_kwargs,\n )\n\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__elemwise_normalize_where__elemwise_normalize_out.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py__elemwise_normalize_where__elemwise_normalize_out.return.out", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4540, "end_line": 4569, "span_ids": 
["_elemwise_normalize_where", "_elemwise_handle_where", "_elemwise_normalize_out"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _elemwise_normalize_where(where):\n if where is True:\n return True\n elif where is False or where is None:\n return False\n return asarray(where)\n\n\ndef _elemwise_handle_where(*args, **kwargs):\n function = kwargs.pop(\"elemwise_where_function\")\n *args, where, out = args\n if hasattr(out, \"copy\"):\n out = out.copy()\n return function(*args, where=where, out=out, **kwargs)\n\n\ndef _elemwise_normalize_out(out):\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n if not (out is None or isinstance(out, Array)):\n raise NotImplementedError(\n f\"The out parameter is not fully supported.\"\n f\" Received type {type(out).__name__}, expected Dask Array\"\n )\n return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_new_da_object_new_da_object.if_is_dataframe_like_meta.else_.return.Array_dsk_name_name_chu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_new_da_object_new_da_object.if_is_dataframe_like_meta.else_.return.Array_dsk_name_name_chu", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5415, "end_line": 5427, "span_ids": ["new_da_object"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def new_da_object(dsk, name, chunks, meta=None, dtype=None):\n \"\"\"Generic constructor for dask.array or dask.dataframe objects.\n\n Decides the appropriate output class based on the type of `meta` provided.\n \"\"\"\n if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):\n from ..dataframe.core import new_dd_object\n\n assert all(len(c) == 1 for c in chunks[1:])\n divisions = [None] * (len(chunks[0]) + 1)\n return new_dd_object(dsk, name, meta, divisions)\n else:\n return Array(dsk, name=name, chunks=chunks, meta=meta, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView_BlockView.__init__.self._array.array": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView_BlockView.__init__.self._array.array", "embedding": null, "metadata": 
{"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5430, "end_line": 5473, "span_ids": ["BlockView.__init__", "BlockView"], "tokens": 461}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockView:\n \"\"\"An array-like interface to the blocks of an array.\n\n ``BlockView`` provides an array-like interface\n to the blocks of a dask array. Numpy-style indexing of a\n ``BlockView`` returns a selection of blocks as a new dask array.\n\n You can index ``BlockView`` like a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.blocks.size). The dimensionality of the output array matches\n the dimension of this array, even if integer indices are passed.\n Slicing with ``np.newaxis`` or multiple lists is not supported.\n\n Examples\n --------\n >>> import dask.array as da\n >>> from dask.array.core import BlockView\n >>> x = da.arange(8, chunks=2)\n >>> bv = BlockView(x)\n >>> bv.shape # aliases x.numblocks\n (4,)\n >>> bv.size\n 4\n >>> bv[0].compute()\n array([0, 1])\n >>> bv[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> bv[::2].compute()\n array([0, 1, 4, 5])\n >>> bv[[-1, 0]].compute()\n array([6, 7, 0, 1])\n >>> bv.ravel() # doctest: +NORMALIZE_WHITESPACE\n [dask.array,\n dask.array,\n dask.array,\n dask.array]\n\n Returns\n -------\n An instance of ``da.array.Blockview``\n \"\"\"\n\n def __init__(self, array: Array) -> BlockView:\n self._array = array", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView.__getitem___BlockView.__getitem__.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView.__getitem___BlockView.__getitem__.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5475, "end_line": 5502, "span_ids": ["BlockView.__getitem__"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockView:\n\n def __getitem__(\n self, index: int | Sequence[int] | slice | Sequence[slice]\n ) -> Array:\n from .slicing import normalize_index\n\n if not isinstance(index, tuple):\n index = (index,)\n if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:\n raise ValueError(\"Can only slice with a single list\")\n if any(ind is None for ind in index):\n raise ValueError(\"Slicing with np.newaxis or None is not supported\")\n index = normalize_index(index, self._array.numblocks)\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n\n name = \"blocks-\" + 
tokenize(self._array, index)\n\n new_keys = self._array._key_array[index]\n\n chunks = tuple(\n tuple(np.array(c)[i].tolist()) for c, i in zip(self._array.chunks, index)\n )\n\n keys = product(*(range(len(c)) for c in chunks))\n\n layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}\n\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self._array])\n return Array(graph, name, chunks, meta=self._array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView.__eq___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_BlockView.__eq___", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5578, "end_line": 5607, "span_ids": ["BlockView.shape", "impl:11", "BlockView.ravel", "BlockView.size", "BlockView.__eq__"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockView:\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, BlockView):\n return self._array is other._array\n else:\n return NotImplemented\n\n @property\n def size(self) -> int:\n \"\"\"\n The total number of blocks in the array.\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def shape(self) -> tuple[int, ...]:\n \"\"\"\n The number of blocks per axis. 
Alias of ``dask.array.numblocks``.\n \"\"\"\n return self._array.numblocks\n\n def ravel(self) -> list[Array]:\n \"\"\"\n Return a flattened list of all the blocks in the array in C order.\n \"\"\"\n return [self[idx] for idx in np.ndindex(self.shape)]\n\n\nfrom .blockwise import blockwise\nfrom .utils import compute_meta, meta_from_array", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_re__SIGNATURE.f__INPUT_ARGUMENTS__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_re__SIGNATURE.f__INPUT_ARGUMENTS__", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 23, "span_ids": ["imports"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import re\n\nimport numpy as np\nfrom tlz import concat, merge, unique\n\nfrom ..core import flatten\nfrom ..highlevelgraph import HighLevelGraph\nfrom .core import Array, apply_infer_dtype, asarray, blockwise, getitem\nfrom .utils import meta_from_array\n\n# Modified version of `numpy.lib.function_base._parse_gufunc_signature`\n# Modifications:\n# - Allow for zero input arguments\n# See https://docs.scipy.org/doc/numpy/reference/c-api/generalized-ufuncs.html\n_DIMENSION_NAME = r\"\\w+\"\n_CORE_DIMENSION_LIST = \"(?:{0:}(?:,{0:})*,?)?\".format(_DIMENSION_NAME)\n_ARGUMENT = rf\"\\({_CORE_DIMENSION_LIST}\\)\"\n_INPUT_ARGUMENTS = \"(?:{0:}(?:,{0:})*,?)?\".format(_ARGUMENT)\n_OUTPUT_ARGUMENTS = \"{0:}(?:,{0:})*\".format(\n _ARGUMENT\n) # Use `'{0:}(?:,{0:})*,?'` if gufunc-\n# signature should be allowed for length 1 tuple returns\n_SIGNATURE = f\"^{_INPUT_ARGUMENTS}->{_OUTPUT_ARGUMENTS}$\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc._Input_processing__apply_gufunc._Cast_all_input_arrays_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc._Input_processing__apply_gufunc._Cast_all_input_arrays_", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 290, "end_line": 374, "span_ids": ["apply_gufunc"], "tokens": 816}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(\n func,\n signature,\n *args,\n axes=None,\n axis=None,\n keepdims=False,\n 
output_dtypes=None,\n output_sizes=None,\n vectorize=None,\n allow_rechunk=False,\n meta=None,\n **kwargs,\n):\n # Input processing:\n ## Signature\n if not isinstance(signature, str):\n raise TypeError(\"`signature` has to be of type string\")\n # NumPy versions before https://github.com/numpy/numpy/pull/19627\n # would not ignore whitespace characters in `signature` like they\n # are supposed to. We remove the whitespace here as a workaround.\n signature = re.sub(r\"\\s+\", \"\", signature)\n input_coredimss, output_coredimss = _parse_gufunc_signature(signature)\n\n ## Determine nout: nout = None for functions of one direct return; nout = int for return tuples\n nout = None if not isinstance(output_coredimss, list) else len(output_coredimss)\n\n ## Consolidate onto `meta`\n if meta is not None and output_dtypes is not None:\n raise ValueError(\n \"Only one of `meta` and `output_dtypes` should be given (`meta` is preferred).\"\n )\n if meta is None:\n if output_dtypes is None:\n ## Infer `output_dtypes`\n if vectorize:\n tempfunc = np.vectorize(func, signature=signature)\n else:\n tempfunc = func\n output_dtypes = apply_infer_dtype(\n tempfunc, args, kwargs, \"apply_gufunc\", \"output_dtypes\", nout\n )\n\n ## Turn `output_dtypes` into `meta`\n if (\n nout is None\n and isinstance(output_dtypes, (tuple, list))\n and len(output_dtypes) == 1\n ):\n output_dtypes = output_dtypes[0]\n sample = args[0] if args else None\n if nout is None:\n meta = meta_from_array(sample, dtype=output_dtypes)\n else:\n meta = tuple(meta_from_array(sample, dtype=odt) for odt in output_dtypes)\n\n ## Normalize `meta` format\n meta = meta_from_array(meta)\n if isinstance(meta, list):\n meta = tuple(meta)\n\n ## Validate `meta`\n if nout is None:\n if isinstance(meta, tuple):\n if len(meta) == 1:\n meta = meta[0]\n else:\n raise ValueError(\n \"For a function with one output, must give a single item for `output_dtypes`/`meta`, \"\n \"not a tuple or list.\"\n )\n else:\n if not isinstance(meta, tuple):\n raise ValueError(\n f\"For a function with {nout} outputs, must give a tuple or list for `output_dtypes`/`meta`, \"\n \"not a single item.\"\n )\n if len(meta) != nout:\n raise ValueError(\n f\"For a function with {nout} outputs, must give a tuple or list of {nout} items for \"\n f\"`output_dtypes`/`meta`, not {len(meta)}.\"\n )\n\n ## Vectorize function, if required\n if vectorize:\n otypes = [x.dtype for x in meta] if isinstance(meta, tuple) else [meta.dtype]\n func = np.vectorize(func, signature=signature, otypes=otypes)\n\n ## Miscellaneous\n if output_sizes is None:\n output_sizes = {}\n\n ## Axes\n input_axes, output_axes = _validate_normalize_axes(\n axes, axis, keepdims, input_coredimss, output_coredimss\n )\n\n # Main code:\n ## Cast all input arrays to dask\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.args_apply_gufunc._Assert_correct_partit": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/gufunc.py_apply_gufunc.args_apply_gufunc._Assert_correct_partit", "embedding": null, "metadata": {"file_path": "dask/array/gufunc.py", "file_name": "gufunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 375, "end_line": 425, "span_ids": ["apply_gufunc"], "tokens": 632}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def apply_gufunc(\n func,\n signature,\n *args,\n axes=None,\n axis=None,\n keepdims=False,\n output_dtypes=None,\n output_sizes=None,\n vectorize=None,\n allow_rechunk=False,\n meta=None,\n **kwargs,\n):\n # ... other code\n args = [asarray(a) for a in args]\n\n if len(input_coredimss) != len(args):\n raise ValueError(\n \"According to `signature`, `func` requires %d arguments, but %s given\"\n % (len(input_coredimss), len(args))\n )\n\n ## Axes: transpose input arguments\n transposed_args = []\n for arg, iax, input_coredims in zip(args, input_axes, input_coredimss):\n shape = arg.shape\n iax = tuple(a if a < 0 else a - len(shape) for a in iax)\n tidc = tuple(i for i in range(-len(shape) + 0, 0) if i not in iax) + iax\n transposed_arg = arg.transpose(tidc)\n transposed_args.append(transposed_arg)\n args = transposed_args\n\n ## Assess input args for loop dims\n input_shapes = [a.shape for a in args]\n input_chunkss = [a.chunks for a in args]\n num_loopdims = [len(s) - len(cd) for s, cd in zip(input_shapes, input_coredimss)]\n max_loopdims = max(num_loopdims) if num_loopdims else None\n core_input_shapes = [\n dict(zip(icd, s[n:]))\n for s, n, icd in zip(input_shapes, num_loopdims, input_coredimss)\n ]\n core_shapes = merge(*core_input_shapes)\n core_shapes.update(output_sizes)\n\n loop_input_dimss = [\n tuple(\"__loopdim%d__\" % d for d in range(max_loopdims - n, max_loopdims))\n for n in num_loopdims\n ]\n input_dimss = [l + c for l, c in zip(loop_input_dimss, input_coredimss)]\n\n loop_output_dims = max(loop_input_dimss, key=len) if loop_input_dimss else tuple()\n\n ## Assess input args for same size and chunk sizes\n ### Collect sizes and chunksizes of all dims in all arrays\n dimsizess = {}\n chunksizess = {}\n for dims, shape, chunksizes in zip(input_dimss, input_shapes, input_chunkss):\n for dim, size, chunksize in zip(dims, shape, chunksizes):\n dimsizes = dimsizess.get(dim, [])\n dimsizes.append(size)\n dimsizess[dim] = dimsizes\n chunksizes_ = chunksizess.get(dim, [])\n chunksizes_.append(chunksize)\n chunksizess[dim] = chunksizes_\n ### Assert correct partitioning, for case:\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser.walk__Recurser.walk.for_i_xi_in_enumerate_x_.yield_from_self_walk_xi_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/numpy_compat.py__Recurser.walk__Recurser.walk.for_i_xi_in_enumerate_x_.yield_from_self_walk_xi_", "embedding": null, "metadata": {"file_path": "dask/array/numpy_compat.py", "file_name": "numpy_compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 112, "end_line": 128, "span_ids": ["_Recurser.walk"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Recurser:\n\n def walk(self, x, index=()):\n \"\"\"\n Iterate over x, yielding (index, value, entering), where\n\n * ``index``: a tuple of indices up to this point\n * ``value``: equal to ``x[index[0]][...][index[-1]]``. On the first iteration, is\n ``x`` itself\n * ``entering``: bool. The result of ``recurse_if(value)``\n \"\"\"\n do_recurse = self.recurse_if(x)\n yield index, x, do_recurse\n\n if not do_recurse:\n return\n for i, xi in enumerate(x):\n # yield from ...\n yield from self.walk(xi, index + (i,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_warnings__percentiles_from_tdigest.return.np_array_t_quantile_qs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_warnings__percentiles_from_tdigest.return.np_array_t_quantile_qs_", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 64, "span_ids": ["_percentile", "imports", "_tdigest_chunk", "_percentiles_from_tdigest"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nfrom collections.abc import Iterator\nfrom functools import wraps\nfrom numbers import Number\n\nimport numpy as np\nfrom tlz import merge\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom .core import Array\nfrom .numpy_compat import _numpy_122\nfrom .numpy_compat import percentile as np_percentile\n\n\n@wraps(np.percentile)\ndef _percentile(a, q, method=\"linear\"):\n n = len(a)\n if not len(a):\n return None, n\n if isinstance(q, Iterator):\n q = list(q)\n if a.dtype.name == \"category\":\n result = np_percentile(a.cat.codes, q, method=method)\n import pandas as 
pd\n\n return pd.Categorical.from_codes(result, a.dtype.categories, a.dtype.ordered), n\n if type(a.dtype).__name__ == \"DatetimeTZDtype\":\n import pandas as pd\n\n if isinstance(a, (pd.Series, pd.Index)):\n a = a.values\n\n if np.issubdtype(a.dtype, np.datetime64):\n values = a\n a2 = values.view(\"i8\")\n result = np_percentile(a2, q, method=method).astype(values.dtype)\n if q[0] == 0:\n # https://github.com/dask/dask/issues/6864\n result[0] = min(result[0], values.min())\n return result, n\n if not np.issubdtype(a.dtype, np.number):\n method = \"nearest\"\n return np_percentile(a, q, method=method), n\n\n\ndef _tdigest_chunk(a):\n\n from crick import TDigest\n\n t = TDigest()\n t.update(a)\n\n return t\n\n\ndef _percentiles_from_tdigest(qs, digests):\n\n from crick import TDigest\n\n t = TDigest()\n t.merge(*digests)\n\n return np.array(t.quantile(qs / 100.0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles_merge_percentiles.sort_order.np_argsort_combined_vals_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles_merge_percentiles.sort_order.np_argsort_combined_vals_", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 200, "end_line": 274, "span_ids": ["merge_percentiles"], "tokens": 791}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_percentiles(finalq, qs, vals, method=\"lower\", Ns=None, raise_on_nan=True):\n \"\"\"Combine several percentile calculations of different data.\n\n Parameters\n ----------\n\n finalq : numpy.array\n Percentiles to compute (must use same scale as ``qs``).\n qs : sequence of :class:`numpy.array`s\n Percentiles calculated on different sets of data.\n vals : sequence of :class:`numpy.array`s\n Resulting values associated with percentiles ``qs``.\n Ns : sequence of integers\n The number of data elements associated with each data set.\n method : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n Specify the interpolation method to use to calculate final\n percentiles. 
For more information, see :func:`numpy.percentile`.\n\n Examples\n --------\n\n >>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]\n >>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]\n >>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]\n >>> Ns = [100, 100] # Both original arrays had 100 elements\n\n >>> merge_percentiles(finalq, qs, vals, Ns=Ns)\n array([ 1, 2, 3, 4, 10, 11, 12, 13])\n \"\"\"\n from .utils import array_safe\n\n if isinstance(finalq, Iterator):\n finalq = list(finalq)\n finalq = array_safe(finalq, like=finalq)\n qs = list(map(list, qs))\n vals = list(vals)\n if Ns is None:\n vals, Ns = zip(*vals)\n Ns = list(Ns)\n\n L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))\n if not L:\n if raise_on_nan:\n raise ValueError(\"No non-trivial arrays found\")\n return np.full(len(qs[0]) - 2, np.nan)\n qs, vals, Ns = L\n\n # TODO: Perform this check above in percentile once dtype checking is easy\n # Here we silently change meaning\n if vals[0].dtype.name == \"category\":\n result = merge_percentiles(\n finalq, qs, [v.codes for v in vals], method, Ns, raise_on_nan\n )\n import pandas as pd\n\n return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)\n if not np.issubdtype(vals[0].dtype, np.number):\n method = \"nearest\"\n\n if len(vals) != len(qs) or len(Ns) != len(qs):\n raise ValueError(\"qs, vals, and Ns parameters must be the same length\")\n\n # transform qs and Ns into number of observations between percentiles\n counts = []\n for q, N in zip(qs, Ns):\n count = np.empty_like(finalq, shape=len(q))\n count[1:] = np.diff(array_safe(q, like=q[0]))\n count[0] = q[0]\n count *= N\n counts.append(count)\n\n # Sort by calculated percentile values, then number of observations.\n combined_vals = np.concatenate(vals)\n combined_counts = array_safe(np.concatenate(counts), like=combined_vals)\n sort_order = np.argsort(combined_vals)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles.combined_vals_9_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/percentile.py_merge_percentiles.combined_vals_9_", "embedding": null, "metadata": {"file_path": "dask/array/percentile.py", "file_name": "percentile.py", "file_type": "text/x-python", "category": "implementation", "start_line": 275, "end_line": 314, "span_ids": ["merge_percentiles"], "tokens": 414}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def merge_percentiles(finalq, qs, vals, method=\"lower\", Ns=None, raise_on_nan=True):\n # ... 
other code\n combined_vals = np.take(combined_vals, sort_order)\n combined_counts = np.take(combined_counts, sort_order)\n\n # percentile-like, but scaled by total number of observations\n combined_q = np.cumsum(combined_counts)\n\n # rescale finalq percentiles to match combined_q\n finalq = array_safe(finalq, like=combined_vals)\n desired_q = finalq * sum(Ns)\n\n # the behavior of different interpolation methods should be\n # investigated further.\n if method == \"linear\":\n rv = np.interp(desired_q, combined_q, combined_vals)\n else:\n left = np.searchsorted(combined_q, desired_q, side=\"left\")\n right = np.searchsorted(combined_q, desired_q, side=\"right\") - 1\n np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index\n lower = np.minimum(left, right)\n upper = np.maximum(left, right)\n if method == \"lower\":\n rv = combined_vals[lower]\n elif method == \"higher\":\n rv = combined_vals[upper]\n elif method == \"midpoint\":\n rv = 0.5 * (combined_vals[lower] + combined_vals[upper])\n elif method == \"nearest\":\n lower_residual = np.abs(combined_q[lower] - desired_q)\n upper_residual = np.abs(combined_q[upper] - desired_q)\n mask = lower_residual > upper_residual\n index = lower # alias; we no longer need lower\n index[mask] = upper[mask]\n rv = combined_vals[index]\n else:\n raise ValueError(\n \"interpolation method can only be 'linear', 'lower', \"\n \"'higher', 'midpoint', or 'nearest'\"\n )\n return rv", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmin_nanmin.return.reduction_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_nanmin_nanmin.return.reduction_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 516, "end_line": 533, "span_ids": ["nanmin"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef nanmin(a, axis=None, keepdims=False, split_every=None, out=None):\n if np.isnan(a.size):\n raise ValueError(f\"Arrays chunk sizes are unknown. 
{unknown_chunk_message}\")\n if a.size == 0:\n raise ValueError(\n \"zero-size array to reduction operation fmin which has no identity\"\n )\n return reduction(\n a,\n _nanmin_skip,\n _nanmin_skip,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanmin_skip__nanmax_skip.if_x_chunk_size_0_.else_.return.asarray_safe_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py__nanmin_skip__nanmax_skip.if_x_chunk_size_0_.else_.return.asarray_safe_", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 536, "end_line": 571, "span_ids": ["nanmax", "_nanmax_skip", "_nanmin_skip"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _nanmin_skip(x_chunk, axis, keepdims):\n if x_chunk.size > 0:\n return np.nanmin(x_chunk, axis=axis, keepdims=keepdims)\n else:\n return asarray_safe(\n np.array([], dtype=x_chunk.dtype), like=meta_from_array(x_chunk)\n )\n\n\n@derived_from(np)\ndef nanmax(a, axis=None, keepdims=False, split_every=None, out=None):\n if np.isnan(a.size):\n raise ValueError(f\"Arrays chunk sizes are unknown. 
{unknown_chunk_message}\")\n if a.size == 0:\n raise ValueError(\n \"zero-size array to reduction operation fmax which has no identity\"\n )\n return reduction(\n a,\n _nanmax_skip,\n _nanmax_skip,\n axis=axis,\n keepdims=keepdims,\n dtype=a.dtype,\n split_every=split_every,\n out=out,\n )\n\n\ndef _nanmax_skip(x_chunk, axis, keepdims):\n if x_chunk.size > 0:\n return np.nanmax(x_chunk, axis=axis, keepdims=keepdims)\n else:\n return asarray_safe(\n np.array([], dtype=x_chunk.dtype), like=meta_from_array(x_chunk)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_numel_numel.return.np_full_like_x_prod_sha": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_numel_numel.return.np_full_like_x_prod_sha", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 574, "end_line": 603, "span_ids": ["numel"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def numel(x, **kwargs):\n \"\"\"A reduction to count the number of elements\"\"\"\n\n if hasattr(x, \"mask\"):\n return chunk.sum(np.ones_like(x), **kwargs)\n\n shape = x.shape\n keepdims = kwargs.get(\"keepdims\", False)\n axis = kwargs.get(\"axis\", None)\n dtype = kwargs.get(\"dtype\", np.float64)\n\n if axis is None:\n prod = np.prod(shape, dtype=dtype)\n return (\n np.full_like(x, prod, shape=(1,) * len(shape), dtype=dtype)\n if keepdims is True\n else prod\n )\n\n if not isinstance(axis, (tuple, list)):\n axis = [axis]\n\n prod = np.prod([shape[dim] for dim in axis])\n if keepdims is True:\n new_shape = tuple(\n shape[dim] if dim not in axis else 1 for dim in range(len(shape))\n )\n else:\n new_shape = tuple(shape[dim] for dim in range(len(shape)) if dim not in axis)\n return np.full_like(x, prod, shape=new_shape, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_agg_mean_agg.with_np_errstate_divide_.return.divide_total_n_dtype_dt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reductions.py_mean_agg_mean_agg.with_np_errstate_divide_.return.divide_total_n_dtype_dt", "embedding": null, "metadata": {"file_path": "dask/array/reductions.py", "file_name": "reductions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 647, "end_line": 659, "span_ids": ["mean_agg"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mean_agg(pairs, dtype=\"f8\", axis=None, computing_meta=False, **kwargs):\n ns = deepmap(lambda pair: pair[\"n\"], pairs) if not computing_meta else pairs\n n = _concatenate2(ns, axes=axis)\n n = np.sum(n, axis=axis, dtype=dtype, **kwargs)\n\n if computing_meta:\n return n\n\n totals = deepmap(lambda pair: pair[\"total\"], pairs)\n total = _concatenate2(totals, axes=axis).sum(axis=axis, dtype=dtype, **kwargs)\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n return divide(total, n, dtype=dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_warnings_reshape_rechunk.return.tuple_result_inchunks_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_warnings_reshape_rechunk.return.tuple_result_inchunks_t", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 92, "span_ids": ["imports", "reshape_rechunk"], "tokens": 927}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nfrom collections import Counter\nfrom functools import reduce\nfrom itertools import product\nfrom operator import mul\n\nimport numpy as np\n\nfrom .. 
import config\nfrom ..base import tokenize\nfrom ..core import flatten\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import M, parse_bytes\nfrom .core import Array, normalize_chunks\nfrom .utils import meta_from_array\n\n\ndef reshape_rechunk(inshape, outshape, inchunks):\n assert all(isinstance(c, tuple) for c in inchunks)\n ii = len(inshape) - 1\n oi = len(outshape) - 1\n result_inchunks = [None for i in range(len(inshape))]\n result_outchunks = [None for i in range(len(outshape))]\n\n while ii >= 0 or oi >= 0:\n if inshape[ii] == outshape[oi]:\n result_inchunks[ii] = inchunks[ii]\n result_outchunks[oi] = inchunks[ii]\n ii -= 1\n oi -= 1\n continue\n din = inshape[ii]\n dout = outshape[oi]\n if din == 1:\n result_inchunks[ii] = (1,)\n ii -= 1\n elif dout == 1:\n result_outchunks[oi] = (1,)\n oi -= 1\n elif din < dout: # (4, 4, 4) -> (64,)\n ileft = ii - 1\n while (\n ileft >= 0 and reduce(mul, inshape[ileft : ii + 1]) < dout\n ): # 4 < 64, 4*4 < 64, 4*4*4 == 64\n ileft -= 1\n if reduce(mul, inshape[ileft : ii + 1]) != dout:\n raise ValueError(\"Shapes not compatible\")\n\n # Special case to avoid intermediate rechunking:\n # When all the lower axis are completely chunked (chunksize=1) then\n # we're simply moving around blocks.\n if all(len(inchunks[i]) == inshape[i] for i in range(ii)):\n for i in range(ii + 1):\n result_inchunks[i] = inchunks[i]\n result_outchunks[oi] = inchunks[ii] * np.prod(\n list(map(len, inchunks[ileft:ii]))\n )\n else:\n for i in range(ileft + 1, ii + 1): # need single-shape dimensions\n result_inchunks[i] = (inshape[i],) # chunks[i] = (4,)\n\n chunk_reduction = reduce(mul, map(len, inchunks[ileft + 1 : ii + 1]))\n result_inchunks[ileft] = expand_tuple(inchunks[ileft], chunk_reduction)\n\n prod = reduce(mul, inshape[ileft + 1 : ii + 1]) # 16\n result_outchunks[oi] = tuple(\n prod * c for c in result_inchunks[ileft]\n ) # (1, 1, 1, 1) .* 16\n\n oi -= 1\n ii = ileft - 1\n elif din > dout: # (64,) -> (4, 4, 4)\n oleft = oi - 1\n while oleft >= 0 and reduce(mul, outshape[oleft : oi + 1]) < din:\n oleft -= 1\n if reduce(mul, outshape[oleft : oi + 1]) != din:\n raise ValueError(\"Shapes not compatible\")\n\n # TODO: don't coalesce shapes unnecessarily\n cs = reduce(mul, outshape[oleft + 1 : oi + 1])\n\n result_inchunks[ii] = contract_tuple(inchunks[ii], cs) # (16, 16, 16, 16)\n\n for i in range(oleft + 1, oi + 1):\n result_outchunks[i] = (outshape[i],)\n\n result_outchunks[oleft] = tuple(c // cs for c in result_inchunks[ii])\n\n oi = oleft - 1\n ii -= 1\n\n return tuple(result_inchunks), tuple(result_outchunks)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape_reshape.name._reshape_tokenize_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape_reshape.name._reshape_tokenize_x_", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 149, "end_line": 220, "span_ids": ["reshape"], "tokens": 707}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reshape(x, shape, merge_chunks=True, limit=None):\n \"\"\"Reshape array to new shape\n\n Parameters\n ----------\n shape : int or tuple of ints\n The new shape should be compatible with the original shape. If\n an integer, then the result will be a 1-D array of that length.\n One shape dimension can be -1. In this case, the value is\n inferred from the length of the array and remaining dimensions.\n merge_chunks : bool, default True\n Whether to merge chunks using the logic in :meth:`dask.array.rechunk`\n when communication is necessary given the input array chunking and\n the output shape. With ``merge_chunks==False``, the input array will\n be rechunked to a chunksize of 1, which can create very many tasks.\n limit: int (optional)\n The maximum block size to target in bytes. If no limit is provided,\n it defaults to using the ``array.chunk-size`` Dask config value.\n\n Notes\n -----\n This is a parallelized version of the ``np.reshape`` function with the\n following limitations:\n\n 1. It assumes that the array is stored in `row-major order`_\n 2. It only allows for reshapings that collapse or merge dimensions like\n ``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``\n\n .. _`row-major order`: https://en.wikipedia.org/wiki/Row-_and_column-major_order\n\n When communication is necessary this algorithm depends on the logic within\n rechunk. It endeavors to keep chunk sizes roughly the same when possible.\n\n See :ref:`array-chunks.reshaping` for a discussion the tradeoffs of\n ``merge_chunks``.\n\n See Also\n --------\n dask.array.rechunk\n numpy.reshape\n \"\"\"\n # Sanitize inputs, look for -1 in shape\n from .core import PerformanceWarning\n from .slicing import sanitize_index\n\n shape = tuple(map(sanitize_index, shape))\n known_sizes = [s for s in shape if s != -1]\n if len(known_sizes) < len(shape):\n if len(shape) - len(known_sizes) > 1:\n raise ValueError(\"can only specify one unknown dimension\")\n # Fastpath for x.reshape(-1) on 1D arrays, allows unknown shape in x\n # for this case only.\n if len(shape) == 1 and x.ndim == 1:\n return x\n missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))\n shape = tuple(missing_size if s == -1 else s for s in shape)\n\n if np.isnan(sum(x.shape)):\n raise ValueError(\n \"Array chunk size or shape is unknown. shape: %s\\n\\n\"\n \"Possible solution with x.compute_chunk_sizes()\" % str(x.shape)\n )\n\n if reduce(mul, shape, 1) != x.size:\n raise ValueError(\"total size of new array must be unchanged\")\n\n if x.shape == shape:\n return x\n\n meta = meta_from_array(x, len(shape))\n\n name = \"reshape-\" + tokenize(x, shape)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape.if_x_npartitions_1__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/reshape.py_reshape.if_x_npartitions_1__", "embedding": null, "metadata": {"file_path": "dask/array/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 222, "end_line": 288, "span_ids": ["reshape"], "tokens": 684}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reshape(x, shape, merge_chunks=True, limit=None):\n # ... other code\n\n if x.npartitions == 1:\n key = next(flatten(x.__dask_keys__()))\n dsk = {(name,) + (0,) * len(shape): (M.reshape, key, shape)}\n chunks = tuple((d,) for d in shape)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, meta=meta)\n\n # Logic or how to rechunk\n din = len(x.shape)\n dout = len(shape)\n if not merge_chunks and din > dout:\n x = x.rechunk({i: 1 for i in range(din - dout)})\n\n inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)\n # Check output chunks are not too large\n max_chunksize_in_bytes = reduce(mul, [max(i) for i in outchunks]) * x.dtype.itemsize\n\n if limit is None:\n limit = parse_bytes(config.get(\"array.chunk-size\"))\n split = config.get(\"array.slicing.split-large-chunks\", None)\n else:\n limit = parse_bytes(limit)\n split = True\n\n if max_chunksize_in_bytes > limit:\n if split is None:\n msg = (\n \"Reshaping is producing a large chunk. To accept the large\\n\"\n \"chunk and silence this warning, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\\n\"\n \" ... array.reshape(shape)\\n\\n\"\n \"To avoid creating the large chunks, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\\n\"\n \" ... 
array.reshape(shape)\\n\\n\"\n \"Explicitly passing ``limit`` to ``reshape`` will also silence this warning\\n\"\n \" >>> array.reshape(shape, limit='128 MiB')\"\n )\n warnings.warn(msg, PerformanceWarning, stacklevel=6)\n elif split:\n # Leave chunk sizes unaltered where possible\n matching_chunks = Counter(inchunks) & Counter(outchunks)\n chunk_plan = []\n for out in outchunks:\n if matching_chunks[out] > 0:\n chunk_plan.append(out)\n matching_chunks[out] -= 1\n else:\n chunk_plan.append(\"auto\")\n outchunks = normalize_chunks(\n chunk_plan,\n shape=shape,\n limit=limit,\n dtype=x.dtype,\n previous_chunks=inchunks,\n )\n\n x2 = x.rechunk(inchunks)\n\n # Construct graph\n in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))\n out_keys = list(product([name], *[range(len(c)) for c in outchunks]))\n shapes = list(product(*outchunks))\n dsk = {a: (M.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x2])\n return Array(graph, name, outchunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_2d_vstack.return.concatenate_tup_axis_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_atleast_2d_vstack.return.concatenate_tup_axis_0_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 83, "end_line": 125, "span_ids": ["atleast_2d", "vstack", "atleast_1d"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef atleast_2d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None, None]\n elif x.ndim == 1:\n x = x[None, :]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef atleast_1d(*arys):\n new_arys = []\n for x in arys:\n x = asanyarray(x)\n if x.ndim == 0:\n x = x[None]\n\n new_arys.append(x)\n\n if len(new_arys) == 1:\n return new_arys[0]\n else:\n return new_arys\n\n\n@derived_from(np)\ndef vstack(tup, allow_unknown_chunksizes=False):\n if isinstance(tup, Array):\n raise NotImplementedError(\n \"``vstack`` expects a sequence of arrays as the first argument\"\n )\n\n tup = tuple(atleast_2d(x) for x in tup)\n return concatenate(tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_hstack_dstack.return.concatenate_tup_axis_2_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_hstack_dstack.return.concatenate_tup_axis_2_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 153, "span_ids": ["dstack", "hstack"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef hstack(tup, allow_unknown_chunksizes=False):\n if isinstance(tup, Array):\n raise NotImplementedError(\n \"``hstack`` expects a sequence of arrays as the first argument\"\n )\n\n if all(x.ndim == 1 for x in tup):\n return concatenate(\n tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n else:\n return concatenate(\n tup, axis=1, allow_unknown_chunksizes=allow_unknown_chunksizes\n )\n\n\n@derived_from(np)\ndef dstack(tup, allow_unknown_chunksizes=False):\n if isinstance(tup, Array):\n raise NotImplementedError(\n \"``dstack`` expects a sequence of arrays as the first argument\"\n )\n\n tup = tuple(atleast_3d(x) for x in tup)\n return concatenate(tup, axis=2, allow_unknown_chunksizes=allow_unknown_chunksizes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_dot__chunk_sum.if_keepdims_.else_.return.out_squeeze_axis_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_dot__chunk_sum.if_keepdims_.else_.return.out_squeeze_axis_0_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 326, "end_line": 353, "span_ids": ["vdot", "_chunk_sum", "dot"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef dot(a, b):\n return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))\n\n\n@derived_from(np)\ndef vdot(a, b):\n return dot(a.conj().ravel(), b.ravel())\n\n\ndef _chunk_sum(a, axis=None, dtype=None, keepdims=None):\n # Caution: this is not your conventional array-sum: due\n # to the special nature of the preceding blockwise con-\n # traction, each chunk is expected to have exactly the\n # same shape, with a size of 1 for the dimension given\n # by `axis` (the reduction axis). This makes mere ele-\n # ment-wise addition of the arrays possible. 
Besides,\n # the output can be merely squeezed to lose the `axis`-\n # dimension when keepdims = False\n if type(a) is list:\n out = reduce(partial(np.add, dtype=dtype), a)\n else:\n out = a\n\n if keepdims:\n return out\n else:\n return out.squeeze(axis[0])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__sum_wo_cat__matmul.return.chunk_xp_newaxis_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__sum_wo_cat__matmul.return.chunk_xp_newaxis_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 356, "end_line": 387, "span_ids": ["_sum_wo_cat", "_matmul"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _sum_wo_cat(a, axis=None, dtype=None):\n if dtype is None:\n dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), \"dtype\", object)\n\n if a.shape[axis] == 1:\n return a.squeeze(axis)\n\n return reduction(\n a, _chunk_sum, _chunk_sum, axis=axis, dtype=dtype, concatenate=False\n )\n\n\ndef _matmul(a, b):\n xp = np\n\n if is_cupy_type(a):\n # This branch appears to be unnecessary since cupy\n # version 9.0. 
See the following link:\n # https://github.com/dask/dask/pull/8423#discussion_r768291271\n # But it remains here for backward-compatibility.\n # Consider removing it in a future version of dask.\n import cupy\n\n xp = cupy\n\n chunk = xp.matmul(a, b)\n # Since we have performed the contraction via xp.matmul\n # but blockwise expects all dimensions back (including\n # the contraction-axis in the 2nd-to-last position of\n # the output), we must then put it back in the expected\n # position ourselves:\n return chunk[..., xp.newaxis, :]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_over_axes_ptp.return.a_max_axis_axis_a_min_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_apply_over_axes_ptp.return.a_max_axis_axis_a_min_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 516, "end_line": 546, "span_ids": ["ptp", "apply_over_axes"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef apply_over_axes(func, a, axes):\n # Validate arguments\n a = asarray(a)\n try:\n axes = tuple(axes)\n except TypeError:\n axes = (axes,)\n\n sl = a.ndim * (slice(None),)\n\n # Compute using `apply_along_axis`.\n result = a\n for i in axes:\n result = apply_along_axis(func, i, result, 0)\n\n # Restore original dimensionality or error.\n if result.ndim == (a.ndim - 1):\n result = result[sl[:i] + (None,)]\n elif result.ndim != a.ndim:\n raise ValueError(\n \"func must either preserve dimensionality of the input\"\n \" or reduce it by one.\"\n )\n\n return result\n\n\n@derived_from(np)\ndef ptp(a, axis=None):\n return a.max(axis=axis) - a.min(axis=axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_diff_diff.return.r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_diff_diff.return.r", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 549, "end_line": 595, "span_ids": ["diff"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef diff(a, n=1, axis=-1, prepend=None, append=None):\n a = asarray(a)\n n = int(n)\n axis = int(axis)\n\n if n == 0:\n return a\n if n < 0:\n 
raise ValueError(\"order must be non-negative but got %d\" % n)\n\n combined = []\n if prepend is not None:\n prepend = asarray_safe(prepend, like=meta_from_array(a))\n if prepend.ndim == 0:\n shape = list(a.shape)\n shape[axis] = 1\n prepend = broadcast_to(prepend, tuple(shape))\n combined.append(prepend)\n\n combined.append(a)\n\n if append is not None:\n append = asarray_safe(append, like=meta_from_array(a))\n if append.ndim == 0:\n shape = list(a.shape)\n shape[axis] = 1\n append = np.broadcast_to(append, tuple(shape))\n combined.append(append)\n\n if len(combined) > 1:\n a = concatenate(combined, axis)\n\n sl_1 = a.ndim * [slice(None)]\n sl_2 = a.ndim * [slice(None)]\n\n sl_1[axis] = slice(1, None)\n sl_2[axis] = slice(None, -1)\n\n sl_1 = tuple(sl_1)\n sl_2 = tuple(sl_2)\n\n r = a\n for i in range(n):\n r = r[sl_1] - r[sl_2]\n\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram2d_histogram2d.return.counts_edges_0_edges_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_histogram2d_histogram2d.return.counts_edges_0_edges_1", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1038, "end_line": 1109, "span_ids": ["histogram2d"], "tokens": 680}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, density=None):\n \"\"\"Blocked variant of :func:`numpy.histogram2d`.\n\n Parameters\n ----------\n x : dask.array.Array\n An array containing the `x`-coordinates of the points to be\n histogrammed.\n y : dask.array.Array\n An array containing the `y`-coordinates of the points to be\n histogrammed.\n bins : sequence of arrays describing bin edges, int, or sequence of ints\n The bin specification. See the `bins` argument description for\n :py:func:`histogramdd` for a complete description of all\n possible bin configurations (this function is a 2D specific\n version of histogramdd).\n range : tuple of pairs, optional.\n The leftmost and rightmost edges of the bins along each\n dimension when integers are passed to `bins`; of the form:\n ((xmin, xmax), (ymin, ymax)).\n normed : bool, optional\n An alias for the density argument that behaves identically. To\n avoid confusion with the broken argument in the `histogram`\n function, `density` should be preferred.\n weights : dask.array.Array, optional\n An array of values weighing each sample in the input data. The\n chunks of the weights must be identical to the chunking along\n the 0th (row) axis of the data sample.\n density : bool, optional\n If False (the default) return the number of samples in each\n bin. 
If True, the returned array represents the probability\n density function at each bin.\n\n Returns\n -------\n dask.array.Array\n The values of the histogram.\n dask.array.Array\n The edges along the `x`-dimension.\n dask.array.Array\n The edges along the `y`-dimension.\n\n See Also\n --------\n histogram\n histogramdd\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.array([2, 4, 2, 4, 2, 4])\n >>> y = da.array([2, 2, 4, 4, 2, 4])\n >>> bins = 2\n >>> range = ((0, 6), (0, 6))\n >>> h, xedges, yedges = da.histogram2d(x, y, bins=bins, range=range)\n >>> h\n dask.array\n >>> xedges\n dask.array\n >>> h.compute()\n array([[2., 1.],\n [1., 2.]])\n \"\"\"\n counts, edges = histogramdd(\n (x, y),\n bins=bins,\n range=range,\n normed=normed,\n weights=weights,\n density=density,\n )\n return counts, edges[0], edges[1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_no_structured_arr_unique_no_structured_arr.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_no_structured_arr_unique_no_structured_arr.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1630, "end_line": 1683, "span_ids": ["unique_no_structured_arr"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def unique_no_structured_arr(\n ar, return_index=False, return_inverse=False, return_counts=False\n):\n # A simplified version of `unique`, that allows computing unique for array\n # types that don't support structured arrays (such as cupy.ndarray), but\n # can only compute values at the moment.\n\n if (\n return_index is not False\n or return_inverse is not False\n or return_counts is not False\n ):\n raise ValueError(\n \"dask.array.unique does not support `return_index`, `return_inverse` \"\n \"or `return_counts` with array types that don't support structured \"\n \"arrays.\"\n )\n\n ar = ar.ravel()\n\n args = [ar, \"i\"]\n meta = meta_from_array(ar)\n\n out = blockwise(np.unique, \"i\", *args, meta=meta)\n out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)\n\n out_parts = [out]\n\n name = \"unique-aggregate-\" + out.name\n dsk = {\n (name, 0): (\n (np.unique,)\n + tuple(\n (np.concatenate, o.__dask_keys__())\n if hasattr(o, \"__dask_keys__\")\n else o\n for o in out_parts\n )\n )\n }\n\n dependencies = [o for o in out_parts if hasattr(o, \"__dask_keys__\")]\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n chunks = ((np.nan,),)\n out = Array(graph, name, chunks, meta=meta)\n\n result = [out]\n\n if len(result) == 1:\n result = result[0]\n else:\n result = tuple(result)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_unique.None_7.result_append_out_indice": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique_unique.None_7.result_append_out_indice", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1686, "end_line": 1774, "span_ids": ["unique"], "tokens": 745}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n # Test whether the downstream library supports structured arrays. If the\n # `np.empty_like` call raises a `TypeError`, the downstream library (e.g.,\n # CuPy) doesn't support it. In that case we return the\n # `unique_no_structured_arr` implementation, otherwise (e.g., NumPy) just\n # continue as normal.\n try:\n meta = meta_from_array(ar)\n np.empty_like(meta, dtype=[(\"a\", int), (\"b\", float)])\n except TypeError:\n return unique_no_structured_arr(\n ar,\n return_index=return_index,\n return_inverse=return_inverse,\n return_counts=return_counts,\n )\n\n ar = ar.ravel()\n\n # Run unique on each chunk and collect results in a Dask Array of\n # unknown size.\n\n args = [ar, \"i\"]\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n args.extend([arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"indices\", np.intp))\n else:\n args.extend([None, None])\n if return_counts:\n args.extend([ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]), \"i\"])\n out_dtype.append((\"counts\", np.intp))\n else:\n args.extend([None, None])\n\n out = blockwise(_unique_internal, \"i\", *args, dtype=out_dtype, return_inverse=False)\n out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)\n\n # Take the results from the unique chunks and do the following.\n #\n # 1. Collect all results as arguments.\n # 2. Concatenate each result into one big array.\n # 3. 
Pass all results as arguments to the internal unique again.\n #\n # TODO: This should be replaced with a tree reduction using this strategy.\n # xref: https://github.com/dask/dask/issues/2851\n\n out_parts = [out[\"values\"]]\n if return_index:\n out_parts.append(out[\"indices\"])\n else:\n out_parts.append(None)\n if return_counts:\n out_parts.append(out[\"counts\"])\n else:\n out_parts.append(None)\n\n name = \"unique-aggregate-\" + out.name\n dsk = {\n (name, 0): (\n (_unique_internal,)\n + tuple(\n (np.concatenate, o.__dask_keys__())\n if hasattr(o, \"__dask_keys__\")\n else o\n for o in out_parts\n )\n + (return_inverse,)\n )\n }\n out_dtype = [(\"values\", ar.dtype)]\n if return_index:\n out_dtype.append((\"indices\", np.intp))\n if return_inverse:\n out_dtype.append((\"inverse\", np.intp))\n if return_counts:\n out_dtype.append((\"counts\", np.intp))\n\n dependencies = [o for o in out_parts if hasattr(o, \"__dask_keys__\")]\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n chunks = ((np.nan,),)\n out = Array(graph, name, chunks, out_dtype)\n\n # Split out all results to return to the user.\n\n result = [out[\"values\"]]\n if return_index:\n result.append(out[\"indices\"])\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique.None_8_unique.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_unique.None_8_unique.return.result", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1775, "end_line": 1792, "span_ids": ["unique"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n # ... other code\n if return_inverse:\n # Using the returned unique values and arange of unknown length, find\n # each value matching a unique value and replace it with its\n # corresponding index or `0`. There should be only one entry for this\n # index in axis `1` (the one of unknown length). 
Reduce axis `1`\n # through summing to get an array with known dimensionality and the\n # mapping of the original values.\n mtches = (ar[:, None] == out[\"values\"][None, :]).astype(np.intp)\n result.append((mtches * out[\"inverse\"]).sum(axis=1))\n if return_counts:\n result.append(out[\"counts\"])\n\n if len(result) == 1:\n result = result[0]\n else:\n result = tuple(result)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take_take.if_math_isnan_other_numel.else_.warnsize.maxsize_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take_take.if_math_isnan_other_numel.else_.warnsize.maxsize_5", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 585, "end_line": 648, "span_ids": ["take"], "tokens": 769}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def take(outname, inname, chunks, index, itemsize, axis=0):\n \"\"\"Index array with an iterable of index\n\n Handles a single index by a single list\n\n Mimics ``np.take``\n\n >>> from pprint import pprint\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], 8, axis=0)\n >>> chunks\n ((2, 1, 1),)\n >>> pprint(dsk) # doctest: +ELLIPSIS\n {('y', 0): (, ('x', 0), (array([5, 1]),)),\n ('y', 1): (, ('x', 2), (array([7]),)),\n ('y', 2): (, ('x', 0), (array([3]),))}\n\n When list is sorted we retain original block structure\n\n >>> chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], 8, axis=0)\n >>> chunks\n ((3, 1),)\n >>> pprint(dsk) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n {('y', 0): (,\n ('x', 0),\n (array([1, 3, 5]),)),\n ('y', 1): (, ('x', 2), (array([7]),))}\n\n When any indexed blocks would otherwise grow larger than\n dask.config.array.chunk-size, we might split them,\n depending on the value of ``dask.config.slicing.split-large-chunks``.\n\n >>> import dask\n >>> with dask.config.set({\"array.slicing.split-large-chunks\": True}):\n ... chunks, dsk = take('y', 'x', [(1, 1, 1), (1000, 1000), (1000, 1000)],\n ... 
[0] + [1] * 6 + [2], axis=0, itemsize=8)\n >>> chunks\n ((1, 3, 3, 1), (1000, 1000), (1000, 1000))\n \"\"\"\n from .core import PerformanceWarning\n\n plan = slicing_plan(chunks[axis], index)\n if len(plan) >= len(chunks[axis]) * 10:\n factor = math.ceil(len(plan) / len(chunks[axis]))\n\n warnings.warn(\n \"Slicing with an out-of-order index is generating %d \"\n \"times more chunks\" % factor,\n PerformanceWarning,\n stacklevel=6,\n )\n if not is_arraylike(index):\n index = np.asarray(index)\n\n # Check for chunks from the plan that would violate the user's\n # configured chunk size.\n nbytes = utils.parse_bytes(config.get(\"array.chunk-size\"))\n other_chunks = [chunks[i] for i in range(len(chunks)) if i != axis]\n other_numel = np.prod([sum(x) for x in other_chunks])\n\n if math.isnan(other_numel) or other_numel == 0:\n warnsize = maxsize = math.inf\n else:\n maxsize = math.ceil(nbytes / (other_numel * itemsize))\n warnsize = maxsize * 5\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take.split_take.return.tuple_chunks2_dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_take.split_take.return.tuple_chunks2_dsk", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 650, "end_line": 703, "span_ids": ["take"], "tokens": 509}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def take(outname, inname, chunks, index, itemsize, axis=0):\n # ... other code\n\n split = config.get(\"array.slicing.split-large-chunks\", None)\n\n # Warn only when the default is not specified.\n warned = split is not None\n\n for _, index_list in plan:\n if not warned and len(index_list) > warnsize:\n msg = (\n \"Slicing is producing a large chunk. To accept the large\\n\"\n \"chunk and silence this warning, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': False}):\\n\"\n \" ... array[indexer]\\n\\n\"\n \"To avoid creating the large chunks, set the option\\n\"\n \" >>> with dask.config.set(**{'array.slicing.split_large_chunks': True}):\\n\"\n \" ... 
array[indexer]\"\n )\n warnings.warn(msg, PerformanceWarning, stacklevel=6)\n warned = True\n\n where_index = []\n index_lists = []\n for where_idx, index_list in plan:\n index_length = len(index_list)\n if split and index_length > maxsize:\n index_sublist = np.array_split(\n index_list, math.ceil(index_length / maxsize)\n )\n index_lists.extend(index_sublist)\n where_index.extend([where_idx] * len(index_sublist))\n else:\n if not is_arraylike(index_list):\n index_list = np.array(index_list)\n index_lists.append(index_list)\n where_index.append(where_idx)\n\n dims = [range(len(bd)) for bd in chunks]\n\n indims = list(dims)\n indims[axis] = list(range(len(where_index)))\n keys = list(product([outname], *indims))\n\n outdims = list(dims)\n outdims[axis] = where_index\n slices = [[colon] * len(bd) for bd in chunks]\n slices[axis] = index_lists\n slices = list(product(*slices))\n inkeys = list(product([inname], *outdims))\n values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]\n\n chunks2 = list(chunks)\n chunks2[axis] = tuple(map(len, index_lists))\n dsk = dict(zip(keys, values))\n\n return tuple(chunks2), dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_check_index_check_index.if_np_isnan_dimension_.elif_ind_dimension_or_.raise_IndexError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_check_index_check_index.if_np_isnan_dimension_.elif_ind_dimension_or_.raise_IndexError_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 924, "end_line": 991, "span_ids": ["check_index"], "tokens": 573}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_index(axis, ind, dimension):\n \"\"\"Check validity of index for a given dimension\n\n Examples\n --------\n >>> check_index(0, 3, 5)\n >>> check_index(0, 5, 5)\n Traceback (most recent call last):\n ...\n IndexError: Index 5 is out of bounds for axis 0 with size 5\n\n >>> check_index(1, 6, 5)\n Traceback (most recent call last):\n ...\n IndexError: Index 6 is out of bounds for axis 1 with size 5\n\n >>> check_index(1, -1, 5)\n >>> check_index(1, -6, 5)\n Traceback (most recent call last):\n ...\n IndexError: Index -6 is out of bounds for axis 1 with size 5\n\n >>> check_index(0, [1, 2], 5)\n >>> check_index(0, [6, 3], 5)\n Traceback (most recent call last):\n ...\n IndexError: Index is out of bounds for axis 0 with size 5\n\n >>> check_index(1, slice(0, 3), 5)\n\n >>> check_index(0, [True], 1)\n >>> check_index(0, [True, True], 3)\n Traceback (most recent call last):\n ...\n IndexError: Boolean array with size 2 is not long enough for axis 0 with size 3\n >>> check_index(0, [True, True, True], 1)\n Traceback (most recent call last):\n ...\n IndexError: Boolean array with size 3 is not long enough for axis 0 with size 1\n \"\"\"\n if isinstance(ind, list):\n ind = np.asanyarray(ind)\n\n # unknown 
dimension, assumed to be in bounds\n    if np.isnan(dimension):\n        return\n    elif is_dask_collection(ind):\n        return\n    elif is_arraylike(ind):\n        if ind.dtype == bool:\n            if ind.size != dimension:\n                raise IndexError(\n                    f\"Boolean array with size {ind.size} is not long enough \"\n                    f\"for axis {axis} with size {dimension}\"\n                )\n        elif (ind >= dimension).any() or (ind < -dimension).any():\n            raise IndexError(\n                f\"Index is out of bounds for axis {axis} with size {dimension}\"\n            )\n    elif isinstance(ind, slice):\n        return\n    elif ind is None:\n        return\n\n    elif ind >= dimension or ind < -dimension:\n        raise IndexError(\n            f\"Index {ind} is out of bounds for axis {axis} with size {dimension}\"\n        )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.for_i_a_b_j_in_enume_setitem_array.out_name._out_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_setitem_array.for_i_a_b_j_in_enume_setitem_array.out_name._out_name_", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1890, "end_line": 1938, "span_ids": ["setitem_array"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setitem_array(out_name, array, indices, value):\n    # ... other code\n\n    for i, (a, b, j) in enumerate(\n        zip(array_common_shape, value_common_shape, implied_shape_positions)\n    ):\n        index = indices[j]\n        if is_dask_collection(index) and index.dtype == bool:\n            if math.isnan(b) or b <= index.size:\n                base_value_indices.append(None)\n                non_broadcast_dimensions.append(i)\n            else:\n                raise ValueError(\n                    f\"shape mismatch: value array dimension size of {b} is \"\n                    \"greater than corresponding boolean index size of \"\n                    f\"{index.size}\"\n                )\n\n            continue\n\n        if b == 1:\n            base_value_indices.append(slice(None))\n        elif a == b:\n            base_value_indices.append(None)\n            non_broadcast_dimensions.append(i)\n        elif math.isnan(a):\n            base_value_indices.append(None)\n            non_broadcast_dimensions.append(i)\n        else:\n            raise ValueError(\n                f\"shape mismatch: value array of shape {value_shape} \"\n                \"could not be broadcast to indexing result of shape \"\n                f\"{tuple(implied_shape)}\"\n            )\n\n    # Translate chunks tuple to a set of array locations in product\n    # order\n    chunks = array.chunks\n    cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]\n    array_locations = [\n        [(s, s + dim) for s, dim in zip(starts, shapes)]\n        for starts, shapes in zip(cumdims, chunks)\n    ]\n    array_locations = product(*array_locations)\n\n    # Get the dask keys of the most recent layer in the same order as\n    # the array locations.\n    in_keys = list(flatten(array.__dask_keys__()))\n\n    # Create a new \"setitem\" dask entry for each block in the array\n    dsk = {}\n    out_name = (out_name,)\n    # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_test_map_blocks.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_reshape_test_map_blocks.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1129, "end_line": 1305, "span_ids": ["test_reshape_splat", "test_reshape_exceptions", "test_reshape_fails_for_dask_only", "test_reshape_warns_by_default_if_it_is_producing_large_chunks", "test_map_blocks", "test_reshape", "test_reshape_unknown_dimensions", "test_reshape_avoids_large_chunks", "test_full"], "tokens": 2033}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"original_shape,new_shape,chunks\",\n [\n ((10,), (10,), (3, 3, 4)),\n ((10,), (10, 1, 1), 5),\n ((10,), (1, 10), 5),\n ((24,), (2, 3, 4), 12),\n ((1, 24), (2, 3, 4), 12),\n ((2, 3, 4), (24,), (1, 3, 4)),\n ((2, 3, 4), (24,), 4),\n ((2, 3, 4), (24, 1), 4),\n ((2, 3, 4), (1, 24), 4),\n ((4, 4, 1), (4, 4), 2),\n ((4, 4), (4, 4, 1), 2),\n ((1, 4, 4), (4, 4), 2),\n ((1, 4, 4), (4, 4, 1), 2),\n ((1, 4, 4), (1, 1, 4, 4), 2),\n ((4, 4), (1, 4, 4, 1), 2),\n ((4, 4), (1, 4, 4), 2),\n ((2, 3), (2, 3), (1, 2)),\n ((2, 3), (3, 2), 3),\n ((4, 2, 3), (4, 6), 4),\n ((3, 4, 5, 6), (3, 4, 5, 6), (2, 3, 4, 5)),\n ((), (1,), 1),\n ((1,), (), 1),\n ((24,), (3, 8), 24),\n ((24,), (4, 6), 6),\n ((24,), (4, 3, 2), 6),\n ((24,), (4, 6, 1), 6),\n ((24,), (4, 6), (6, 12, 6)),\n ((64, 4), (8, 8, 4), (16, 2)),\n ((4, 64), (4, 8, 4, 2), (2, 16)),\n ((4, 8, 4, 2), (2, 1, 2, 32, 2), (2, 4, 2, 2)),\n ((4, 1, 4), (4, 4), (2, 1, 2)),\n ((0, 10), (0, 5, 2), (5, 5)),\n ((5, 0, 2), (0, 10), (5, 2, 2)),\n ((0,), (2, 0, 2), (4,)),\n ((2, 0, 2), (0,), (4, 4, 4)),\n ],\n)\ndef test_reshape(original_shape, new_shape, chunks):\n x = np.random.randint(10, size=original_shape)\n a = from_array(x, chunks=chunks)\n\n xr = x.reshape(new_shape)\n ar = a.reshape(new_shape)\n\n if a.shape == new_shape:\n assert a is ar\n\n assert_eq(xr, ar)\n\n\ndef test_reshape_exceptions():\n x = np.random.randint(10, size=(5,))\n a = from_array(x, chunks=(2,))\n with pytest.raises(ValueError):\n da.reshape(a, (100,))\n\n\ndef test_reshape_splat():\n x = da.ones((5, 5), chunks=(2, 2))\n assert_eq(x.reshape((25,)), x.reshape(25))\n\n\ndef test_reshape_fails_for_dask_only():\n cases = [((3, 4), (4, 3), 2)]\n for original_shape, new_shape, chunks in cases:\n x = np.random.randint(10, size=original_shape)\n a = from_array(x, chunks=chunks)\n assert x.reshape(new_shape).shape == new_shape\n with pytest.raises(ValueError):\n da.reshape(a, new_shape)\n\n\ndef test_reshape_unknown_dimensions():\n for original_shape in [(24,), (2, 12), (2, 3, 4)]:\n for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:\n x = np.random.randint(10, size=original_shape)\n a = from_array(x, 24)\n 
assert_eq(x.reshape(new_shape), a.reshape(new_shape))\n\n pytest.raises(ValueError, lambda: da.reshape(a, (-1, -1)))\n\n\n@pytest.mark.parametrize(\n \"limit\", # in bytes\n [\n None, # Default value: dask.config.get(\"array.chunk-size\")\n 134217728, # 128 MiB (default value size on a typical laptop)\n 67108864, # 64 MiB (half the typical default value size)\n ],\n)\n@pytest.mark.parametrize(\n \"shape, chunks, reshape_size\",\n [\n # Test reshape where output chunks would otherwise be too large\n ((300, 180, 4, 18483), (-1, -1, 1, 183), (300, 180, -1)),\n # Test reshape where multiple chunks match between input and output\n ((300, 300, 4, 18483), (-1, -1, 1, 183), (300, 300, -1)),\n ],\n)\ndef test_reshape_avoids_large_chunks(limit, shape, chunks, reshape_size):\n array = da.random.random(shape, chunks=chunks)\n if limit is None:\n with dask.config.set(**{\"array.slicing.split_large_chunks\": True}):\n result = array.reshape(*reshape_size, limit=limit)\n else:\n result = array.reshape(*reshape_size, limit=limit)\n nbytes = array.dtype.itemsize\n max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes\n if limit is None:\n limit = parse_bytes(dask.config.get(\"array.chunk-size\"))\n assert max_chunksize_in_bytes < limit\n\n\ndef test_reshape_warns_by_default_if_it_is_producing_large_chunks():\n # Test reshape where output chunks would otherwise be too large\n shape, chunks, reshape_size = (300, 180, 4, 18483), (-1, -1, 1, 183), (300, 180, -1)\n array = da.random.random(shape, chunks=chunks)\n\n with pytest.warns(PerformanceWarning) as record:\n result = array.reshape(*reshape_size)\n nbytes = array.dtype.itemsize\n max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes\n limit = parse_bytes(dask.config.get(\"array.chunk-size\"))\n assert max_chunksize_in_bytes > limit\n\n assert len(record) == 1\n\n with dask.config.set(**{\"array.slicing.split_large_chunks\": False}):\n result = array.reshape(*reshape_size)\n nbytes = array.dtype.itemsize\n max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes\n limit = parse_bytes(dask.config.get(\"array.chunk-size\"))\n assert max_chunksize_in_bytes > limit\n\n with dask.config.set(**{\"array.slicing.split_large_chunks\": True}):\n result = array.reshape(*reshape_size)\n nbytes = array.dtype.itemsize\n max_chunksize_in_bytes = reduce(operator.mul, result.chunksize) * nbytes\n limit = parse_bytes(dask.config.get(\"array.chunk-size\"))\n assert max_chunksize_in_bytes < limit\n\n\ndef test_full():\n d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))\n assert d.chunks == ((2, 1), (2, 2))\n assert_eq(d, np.full((3, 4), 2))\n\n\ndef test_map_blocks():\n x = np.arange(400).reshape((20, 20))\n d = from_array(x, chunks=(7, 7))\n\n e = d.map_blocks(inc, dtype=d.dtype)\n\n assert d.chunks == e.chunks\n assert_eq(e, x + 1)\n\n e = d.map_blocks(inc, name=\"increment\")\n assert e.name.startswith(\"increment-\")\n\n assert d.map_blocks(inc, name=\"foo\").name != d.map_blocks(dec, name=\"foo\").name\n\n d = from_array(x, chunks=(10, 10))\n e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)\n\n assert e.chunks == ((5, 5), (5, 5))\n assert_eq(e, x[::2, ::2])\n\n d = from_array(x, chunks=(8, 8))\n e = d.map_blocks(\n lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)), dtype=d.dtype\n )\n\n assert_eq(e, x[::2, ::2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks2_test_map_blocks_block_info_with_broadcast.expected2._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks2_test_map_blocks_block_info_with_broadcast.expected2._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1308, "end_line": 1458, "span_ids": ["test_map_blocks2", "test_map_blocks_block_info", "test_map_blocks_block_info_with_drop_axis", "test_map_blocks_block_info_with_broadcast", "test_map_blocks_block_info_with_new_axis"], "tokens": 1560}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks2():\n x = np.arange(10, dtype=\"i8\")\n d = from_array(x, chunks=(2,))\n\n def func(block, block_id=None, c=0):\n return np.ones_like(block) * sum(block_id) + c\n\n out = d.map_blocks(func, dtype=\"i8\")\n expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=\"i8\")\n\n assert_eq(out, expected)\n assert same_keys(d.map_blocks(func, dtype=\"i8\"), out)\n\n out = d.map_blocks(func, dtype=\"i8\", c=1)\n expected = expected + 1\n\n assert_eq(out, expected)\n assert same_keys(d.map_blocks(func, dtype=\"i8\", c=1), out)\n\n\ndef test_map_blocks_block_info():\n x = da.arange(50, chunks=10)\n\n def func(a, b, c, block_info=None):\n for idx in [0, 2, None]: # positions in args\n assert block_info[idx][\"shape\"] == (50,)\n assert block_info[idx][\"num-chunks\"] == (5,)\n start, stop = block_info[idx][\"array-location\"][0]\n assert stop - start == 10\n assert 0 <= start <= 40\n assert 10 <= stop <= 50\n\n assert 0 <= block_info[idx][\"chunk-location\"][0] <= 4\n assert block_info[None][\"chunk-shape\"] == (10,)\n assert block_info[None][\"dtype\"] == x.dtype\n\n return a + b + c\n\n z = da.map_blocks(func, x, 100, x + 1, dtype=x.dtype)\n assert_eq(z, x + x + 1 + 100)\n\n\ndef test_map_blocks_block_info_with_new_axis():\n # https://github.com/dask/dask/issues/4298\n values = da.from_array(np.array([\"a\", \"a\", \"b\", \"c\"]), 2)\n\n def func(x, block_info=None):\n assert block_info.keys() == {0, None}\n assert block_info[0][\"shape\"] == (4,)\n assert block_info[0][\"num-chunks\"] == (2,)\n assert block_info[None][\"shape\"] == (4, 3)\n assert block_info[None][\"num-chunks\"] == (2, 1)\n assert block_info[None][\"chunk-shape\"] == (2, 3)\n assert block_info[None][\"dtype\"] == np.dtype(\"f8\")\n\n assert block_info[0][\"chunk-location\"] in {(0,), (1,)}\n\n if block_info[0][\"chunk-location\"] == (0,):\n assert block_info[0][\"array-location\"] == [(0, 2)]\n assert block_info[None][\"chunk-location\"] == (0, 0)\n assert block_info[None][\"array-location\"] == [(0, 2), (0, 3)]\n elif block_info[0][\"chunk-location\"] == (1,):\n assert block_info[0][\"array-location\"] == [(2, 4)]\n assert block_info[None][\"chunk-location\"] == (1, 0)\n assert block_info[None][\"array-location\"] == [(2, 4), (0, 3)]\n\n return np.ones((len(x), 3))\n\n z = values.map_blocks(func, chunks=((2, 2), 3), new_axis=1, dtype=\"f8\")\n 
assert_eq(z, np.ones((4, 3), dtype=\"f8\"))\n\n\ndef test_map_blocks_block_info_with_drop_axis():\n # https://github.com/dask/dask/issues/4584\n values = da.from_array(\n np.array(\n [[1, 2, 4], [8, 16, 32], [64, 128, 256], [1024, 2048, 4096]], dtype=\"u4\"\n ),\n (2, 1),\n )\n\n def func(x, block_info=None):\n assert block_info.keys() == {0, None}\n assert block_info[0][\"shape\"] == (4, 3)\n # drop_axis concatenates along the dropped dimension, hence not (2, 3)\n assert block_info[0][\"num-chunks\"] == (2, 1)\n assert block_info[None][\"shape\"] == (4,)\n assert block_info[None][\"num-chunks\"] == (2,)\n assert block_info[None][\"chunk-shape\"] == (2,)\n assert block_info[None][\"dtype\"] == np.dtype(\"u4\")\n\n assert block_info[0][\"chunk-location\"] in {(0, 0), (1, 0)}\n\n if block_info[0][\"chunk-location\"] == (0, 0):\n assert block_info[0][\"array-location\"] == [(0, 2), (0, 3)]\n assert block_info[None][\"chunk-location\"] == (0,)\n assert block_info[None][\"array-location\"] == [(0, 2)]\n elif block_info[0][\"chunk-location\"] == (1, 0):\n assert block_info[0][\"array-location\"] == [(2, 4), (0, 3)]\n assert block_info[None][\"chunk-location\"] == (1,)\n assert block_info[None][\"array-location\"] == [(2, 4)]\n\n return np.sum(x, axis=1, dtype=\"u4\")\n\n z = values.map_blocks(func, drop_axis=1, dtype=\"u4\")\n assert_eq(z, np.array([7, 56, 448, 7168], dtype=\"u4\"))\n\n\ndef test_map_blocks_block_info_with_broadcast():\n expected0 = [\n {\n \"shape\": (3, 4),\n \"num-chunks\": (1, 2),\n \"array-location\": [(0, 3), (0, 2)],\n \"chunk-location\": (0, 0),\n },\n {\n \"shape\": (3, 4),\n \"num-chunks\": (1, 2),\n \"array-location\": [(0, 3), (2, 4)],\n \"chunk-location\": (0, 1),\n },\n ]\n expected1 = [\n {\n \"shape\": (6, 2),\n \"num-chunks\": (2, 1),\n \"array-location\": [(0, 3), (0, 2)],\n \"chunk-location\": (0, 0),\n },\n {\n \"shape\": (6, 2),\n \"num-chunks\": (2, 1),\n \"array-location\": [(3, 6), (0, 2)],\n \"chunk-location\": (1, 0),\n },\n ]\n expected2 = [\n {\n \"shape\": (4,),\n \"num-chunks\": (2,),\n \"array-location\": [(0, 2)],\n \"chunk-location\": (0,),\n },\n {\n \"shape\": (4,),\n \"num-chunks\": (2,),\n \"array-location\": [(2, 4)],\n \"chunk-location\": (1,),\n },\n ]\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast.expected_test_map_blocks_unique_name_new_axis.assert_x_name_y_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_block_info_with_broadcast.expected_test_map_blocks_unique_name_new_axis.assert_x_name_y_name", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1459, "end_line": 1661, "span_ids": ["test_map_blocks_infer_newaxis", "test_map_blocks_no_array_args", "test_map_blocks_unique_name_new_axis", "test_map_blocks_unique_name_drop_axis", "test_map_blocks_unique_name_chunks_dtype", "test_map_blocks_infer_chunks_broadcast", "test_map_blocks_with_kwargs", "test_map_blocks_block_info_with_broadcast", "test_map_blocks_with_chunks", "test_map_blocks_with_constants", "test_map_blocks_dtype_inference"], "tokens": 2048}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_block_info_with_broadcast():\n # ... other code\n expected = [\n {\n 0: expected0[0],\n 1: expected1[0],\n 2: expected2[0],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(0, 3), (0, 2)],\n \"chunk-location\": (0, 0),\n },\n },\n {\n 0: expected0[1],\n 1: expected1[0],\n 2: expected2[1],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(0, 3), (2, 4)],\n \"chunk-location\": (0, 1),\n },\n },\n {\n 0: expected0[0],\n 1: expected1[1],\n 2: expected2[0],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(3, 6), (0, 2)],\n \"chunk-location\": (1, 0),\n },\n },\n {\n 0: expected0[1],\n 1: expected1[1],\n 2: expected2[1],\n None: {\n \"shape\": (6, 4),\n \"num-chunks\": (2, 2),\n \"dtype\": np.float_,\n \"chunk-shape\": (3, 2),\n \"array-location\": [(3, 6), (2, 4)],\n \"chunk-location\": (1, 1),\n },\n },\n ]\n\n def func(x, y, z, block_info=None):\n for info in expected:\n if block_info[None][\"chunk-location\"] == info[None][\"chunk-location\"]:\n assert block_info == info\n break\n else:\n assert False\n return x + y + z\n\n a = da.ones((3, 4), chunks=(3, 2))\n b = da.ones((6, 2), chunks=(3, 2))\n c = da.ones((4,), chunks=(2,))\n d = da.map_blocks(func, a, b, c, chunks=((3, 3), (2, 2)), dtype=a.dtype)\n assert d.chunks == ((3, 3), (2, 2))\n assert_eq(d, 3 * np.ones((6, 4)))\n\n\ndef test_map_blocks_with_constants():\n d = da.arange(10, chunks=3)\n e = d.map_blocks(add, 100, dtype=d.dtype)\n\n assert_eq(e, np.arange(10) + 100)\n\n assert_eq(da.map_blocks(sub, d, 10, dtype=d.dtype), np.arange(10) - 10)\n assert_eq(da.map_blocks(sub, 10, d, dtype=d.dtype), 10 - np.arange(10))\n\n\ndef 
test_map_blocks_with_kwargs():\n d = da.arange(10, chunks=5)\n\n result = d.map_blocks(np.max, axis=0, keepdims=True, dtype=d.dtype, chunks=(1,))\n\n assert_eq(result, np.array([4, 9]))\n\n\ndef test_map_blocks_infer_chunks_broadcast():\n dx = da.from_array([[1, 2, 3, 4]], chunks=((1,), (2, 2)))\n dy = da.from_array([[10, 20], [30, 40]], chunks=((1, 1), (2,)))\n result = da.map_blocks(lambda x, y: x + y, dx, dy)\n assert result.chunks == ((1, 1), (2, 2))\n assert_eq(result, np.array([[11, 22, 13, 24], [31, 42, 33, 44]]))\n\n\ndef test_map_blocks_with_chunks():\n dx = da.ones((5, 3), chunks=(2, 2))\n dy = da.ones((5, 3), chunks=(2, 2))\n dz = da.map_blocks(np.add, dx, dy, chunks=dx.chunks)\n assert_eq(dz, np.ones((5, 3)) * 2)\n\n\ndef test_map_blocks_dtype_inference():\n x = np.arange(50).reshape((5, 10))\n y = np.arange(10)\n dx = da.from_array(x, chunks=5)\n dy = da.from_array(y, chunks=5)\n\n def foo(x, *args, **kwargs):\n cast = kwargs.pop(\"cast\", \"i8\")\n return (x + sum(args)).astype(cast)\n\n assert_eq(dx.map_blocks(foo, dy, 1), foo(dx, dy, 1))\n assert_eq(dx.map_blocks(foo, dy, 1, cast=\"f8\"), foo(dx, dy, 1, cast=\"f8\"))\n assert_eq(\n dx.map_blocks(foo, dy, 1, cast=\"f8\", dtype=\"f8\"),\n foo(dx, dy, 1, cast=\"f8\", dtype=\"f8\"),\n )\n\n def foo(x):\n raise RuntimeError(\"Woops\")\n\n with pytest.raises(ValueError) as e:\n dx.map_blocks(foo)\n msg = str(e.value)\n assert \"dtype\" in msg\n\n\ndef test_map_blocks_infer_newaxis():\n x = da.ones((5, 3), chunks=(2, 2))\n y = da.map_blocks(lambda x: x[None], x, chunks=((1,), (2, 2, 1), (2, 1)))\n assert_eq(y, da.ones((1, 5, 3)))\n\n\ndef test_map_blocks_no_array_args():\n def func(dtype, block_info=None):\n loc = block_info[None][\"array-location\"]\n return np.arange(loc[0][0], loc[0][1], dtype=dtype)\n\n x = da.map_blocks(func, np.float32, chunks=((5, 3),), dtype=np.float32)\n assert x.chunks == ((5, 3),)\n assert_eq(x, np.arange(8, dtype=np.float32))\n\n\ndef test_map_blocks_unique_name_chunks_dtype():\n def func(block_info=None):\n loc = block_info[None][\"array-location\"]\n dtype = block_info[None][\"dtype\"]\n return np.arange(loc[0][0], loc[0][1], dtype=dtype)\n\n x = da.map_blocks(func, chunks=((5, 3),), dtype=np.float32)\n assert x.chunks == ((5, 3),)\n assert_eq(x, np.arange(8, dtype=np.float32))\n\n y = da.map_blocks(func, chunks=((2, 2, 1, 3),), dtype=np.float32)\n assert y.chunks == ((2, 2, 1, 3),)\n assert_eq(y, np.arange(8, dtype=np.float32))\n assert x.name != y.name\n\n z = da.map_blocks(func, chunks=((5, 3),), dtype=np.float64)\n assert z.chunks == ((5, 3),)\n assert_eq(z, np.arange(8, dtype=np.float64))\n assert x.name != z.name\n assert y.name != z.name\n\n\ndef test_map_blocks_unique_name_drop_axis():\n def func(some_3d, block_info=None):\n if not block_info:\n return some_3d\n dtype = block_info[None][\"dtype\"]\n return np.zeros(block_info[None][\"shape\"], dtype=dtype)\n\n input_arr = da.zeros((3, 4, 5), chunks=((3,), (4,), (5,)), dtype=np.float32)\n x = da.map_blocks(func, input_arr, drop_axis=[0], dtype=np.float32)\n assert x.chunks == ((4,), (5,))\n assert_eq(x, np.zeros((4, 5), dtype=np.float32))\n\n y = da.map_blocks(func, input_arr, drop_axis=[2], dtype=np.float32)\n assert y.chunks == ((3,), (4,))\n assert_eq(y, np.zeros((3, 4), dtype=np.float32))\n assert x.name != y.name\n\n\ndef test_map_blocks_unique_name_new_axis():\n def func(some_2d, block_info=None):\n if not block_info:\n return some_2d\n dtype = block_info[None][\"dtype\"]\n return np.zeros(block_info[None][\"shape\"], dtype=dtype)\n\n 
input_arr = da.zeros((3, 4), chunks=((3,), (4,)), dtype=np.float32)\n x = da.map_blocks(func, input_arr, new_axis=[0], dtype=np.float32)\n assert x.chunks == ((1,), (3,), (4,))\n assert_eq(x, np.zeros((1, 3, 4), dtype=np.float32))\n\n y = da.map_blocks(func, input_arr, new_axis=[2], dtype=np.float32)\n assert y.chunks == ((3,), (4,), (1,))\n assert_eq(y, np.zeros((3, 4, 1), dtype=np.float32))\n assert x.name != y.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_optimize_blockwise_test_store.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_optimize_blockwise_test_store.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1664, "end_line": 1873, "span_ids": ["test_store_delayed_target", "test_repr_meta", "test_repr_html_array_highlevelgraph", "test_map_blocks_optimize_blockwise", "test_dtype", "test_blockdims_from_blockshape", "test_store_kwargs", "test_repr", "test_slicing_with_ellipsis", "test_slicing_flexible_type", "test_slicing_with_object_dtype", "test_bool", "test_slicing_with_ndarray", "test_store", "test_coerce"], "tokens": 1886}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [lambda x, y: x + y, lambda x, y, block_info: x + y])\ndef test_map_blocks_optimize_blockwise(func):\n # Check that map_blocks layers can merge with elementwise layers\n base = [da.full((1,), i, chunks=1) for i in range(4)]\n a = base[0] + base[1]\n b = da.map_blocks(func, a, base[2], dtype=np.int8)\n c = b + base[3]\n dsk = c.__dask_graph__()\n optimized = optimize_blockwise(dsk)\n\n # Everything should be fused into a single layer.\n # If the lambda includes block_info, there will be two layers.\n assert len(optimized.layers) == len(dsk.layers) - 6\n\n\ndef test_repr():\n d = da.ones((4, 4), chunks=(2, 2))\n assert key_split(d.name) in repr(d)\n assert str(d.shape) in repr(d)\n assert str(d.dtype) in repr(d)\n d = da.ones((4000, 4), chunks=(4, 2))\n assert len(str(d)) < 1000\n\n\ndef test_repr_meta():\n d = da.ones((4, 4), chunks=(2, 2))\n assert \"chunktype=numpy.ndarray\" in repr(d)\n\n # Test non-numpy meta\n sparse = pytest.importorskip(\"sparse\")\n s = d.map_blocks(sparse.COO)\n assert \"chunktype=sparse.COO\" in repr(s)\n\n\ndef test_repr_html_array_highlevelgraph():\n pytest.importorskip(\"jinja2\")\n x = da.ones((9, 9), chunks=(3, 3)).T[0:4, 0:4]\n hg = x.dask\n assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None\n for layer in hg.layers.values():\n assert xml.etree.ElementTree.fromstring(layer._repr_html_()) is not None\n\n\ndef test_slicing_with_ellipsis():\n x = np.arange(256).reshape((4, 4, 4, 4))\n d = da.from_array(x, chunks=((2, 2, 2, 2)))\n\n assert_eq(d[..., 1], x[..., 1])\n assert_eq(d[0, ..., 1], x[0, ..., 
1])\n\n\ndef test_slicing_with_ndarray():\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=((4, 4)))\n\n assert_eq(d[np.arange(8)], x)\n assert_eq(d[np.ones(8, dtype=bool)], x)\n assert_eq(d[np.array([1])], x[[1]])\n assert_eq(d[np.array([True, False, True] + [False] * 5)], x[[0, 2]])\n\n\ndef test_slicing_flexible_type():\n a = np.array([[\"a\", \"b\"], [\"c\", \"d\"]])\n b = da.from_array(a, 2)\n\n assert_eq(a[:, 0], b[:, 0])\n\n\ndef test_slicing_with_object_dtype():\n # https://github.com/dask/dask/issues/6892\n d = da.from_array(np.array([\"a\", \"b\"], dtype=object), chunks=(1,))\n assert d.dtype == d[(0,)].dtype\n\n\ndef test_dtype():\n d = da.ones((4, 4), chunks=(2, 2))\n\n assert d.dtype == d.compute().dtype\n assert (d * 1.0).dtype == (d + 1.0).compute().dtype\n assert d.sum().dtype == d.sum().compute().dtype # no shape\n\n\ndef test_blockdims_from_blockshape():\n assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))\n pytest.raises(TypeError, lambda: blockdims_from_blockshape((10,), None))\n assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10,) * 10, (3,))\n assert blockdims_from_blockshape((np.int8(10),), (5,)) == ((5, 5),)\n\n\ndef test_coerce():\n d0 = da.from_array(np.array(1), chunks=(1,))\n d1 = da.from_array(np.array([1]), chunks=(1,))\n with dask.config.set(scheduler=\"sync\"):\n for d in d0, d1:\n assert bool(d) is True\n assert int(d) == 1\n assert float(d) == 1.0\n assert complex(d) == complex(1)\n\n a2 = np.arange(2)\n d2 = da.from_array(a2, chunks=(2,))\n for func in (int, float, complex):\n pytest.raises(TypeError, lambda: func(d2))\n\n\ndef test_bool():\n arr = np.arange(100).reshape((10, 10))\n darr = da.from_array(arr, chunks=(10, 10))\n with pytest.raises(ValueError):\n bool(darr)\n bool(darr == darr)\n\n\ndef test_store_kwargs():\n d = da.ones((10, 10), chunks=(2, 2))\n a = d + 1\n\n called = [False]\n\n def get_func(*args, **kwargs):\n assert kwargs.pop(\"foo\") == \"test kwarg\"\n r = dask.get(*args, **kwargs)\n called[0] = True\n return r\n\n called[0] = False\n at = np.zeros(shape=(10, 10))\n store([a], [at], scheduler=get_func, foo=\"test kwarg\")\n assert called[0]\n\n called[0] = False\n at = np.zeros(shape=(10, 10))\n a.store(at, scheduler=get_func, foo=\"test kwarg\")\n assert called[0]\n\n called[0] = False\n at = np.zeros(shape=(10, 10))\n store([a], [at], scheduler=get_func, return_stored=True, foo=\"test kwarg\")\n assert called[0]\n\n\ndef test_store_delayed_target():\n from dask.delayed import delayed\n\n d = da.ones((4, 4), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n # empty buffers to be used as targets\n targs = {}\n\n def make_target(key):\n a = np.empty((4, 4))\n targs[key] = a\n return a\n\n # delayed calls to these targets\n atd = delayed(make_target)(\"at\")\n btd = delayed(make_target)(\"bt\")\n\n # test not keeping result\n st = store([a, b], [atd, btd])\n\n at = targs[\"at\"]\n bt = targs[\"bt\"]\n\n assert st is None\n assert_eq(at, a)\n assert_eq(bt, b)\n\n # test keeping result\n for st_compute in [False, True]:\n targs.clear()\n\n st = store([a, b], [atd, btd], return_stored=True, compute=st_compute)\n if st_compute:\n assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in st)\n\n st = dask.compute(*st)\n\n at = targs[\"at\"]\n bt = targs[\"bt\"]\n\n assert st is not None\n assert isinstance(st, tuple)\n assert all([isinstance(v, np.ndarray) for v in st])\n assert_eq(at, a)\n assert_eq(bt, b)\n assert_eq(st[0], a)\n assert_eq(st[1], b)\n\n pytest.raises(ValueError, 
lambda: store([a], [at, bt]))\n pytest.raises(ValueError, lambda: store(at, at))\n pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))\n\n\ndef test_store():\n d = da.ones((4, 4), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n at = np.empty(shape=(4, 4))\n bt = np.empty(shape=(4, 4))\n\n st = store([a, b], [at, bt])\n assert st is None\n assert (at == 2).all()\n assert (bt == 3).all()\n\n pytest.raises(ValueError, lambda: store([a], [at, bt]))\n pytest.raises(ValueError, lambda: store(at, at))\n pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions_CounterLock.release.return.self_lock_release_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_regions_CounterLock.release.return.self_lock_release_args_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1876, "end_line": 2045, "span_ids": ["test_store_regions", "NonthreadSafeStore.__init__", "CounterLock.acquire", "test_store_compute_false", "NonthreadSafeStore.__setitem__", "ThreadSafeStore.__setitem__", "NonthreadSafeStore", "test_store_nocompute_regions", "ThreadSafeStore.__init__", "CounterLock", "CounterLock.release", "ThreadSafetyError", "CounterLock.__init__", "ThreadSafeStore"], "tokens": 1702}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_regions():\n d = da.ones((4, 4, 4), dtype=int, chunks=(2, 2, 2))\n a, b = d + 1, d + 2\n a = a[:, 1:, :].astype(float)\n\n region = (slice(None, None, 2), slice(None), [1, 2, 4, 5])\n\n # Single region:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store([a, b], [at, bt], regions=region, compute=False)\n assert isinstance(v, Delayed)\n assert (at == 0).all() and (bt[region] == 0).all()\n assert all([ev is None for ev in v.compute()])\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n\n # Multiple regions:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store([a, b], [at, bt], regions=[region, region], compute=False)\n assert isinstance(v, Delayed)\n assert (at == 0).all() and (bt[region] == 0).all()\n assert all([ev is None for ev in v.compute()])\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n\n # Single region (keep result):\n for st_compute in [False, True]:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store(\n [a, b], [at, bt], regions=region, compute=st_compute, return_stored=True\n )\n assert isinstance(v, tuple)\n assert all([isinstance(e, da.Array) for e in v])\n if st_compute:\n assert all(not 
any(dask.core.get_deps(e.dask)[0].values()) for e in v)\n else:\n assert (at == 0).all() and (bt[region] == 0).all()\n\n ar, br = v\n assert ar.dtype == a.dtype\n assert br.dtype == b.dtype\n assert ar.shape == a.shape\n assert br.shape == b.shape\n assert ar.chunks == a.chunks\n assert br.chunks == b.chunks\n\n ar, br = da.compute(ar, br)\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n assert (br == 3).all()\n assert (ar == 2).all()\n\n # Multiple regions (keep result):\n for st_compute in [False, True]:\n at = np.zeros(shape=(8, 3, 6))\n bt = np.zeros(shape=(8, 4, 6))\n v = store(\n [a, b],\n [at, bt],\n regions=[region, region],\n compute=st_compute,\n return_stored=True,\n )\n assert isinstance(v, tuple)\n assert all([isinstance(e, da.Array) for e in v])\n if st_compute:\n assert all(not any(dask.core.get_deps(e.dask)[0].values()) for e in v)\n else:\n assert (at == 0).all() and (bt[region] == 0).all()\n\n ar, br = v\n assert ar.dtype == a.dtype\n assert br.dtype == b.dtype\n assert ar.shape == a.shape\n assert br.shape == b.shape\n assert ar.chunks == a.chunks\n assert br.chunks == b.chunks\n\n ar, br = da.compute(ar, br)\n assert (at[region] == 2).all() and (bt[region] == 3).all()\n assert not (bt == 3).all() and not (bt == 0).all()\n assert not (at == 2).all() and not (at == 0).all()\n assert (br == 3).all()\n assert (ar == 2).all()\n\n\ndef test_store_compute_false():\n d = da.ones((4, 4), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n at = np.zeros(shape=(4, 4))\n bt = np.zeros(shape=(4, 4))\n\n v = store([a, b], [at, bt], compute=False)\n assert isinstance(v, Delayed)\n\n # You need a well-formed HighLevelgraph for e.g. dask.graph_manipulation.bind\n for layer in v.__dask_layers__():\n assert layer in v.dask.layers\n\n assert (at == 0).all() and (bt == 0).all()\n assert all([ev is None for ev in v.compute()])\n assert (at == 2).all() and (bt == 3).all()\n\n at = np.zeros(shape=(4, 4))\n bt = np.zeros(shape=(4, 4))\n\n dat, dbt = store([a, b], [at, bt], compute=False, return_stored=True)\n assert isinstance(dat, Array) and isinstance(dbt, Array)\n assert (at == 0).all() and (bt == 0).all()\n assert (dat.compute() == at).all() and (dbt.compute() == bt).all()\n assert (at == 2).all() and (bt == 3).all()\n\n\ndef test_store_nocompute_regions():\n x = da.ones(10, chunks=1)\n y = np.zeros((2, 10))\n d1 = da.store(x, y, regions=(0,), compute=False)\n d2 = da.store(x, y, regions=(1,), compute=False)\n assert d1.key != d2.key\n\n\nclass ThreadSafetyError(Exception):\n pass\n\n\nclass NonthreadSafeStore:\n def __init__(self):\n self.in_use = False\n\n def __setitem__(self, key, value):\n if self.in_use:\n raise ThreadSafetyError()\n self.in_use = True\n time.sleep(0.001)\n self.in_use = False\n\n\nclass ThreadSafeStore:\n def __init__(self):\n self.concurrent_uses = 0\n self.max_concurrent_uses = 0\n\n def __setitem__(self, key, value):\n self.concurrent_uses += 1\n self.max_concurrent_uses = max(self.concurrent_uses, self.max_concurrent_uses)\n time.sleep(0.01)\n self.concurrent_uses -= 1\n\n\nclass CounterLock:\n def __init__(self, *args, **kwargs):\n self.lock = Lock(*args, **kwargs)\n\n self.acquire_count = 0\n self.release_count = 0\n\n def acquire(self, *args, **kwargs):\n self.acquire_count += 1\n return self.lock.acquire(*args, **kwargs)\n\n def release(self, *args, **kwargs):\n self.release_count += 1\n return self.lock.release(*args, **kwargs)", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_locks_test_astype.assert_d_astype_f8_is_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_store_locks_test_astype.assert_d_astype_f8_is_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2048, "end_line": 2264, "span_ids": ["test_store_multiprocessing_lock", "test_store_method_return", "test_store_locks", "test_astype", "test_to_dask_dataframe", "test_np_array_with_zero_dimensions", "test_store_deterministic_keys", "test_to_hdf5", "test_dtype_complex"], "tokens": 2040}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_locks():\n _Lock = type(Lock())\n d = da.ones((10, 10), chunks=(2, 2))\n a, b = d + 1, d + 2\n\n at = np.zeros(shape=(10, 10))\n bt = np.zeros(shape=(10, 10))\n\n lock = Lock()\n v = store([a, b], [at, bt], compute=False, lock=lock)\n assert isinstance(v, Delayed)\n dsk = v.dask\n locks = {vv for v in dsk.values() for vv in v if isinstance(vv, _Lock)}\n assert locks == {lock}\n\n # Ensure same lock applies over multiple stores\n at = NonthreadSafeStore()\n v = store([a, b], [at, at], lock=lock, scheduler=\"threads\", num_workers=10)\n assert v is None\n\n # Don't assume thread safety by default\n at = NonthreadSafeStore()\n assert store(a, at, scheduler=\"threads\", num_workers=10) is None\n assert a.store(at, scheduler=\"threads\", num_workers=10) is None\n\n # Ensure locks can be removed\n at = ThreadSafeStore()\n for i in range(10):\n st = a.store(at, lock=False, scheduler=\"threads\", num_workers=10)\n assert st is None\n if at.max_concurrent_uses > 1:\n break\n if i == 9:\n assert False\n\n # Verify number of lock calls\n nchunks = np.sum([np.prod([len(c) for c in e.chunks]) for e in [a, b]])\n for c in (False, True):\n at = np.zeros(shape=(10, 10))\n bt = np.zeros(shape=(10, 10))\n lock = CounterLock()\n\n v = store([a, b], [at, bt], lock=lock, compute=c, return_stored=True)\n assert all(isinstance(e, Array) for e in v)\n\n da.compute(v)\n\n # When `return_stored=True` and `compute=False`,\n # the lock should be acquired only once for store and load steps\n # as they are fused together into one step.\n assert lock.acquire_count == lock.release_count\n if c:\n assert lock.acquire_count == 2 * nchunks\n else:\n assert lock.acquire_count == nchunks\n\n\ndef test_store_method_return():\n d = da.ones((10, 10), chunks=(2, 2))\n a = d + 1\n\n for compute in [False, True]:\n for return_stored in [False, True]:\n at = np.zeros(shape=(10, 10))\n r = a.store(\n at, scheduler=\"threads\", compute=compute, return_stored=return_stored\n )\n\n if return_stored:\n assert isinstance(r, Array)\n elif compute:\n assert r is None\n else:\n assert isinstance(r, Delayed)\n\n\n@pytest.mark.xfail(reason=\"can't lock with multiprocessing\")\ndef 
test_store_multiprocessing_lock():\n d = da.ones((10, 10), chunks=(2, 2))\n a = d + 1\n\n at = np.zeros(shape=(10, 10))\n st = a.store(at, scheduler=\"processes\", num_workers=10)\n assert st is None\n\n\n@pytest.mark.parametrize(\"return_stored\", [False, True])\n@pytest.mark.parametrize(\"delayed_target\", [False, True])\ndef test_store_deterministic_keys(return_stored, delayed_target):\n a = da.ones((10, 10), chunks=(2, 2))\n at = np.zeros(shape=(10, 10))\n if delayed_target:\n at = delayed(at)\n st1 = a.store(at, return_stored=return_stored, compute=False)\n st2 = a.store(at, return_stored=return_stored, compute=False)\n assert st1.dask.keys() == st2.dask.keys()\n\n\ndef test_to_hdf5():\n h5py = pytest.importorskip(\"h5py\")\n x = da.ones((4, 4), chunks=(2, 2))\n y = da.ones(4, chunks=2, dtype=\"i4\")\n\n with tmpfile(\".hdf5\") as fn:\n x.to_hdf5(fn, \"/x\")\n with h5py.File(fn, mode=\"r+\") as f:\n d = f[\"/x\"]\n\n assert_eq(d[:], x)\n assert d.chunks == (2, 2)\n\n with tmpfile(\".hdf5\") as fn:\n x.to_hdf5(fn, \"/x\", chunks=None)\n with h5py.File(fn, mode=\"r+\") as f:\n d = f[\"/x\"]\n\n assert_eq(d[:], x)\n assert d.chunks is None\n\n with tmpfile(\".hdf5\") as fn:\n x.to_hdf5(fn, \"/x\", chunks=(1, 1))\n with h5py.File(fn, mode=\"r+\") as f:\n d = f[\"/x\"]\n\n assert_eq(d[:], x)\n assert d.chunks == (1, 1)\n\n with tmpfile(\".hdf5\") as fn:\n da.to_hdf5(fn, {\"/x\": x, \"/y\": y})\n\n with h5py.File(fn, mode=\"r+\") as f:\n assert_eq(f[\"/x\"][:], x)\n assert f[\"/x\"].chunks == (2, 2)\n assert_eq(f[\"/y\"][:], y)\n assert f[\"/y\"].chunks == (2,)\n\n\ndef test_to_dask_dataframe():\n dd = pytest.importorskip(\"dask.dataframe\")\n a = da.ones((4,), chunks=(2,))\n d = a.to_dask_dataframe()\n assert isinstance(d, dd.Series)\n\n a = da.ones((4, 4), chunks=(2, 2))\n d = a.to_dask_dataframe()\n assert isinstance(d, dd.DataFrame)\n\n\ndef test_np_array_with_zero_dimensions():\n d = da.ones((4, 4), chunks=(2, 2))\n assert_eq(np.array(d.sum()), np.array(d.compute().sum()))\n\n\ndef test_dtype_complex():\n x = np.arange(24).reshape((4, 6)).astype(\"f4\")\n y = np.arange(24).reshape((4, 6)).astype(\"i8\")\n z = np.arange(24).reshape((4, 6)).astype(\"i2\")\n\n a = da.from_array(x, chunks=(2, 3))\n b = da.from_array(y, chunks=(2, 3))\n c = da.from_array(z, chunks=(2, 3))\n\n def assert_eq(a, b):\n return isinstance(a, np.dtype) and isinstance(b, np.dtype) and str(a) == str(b)\n\n assert_eq(a.dtype, x.dtype)\n assert_eq(b.dtype, y.dtype)\n\n assert_eq((a + 1).dtype, (x + 1).dtype)\n assert_eq((a + b).dtype, (x + y).dtype)\n assert_eq(a.T.dtype, x.T.dtype)\n assert_eq(a[:3].dtype, x[:3].dtype)\n assert_eq((a.dot(b.T)).dtype, (x.dot(y.T)).dtype)\n\n assert_eq(stack([a, b]).dtype, np.vstack([x, y]).dtype)\n assert_eq(concatenate([a, b]).dtype, np.concatenate([x, y]).dtype)\n\n assert_eq(b.std().dtype, y.std().dtype)\n assert_eq(c.sum().dtype, z.sum().dtype)\n assert_eq(a.min().dtype, a.min().dtype)\n assert_eq(b.std().dtype, b.std().dtype)\n assert_eq(a.argmin(axis=0).dtype, a.argmin(axis=0).dtype)\n\n assert_eq(da.sin(c).dtype, np.sin(z).dtype)\n assert_eq(da.exp(b).dtype, np.exp(y).dtype)\n assert_eq(da.floor(a).dtype, np.floor(x).dtype)\n assert_eq(da.isnan(b).dtype, np.isnan(y).dtype)\n with contextlib.suppress(ImportError):\n assert da.isnull(b).dtype == \"bool\"\n assert da.notnull(b).dtype == \"bool\"\n\n x = np.array([(\"a\", 1)], dtype=[(\"text\", \"S1\"), (\"numbers\", \"i4\")])\n d = da.from_array(x, chunks=(1,))\n\n assert_eq(d[\"text\"].dtype, x[\"text\"].dtype)\n 
assert_eq(d[[\"numbers\", \"text\"]].dtype, x[[\"numbers\", \"text\"]].dtype)\n\n\ndef test_astype():\n x = np.ones((5, 5), dtype=\"f8\")\n d = da.from_array(x, chunks=(2, 2))\n\n assert d.astype(\"i8\").dtype == \"i8\"\n assert_eq(d.astype(\"i8\"), x.astype(\"i8\"))\n assert same_keys(d.astype(\"i8\"), d.astype(\"i8\"))\n\n with pytest.raises(TypeError):\n d.astype(\"i8\", casting=\"safe\")\n\n with pytest.raises(TypeError):\n d.astype(\"i8\", not_a_real_kwarg=\"foo\")\n\n # smoketest with kwargs\n assert_eq(d.astype(\"i8\", copy=False), x.astype(\"i8\", copy=False))\n\n # Check it's a noop\n assert d.astype(\"f8\") is d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate3_2_test_align_chunks_to_previous_chunks.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_concatenate3_2_test_align_chunks_to_previous_chunks.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2770, "end_line": 2914, "span_ids": ["test_concatenate3_2", "test_concatenate3_nep18_dispatching", "test_normalize_chunks", "test_align_chunks_to_previous_chunks", "test_from_array_with_missing_chunks", "test_map_blocks3"], "tokens": 1883}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concatenate3_2():\n x = np.array([1, 2])\n assert_eq(concatenate3([x, x, x]), np.array([1, 2, 1, 2, 1, 2]))\n\n x = np.array([[1, 2]])\n assert (\n concatenate3([[x, x, x], [x, x, x]])\n == np.array([[1, 2, 1, 2, 1, 2], [1, 2, 1, 2, 1, 2]])\n ).all()\n\n assert (\n concatenate3([[x, x], [x, x], [x, x]])\n == np.array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]])\n ).all()\n\n x = np.arange(12).reshape((2, 2, 3))\n assert_eq(\n concatenate3([[[x, x, x], [x, x, x]], [[x, x, x], [x, x, x]]]),\n np.array(\n [\n [\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n ],\n [\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n ],\n [\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n [0, 1, 2, 0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5, 3, 4, 5],\n ],\n [\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n [6, 7, 8, 6, 7, 8, 6, 7, 8],\n [9, 10, 11, 9, 10, 11, 9, 10, 11],\n ],\n ]\n ),\n )\n\n\n@pytest.mark.parametrize(\"one_d\", [True, False])\n@mock.patch.object(da.core, \"_concatenate2\", wraps=da.core._concatenate2)\ndef test_concatenate3_nep18_dispatching(mock_concatenate2, one_d):\n x = EncapsulateNDArray(np.arange(10))\n concat = [x, x] if one_d else [[x[None]], [x[None]]]\n result = concatenate3(concat)\n assert type(result) is type(x)\n mock_concatenate2.assert_called()\n mock_concatenate2.reset_mock()\n\n # When all the inputs are supported by plain 
`np.concatenate`, we should take the concatenate3\n # fastpath of allocating the full array up front and writing blocks into it.\n concat = [x.arr, x.arr] if one_d else [[x.arr[None]], [x.arr[None]]]\n plain_np_result = concatenate3(concat)\n mock_concatenate2.assert_not_called()\n assert type(plain_np_result) is np.ndarray\n\n\ndef test_map_blocks3():\n x = np.arange(10)\n y = np.arange(10) * 2\n\n d = da.from_array(x, chunks=5)\n e = da.from_array(y, chunks=5)\n\n assert_eq(\n da.core.map_blocks(lambda a, b: a + 2 * b, d, e, dtype=d.dtype), x + 2 * y\n )\n\n z = np.arange(100).reshape((10, 10))\n f = da.from_array(z, chunks=5)\n\n func = lambda a, b: a + 2 * b\n res = da.core.map_blocks(func, d, f, dtype=d.dtype)\n assert_eq(res, x + 2 * z)\n assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)\n\n assert_eq(da.map_blocks(func, f, d, dtype=d.dtype), z + 2 * x)\n\n\ndef test_from_array_with_missing_chunks():\n x = np.random.randn(2, 4, 3)\n d = da.from_array(x, chunks=(None, 2, None))\n assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks\n\n\ndef test_normalize_chunks():\n assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))\n assert normalize_chunks(((3, 3), (8,)), (6, 8)) == ((3, 3), (8,))\n assert normalize_chunks((4, 5), (9,)) == ((4, 5),)\n assert normalize_chunks((4, 5), (9, 9)) == ((4, 4, 1), (5, 4))\n assert normalize_chunks(-1, (5, 5)) == ((5,), (5,))\n assert normalize_chunks((3, -1), (5, 5)) == ((3, 2), (5,))\n assert normalize_chunks((3, None), (5, 5)) == ((3, 2), (5,))\n assert normalize_chunks({0: 3}, (5, 5)) == ((3, 2), (5,))\n assert normalize_chunks([[2, 2], [3, 3]]) == ((2, 2), (3, 3))\n assert normalize_chunks(10, (30, 5)) == ((10, 10, 10), (5,))\n assert normalize_chunks((), (0, 0)) == ((0,), (0,))\n assert normalize_chunks(-1, (0, 3)) == ((0,), (3,))\n assert normalize_chunks(\"auto\", shape=(20,), limit=5, dtype=\"uint8\") == (\n (5, 5, 5, 5),\n )\n assert normalize_chunks((\"auto\", None), (5, 5), dtype=int) == ((5,), (5,))\n\n with pytest.raises(ValueError):\n normalize_chunks(((10,),), (11,))\n with pytest.raises(ValueError):\n normalize_chunks(((5,), (5,)), (5,))\n\n\ndef test_align_chunks_to_previous_chunks():\n chunks = normalize_chunks(\n \"auto\", shape=(2000,), previous_chunks=(512,), limit=\"600 B\", dtype=np.uint8\n )\n assert chunks == ((512, 512, 512, 2000 - 512 * 3),)\n\n chunks = normalize_chunks(\n \"auto\", shape=(2000,), previous_chunks=(128,), limit=\"600 B\", dtype=np.uint8\n )\n assert chunks == ((512, 512, 512, 2000 - 512 * 3),)\n\n chunks = normalize_chunks(\n \"auto\", shape=(2000,), previous_chunks=(512,), limit=\"1200 B\", dtype=np.uint8\n )\n assert chunks == ((1024, 2000 - 1024),)\n\n chunks = normalize_chunks(\n \"auto\",\n shape=(3, 10211, 10376),\n previous_chunks=(1, 512, 512),\n limit=\"1MiB\",\n dtype=np.float32,\n )\n assert chunks[0] == (1, 1, 1)\n assert all(c % 512 == 0 for c in chunks[1][:-1])\n assert all(c % 512 == 0 for c in chunks[2][:-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_on_no_chunks_test_point_slicing.assert_same_keys_result_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_raise_on_no_chunks_test_point_slicing.assert_same_keys_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2917, "end_line": 2978, "span_ids": ["test_ellipsis_slicing", "test_h5py_newaxis", "test_chunks_is_immutable", "test_long_slice", "test_raise_on_no_chunks", "test_point_slicing", "test_raise_on_bad_kwargs"], "tokens": 609}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_raise_on_no_chunks():\n x = da.ones(6, chunks=3)\n try:\n Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)\n assert False\n except ValueError as e:\n assert \"dask\" in str(e)\n assert \".org\" in str(e)\n\n\ndef test_chunks_is_immutable():\n x = da.ones(6, chunks=3)\n try:\n x.chunks = 2\n assert False\n except TypeError as e:\n assert \"rechunk(2)\" in str(e)\n\n\ndef test_raise_on_bad_kwargs():\n x = da.ones(5, chunks=3)\n try:\n da.minimum(x, foo=None)\n except TypeError as e:\n assert \"minimum\" in str(e)\n assert \"foo\" in str(e)\n\n\ndef test_long_slice():\n x = np.arange(10000)\n d = da.from_array(x, chunks=1)\n\n assert_eq(d[8000:8200], x[8000:8200])\n\n\ndef test_h5py_newaxis():\n h5py = pytest.importorskip(\"h5py\")\n\n with tmpfile(\"h5\") as fn:\n with h5py.File(fn, mode=\"a\") as f:\n x = f.create_dataset(\"/x\", shape=(10, 10), dtype=\"f8\")\n d = da.from_array(x, chunks=(5, 5))\n assert d[None, :, :].compute(scheduler=\"sync\").shape == (1, 10, 10)\n assert d[:, None, :].compute(scheduler=\"sync\").shape == (10, 1, 10)\n assert d[:, :, None].compute(scheduler=\"sync\").shape == (10, 10, 1)\n assert same_keys(d[:, :, None], d[:, :, None])\n\n\ndef test_ellipsis_slicing():\n assert_eq(da.ones(4, chunks=2)[...], np.ones(4))\n\n\ndef test_point_slicing():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(3, 4))\n\n result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]\n assert_eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])\n\n result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]\n assert_eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])\n assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_test_to_delayed.assert_a_compute_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_map_blocks_with_changed_dimension_test_to_delayed.assert_a_compute_s", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3205, "end_line": 3390, "span_ids": ["test_broadcast_chunks", "test_map_blocks_with_changed_dimension", "test_to_delayed", "test_map_blocks_with_invalid_drop_axis", 
"test_chunks_error", "test_timedelta_op", "test_map_blocks_with_changed_dimension_and_broadcast_chunks", "test_map_blocks_with_negative_drop_axis", "test_array_compute_forward_kwargs", "test_dont_fuse_outputs", "test_dont_dealias_outputs"], "tokens": 1893}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_blocks_with_changed_dimension():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(7, 4))\n\n e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0, dtype=d.dtype)\n assert e.chunks == ((4, 4),)\n assert_eq(e, x.sum(axis=0))\n\n # Provided chunks have wrong shape\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b.sum(axis=0), chunks=(), drop_axis=0)\n\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b.sum(axis=0), chunks=((4, 4, 4),), drop_axis=0)\n\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b.sum(axis=1), chunks=((3, 4),), drop_axis=1)\n\n d = da.from_array(x, chunks=(4, 8))\n e = d.map_blocks(lambda b: b.sum(axis=1), drop_axis=1, dtype=d.dtype)\n assert e.chunks == ((4, 3),)\n assert_eq(e, x.sum(axis=1))\n\n x = np.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4))\n e = d.map_blocks(\n lambda b: b[None, :, :, None],\n chunks=(1, 4, 4, 1),\n new_axis=[0, 3],\n dtype=d.dtype,\n )\n assert e.chunks == ((1,), (4, 4), (4, 4), (1,))\n assert_eq(e, x[None, :, :, None])\n\n e = d.map_blocks(lambda b: b[None, :, :, None], new_axis=[0, 3], dtype=d.dtype)\n assert e.chunks == ((1,), (4, 4), (4, 4), (1,))\n assert_eq(e, x[None, :, :, None])\n\n # Adding axis with a gap\n with pytest.raises(ValueError):\n d.map_blocks(lambda b: b, new_axis=(3, 4))\n\n # Both new_axis and drop_axis\n d = da.from_array(x, chunks=(8, 4))\n e = d.map_blocks(\n lambda b: b.sum(axis=0)[:, None, None],\n drop_axis=0,\n new_axis=(1, 2),\n dtype=d.dtype,\n )\n assert e.chunks == ((4, 4), (1,), (1,))\n assert_eq(e, x.sum(axis=0)[:, None, None])\n\n d = da.from_array(x, chunks=(4, 8))\n e = d.map_blocks(\n lambda b: b.sum(axis=1)[:, None, None],\n drop_axis=1,\n new_axis=(1, 2),\n dtype=d.dtype,\n )\n assert e.chunks == ((4, 4), (1,), (1,))\n assert_eq(e, x.sum(axis=1)[:, None, None])\n\n\ndef test_map_blocks_with_negative_drop_axis():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(7, 4))\n\n for drop_axis in [0, -2]:\n # test with equivalent positive and negative drop_axis\n e = d.map_blocks(\n lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype\n )\n assert e.chunks == ((4, 4),)\n assert_eq(e, x.sum(axis=0))\n\n\ndef test_map_blocks_with_invalid_drop_axis():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(x, chunks=(7, 4))\n\n for drop_axis in [x.ndim, -x.ndim - 1]:\n with pytest.raises(ValueError):\n d.map_blocks(\n lambda b: b.sum(axis=0), chunks=(4,), drop_axis=drop_axis, dtype=d.dtype\n )\n\n\ndef test_map_blocks_with_changed_dimension_and_broadcast_chunks():\n # https://github.com/dask/dask/issues/4299\n a = da.from_array([1, 2, 3], 3)\n b = da.from_array(np.array([0, 1, 2, 0, 1, 2]), chunks=3)\n result = da.map_blocks(operator.add, a, b, chunks=b.chunks)\n expected = da.from_array(np.array([1, 3, 5, 1, 3, 5]), chunks=3)\n assert_eq(result, expected)\n\n\ndef test_broadcast_chunks():\n assert 
broadcast_chunks() == ()\n\n assert broadcast_chunks(((2, 3),)) == ((2, 3),)\n\n assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)\n\n a = ((10, 10, 10), (5, 5))\n b = ((5, 5),)\n assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))\n assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5))\n\n a = ((10, 10, 10), (5, 5))\n b = ((1,), (5, 5))\n assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5))\n\n a = ((10, 10, 10), (5, 5))\n b = ((3, 3), (5, 5))\n with pytest.raises(ValueError):\n broadcast_chunks(a, b)\n\n a = ((1,), (5, 5))\n b = ((1,), (5, 5))\n assert broadcast_chunks(a, b) == a\n\n a = ((1,), (np.nan, np.nan, np.nan))\n b = ((3, 3), (1,))\n r = broadcast_chunks(a, b)\n assert r[0] == b[0] and np.allclose(r[1], a[1], equal_nan=True)\n\n a = ((3, 3), (1,))\n b = ((1,), (np.nan, np.nan, np.nan))\n r = broadcast_chunks(a, b)\n assert r[0] == a[0] and np.allclose(r[1], b[1], equal_nan=True)\n\n a = ((3, 3), (5, 5))\n b = ((1,), (np.nan, np.nan, np.nan))\n with pytest.raises(ValueError):\n broadcast_chunks(a, b)\n\n\ndef test_chunks_error():\n x = np.ones((10, 10))\n with pytest.raises(ValueError):\n da.from_array(x, chunks=(5,))\n\n\ndef test_array_compute_forward_kwargs():\n x = da.arange(10, chunks=2).sum()\n x.compute(bogus_keyword=10)\n\n\ndef test_dont_fuse_outputs():\n dsk = {(\"x\", 0): np.array([1, 2]), (\"x\", 1): (inc, (\"x\", 0))}\n\n a = da.Array(dsk, \"x\", chunks=(2,), shape=(4,), dtype=np.array([1]).dtype)\n assert_eq(a, np.array([1, 2, 2, 3], dtype=a.dtype))\n\n\ndef test_dont_dealias_outputs():\n dsk = {\n (\"x\", 0, 0): np.ones((2, 2)),\n (\"x\", 0, 1): np.ones((2, 2)),\n (\"x\", 1, 0): np.ones((2, 2)),\n (\"x\", 1, 1): (\"x\", 0, 0),\n }\n\n a = da.Array(dsk, \"x\", chunks=(2, 2), shape=(4, 4), dtype=np.ones(1).dtype)\n assert_eq(a, np.ones((4, 4)))\n\n\ndef test_timedelta_op():\n x = np.array([np.timedelta64(10, \"h\")])\n y = np.timedelta64(1, \"h\")\n a = da.from_array(x, chunks=(1,)) / y\n assert a.compute() == x / y\n\n\ndef test_to_delayed():\n x = da.random.random((4, 4), chunks=(2, 2))\n y = x + 10\n\n [[a, b], [c, d]] = y.to_delayed()\n assert_eq(a.compute(), y[:2, :2])\n\n s = 2\n x = da.from_array(np.array(s), chunks=0)\n a = x.to_delayed()[tuple()]\n assert a.compute() == s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_delayed_optimize_graph_test_concatenate_axes.None_1._too_many_axes": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_to_delayed_optimize_graph_test_concatenate_axes.None_1._too_many_axes", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3441, "end_line": 3630, "span_ids": ["test_A_property", "test_concatenate_axes", "test_from_delayed", "test_map_blocks_token_deprecated", "test_copy_mutate", "test_from_array_names", "test_elemwise_name", "test_map_blocks_name", "test_from_array_raises_on_bad_chunks", "test_to_delayed_optimize_graph", "test_array_picklable", "test_from_delayed_meta", "test_npartitions", "test_cumulative", "test_astype_gh1151"], "tokens": 2014}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed_optimize_graph():\n x = da.ones((4, 4), chunks=(2, 2))\n y = x[1:][1:][1:][:, 1:][:, 1:][:, 1:]\n\n # optimizations\n d = y.to_delayed().flatten().tolist()[0]\n assert len([k for k in d.dask if k[0].startswith(\"getitem\")]) == 1\n assert d.key == (y.name, 0, 0)\n assert d.dask.layers.keys() == {\"delayed-\" + y.name}\n assert d.dask.dependencies == {\"delayed-\" + y.name: set()}\n assert d.__dask_layers__() == (\"delayed-\" + y.name,)\n\n # no optimizations\n d2 = y.to_delayed(optimize_graph=False).flatten().tolist()[0]\n assert d2.dask is y.dask\n assert d2.key == (y.name, 0, 0)\n assert d2.__dask_layers__() == y.__dask_layers__()\n\n assert (d.compute() == d2.compute()).all()\n\n\ndef test_cumulative():\n x = da.arange(20, chunks=5)\n assert_eq(x.cumsum(axis=0), np.arange(20).cumsum())\n assert_eq(x.cumprod(axis=0), np.arange(20).cumprod())\n\n assert_eq(da.nancumsum(x, axis=0), nancumsum(np.arange(20)))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(np.arange(20)))\n\n a = np.random.random(20)\n rs = np.random.RandomState(0)\n a[rs.rand(*a.shape) < 0.5] = np.nan\n x = da.from_array(a, chunks=5)\n assert_eq(da.nancumsum(x, axis=0), nancumsum(a))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(a))\n\n a = np.random.random((20, 24))\n x = da.from_array(a, chunks=(6, 5))\n assert_eq(x.cumsum(axis=0), a.cumsum(axis=0))\n assert_eq(x.cumsum(axis=1), a.cumsum(axis=1))\n assert_eq(x.cumprod(axis=0), a.cumprod(axis=0))\n assert_eq(x.cumprod(axis=1), a.cumprod(axis=1))\n\n assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))\n assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))\n assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))\n\n a = np.random.random((20, 24))\n rs = np.random.RandomState(0)\n a[rs.rand(*a.shape) < 0.5] = np.nan\n x = da.from_array(a, chunks=(6, 5))\n assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))\n assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))\n assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))\n assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))\n\n a = np.random.random((20, 24, 13))\n x = da.from_array(a, chunks=(6, 5, 4))\n for axis in [0, 1, 2, -1, -2, -3]:\n assert_eq(x.cumsum(axis=axis), a.cumsum(axis=axis))\n assert_eq(x.cumprod(axis=axis), a.cumprod(axis=axis))\n\n assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))\n assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))\n\n a = np.random.random((20, 24, 13))\n rs = np.random.RandomState(0)\n a[rs.rand(*a.shape) < 0.5] = np.nan\n x = da.from_array(a, chunks=(6, 5, 4))\n for axis in [0, 1, 2, -1, -2, -3]:\n assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))\n assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))\n\n with pytest.raises(ValueError):\n x.cumsum(axis=3)\n\n with pytest.raises(ValueError):\n x.cumsum(axis=-4)\n\n\ndef test_from_delayed():\n v = delayed(np.ones)((5, 3))\n x = from_delayed(v, shape=(5, 3), dtype=np.ones(0).dtype)\n assert isinstance(x, Array)\n assert_eq(x, np.ones((5, 3)))\n\n\ndef test_from_delayed_meta():\n v = delayed(np.ones)((5, 3))\n x = from_delayed(v, shape=(5, 3), meta=np.ones(0))\n assert isinstance(x, Array)\n assert 
isinstance(x._meta, np.ndarray)\n\n\ndef test_A_property():\n x = da.ones(5, chunks=(2,))\n assert x.A is x\n\n\ndef test_copy_mutate():\n x = da.arange(5, chunks=(2,))\n y = x.copy()\n memo = {}\n y2 = copy.deepcopy(x, memo=memo)\n x[x % 2 == 0] = -1\n\n xx = np.arange(5)\n xx[xx % 2 == 0] = -1\n assert_eq(x, xx)\n\n assert_eq(y, np.arange(5))\n assert_eq(y2, np.arange(5))\n assert memo[id(x)] is y2\n\n\ndef test_npartitions():\n assert da.ones(5, chunks=(2,)).npartitions == 3\n assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6\n\n\ndef test_astype_gh1151():\n a = np.arange(5).astype(np.int32)\n b = da.from_array(a, (1,))\n assert_eq(a.astype(np.int16), b.astype(np.int16))\n\n\ndef test_elemwise_name():\n assert (da.ones(5, chunks=2) + 1).name.startswith(\"add-\")\n\n\ndef test_map_blocks_name():\n assert da.ones(5, chunks=2).map_blocks(inc).name.startswith(\"inc-\")\n\n\ndef test_map_blocks_token_deprecated():\n with pytest.warns(FutureWarning, match=\"use `name=` instead\"):\n x = da.ones(5, chunks=2).map_blocks(inc, token=\"foo\")\n assert x.name.startswith(\"foo-\")\n\n\ndef test_from_array_names():\n pytest.importorskip(\"distributed\")\n\n x = np.ones(10)\n d = da.from_array(x, chunks=2)\n\n names = countby(key_split, d.dask)\n assert set(names.values()) == {5}\n\n\n@pytest.mark.parametrize(\n \"array\", [da.arange(100, chunks=25), da.ones((10, 10), chunks=25)]\n)\ndef test_array_picklable(array):\n from pickle import dumps, loads\n\n a2 = loads(dumps(array))\n assert_eq(array, a2)\n\n\ndef test_from_array_raises_on_bad_chunks():\n x = np.ones(10)\n\n with pytest.raises(ValueError):\n da.from_array(x, chunks=(5, 5, 5))\n\n # with pytest.raises(ValueError):\n # da.from_array(x, chunks=100)\n\n with pytest.raises(ValueError):\n da.from_array(x, chunks=((5, 5, 5),))\n\n\ndef test_concatenate_axes():\n x = np.ones((2, 2, 2))\n\n assert_eq(concatenate_axes([x, x], axes=[0]), np.ones((4, 2, 2)))\n assert_eq(concatenate_axes([x, x, x], axes=[0]), np.ones((6, 2, 2)))\n assert_eq(concatenate_axes([x, x], axes=[1]), np.ones((2, 4, 2)))\n assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 1]), np.ones((4, 4, 2)))\n assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 2]), np.ones((4, 2, 4)))\n assert_eq(concatenate_axes([[x, x, x], [x, x, x]], axes=[1, 2]), np.ones((2, 4, 6)))\n\n with pytest.raises(ValueError):\n concatenate_axes(\n [[x, x], [x, x]], axes=[0]\n ) # not all nested lists accounted for\n with pytest.raises(ValueError):\n concatenate_axes([x, x], axes=[0, 1, 2, 3]) # too many axes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_concatenate_test_index_array_with_array_1d.with_pytest_raises_ValueE.dx_dy_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blockwise_concatenate_test_index_array_with_array_1d.with_pytest_raises_ValueE.dx_dy_5_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3626, "end_line": 3821, "span_ids": ["test_no_chunks_2d", "test_raise_informative_errors_no_chunks", "test_warn_bad_rechunking", "test_common_blockdim", 
"test_concatenate_stack_dont_warn", "test_map_blocks_delayed", "test_no_chunks", "test_index_array_with_array_1d", "test_uneven_chunks_that_fit_neatly", "test_elemwise_uneven_chunks", "test_blockwise_concatenate", "test_uneven_chunks_blockwise", "test_no_chunks_slicing_2d", "test_no_chunks_yes_chunks"], "tokens": 2034}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_concatenate():\n x = da.ones((4, 4, 4), chunks=(2, 2, 2))\n y = da.ones((4, 4), chunks=(2, 2))\n\n def f(a, b):\n assert isinstance(a, np.ndarray)\n assert isinstance(b, np.ndarray)\n\n assert a.shape == (2, 4, 4)\n assert b.shape == (4, 4)\n\n return (a + b).sum(axis=(1, 2))\n\n z = da.blockwise(f, \"i\", x, \"ijk\", y, \"jk\", concatenate=True, dtype=x.dtype)\n assert_eq(z, np.ones(4) * 32)\n\n z = da.blockwise(add, \"ij\", y, \"ij\", y, \"ij\", concatenate=True, dtype=x.dtype)\n assert_eq(z, np.ones((4, 4)) * 2)\n\n def f(a, b, c):\n assert isinstance(a, np.ndarray)\n assert isinstance(b, np.ndarray)\n assert isinstance(c, np.ndarray)\n\n assert a.shape == (4, 2, 4)\n assert b.shape == (4, 4)\n assert c.shape == (4, 2)\n\n return np.ones(2)\n\n z = da.blockwise(\n f, \"j\", x, \"ijk\", y, \"ki\", y, \"ij\", concatenate=True, dtype=x.dtype\n )\n assert_eq(z, np.ones(4), check_shape=False)\n\n\ndef test_common_blockdim():\n assert common_blockdim([(5,), (5,)]) == (5,)\n assert common_blockdim([(5,), (2, 3)]) == (2, 3)\n assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)\n assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)\n assert common_blockdim([(5, 2, 3), (2, 3, 5)]) == (2, 3, 2, 3)\n\n assert common_blockdim([(1, 2), (2, 1)]) == (1, 1, 1)\n assert common_blockdim([(1, 2, 2), (2, 1, 2), (2, 2, 1)]) == (1, 1, 1, 1, 1)\n\n\ndef test_uneven_chunks_that_fit_neatly():\n x = da.arange(10, chunks=((5, 5),))\n y = da.ones(10, chunks=((5, 2, 3),))\n\n assert_eq(x + y, np.arange(10) + np.ones(10))\n\n z = x + y\n assert z.chunks == ((5, 2, 3),)\n\n\ndef test_elemwise_uneven_chunks():\n x = da.arange(10, chunks=((4, 6),))\n y = da.ones(10, chunks=((6, 4),))\n\n assert_eq(x + y, np.arange(10) + np.ones(10))\n\n z = x + y\n assert z.chunks == ((4, 2, 4),)\n\n x = da.random.random((10, 10), chunks=((4, 6), (5, 2, 3)))\n y = da.random.random((4, 10, 10), chunks=((2, 2), (6, 4), (2, 3, 5)))\n\n z = x + y\n assert_eq(x + y, x.compute() + y.compute())\n assert z.chunks == ((2, 2), (4, 2, 4), (2, 3, 2, 3))\n\n\ndef test_uneven_chunks_blockwise():\n x = da.random.random((10, 10), chunks=((2, 3, 2, 3), (5, 5)))\n y = da.random.random((10, 10), chunks=((4, 4, 2), (4, 2, 4)))\n z = da.blockwise(np.dot, \"ik\", x, \"ij\", y, \"jk\", dtype=x.dtype, concatenate=True)\n assert z.chunks == (x.chunks[0], y.chunks[1])\n\n assert_eq(z, x.compute().dot(y))\n\n\ndef test_warn_bad_rechunking():\n x = da.ones((20, 20), chunks=(20, 1))\n y = da.ones((20, 20), chunks=(1, 20))\n\n with pytest.warns(da.core.PerformanceWarning, match=\"factor of 20\"):\n x + y\n\n\ndef test_concatenate_stack_dont_warn():\n with warnings.catch_warnings(record=True) as record:\n da.concatenate([da.ones(2, chunks=1)] * 62)\n assert not record\n\n with warnings.catch_warnings(record=True) as record:\n da.stack([da.ones(2, chunks=1)] * 62)\n assert not record\n\n\ndef 
test_map_blocks_delayed():\n x = da.ones((10, 10), chunks=(5, 5))\n y = np.ones((5, 5))\n\n z = x.map_blocks(add, y, dtype=x.dtype)\n\n yy = delayed(y)\n zz = x.map_blocks(add, yy, dtype=x.dtype)\n\n assert_eq(z, zz)\n\n assert yy.key in zz.dask\n\n\ndef test_no_chunks():\n X = np.arange(11)\n dsk = {(\"x\", 0): np.arange(5), (\"x\", 1): np.arange(5, 11)}\n x = Array(dsk, \"x\", ((np.nan, np.nan),), np.arange(1).dtype)\n assert_eq(x + 1, X + 1)\n assert_eq(x.sum(), X.sum())\n assert_eq((x + 1).std(), (X + 1).std())\n assert_eq((x + x).std(), (X + X).std())\n assert_eq((x + x).std(keepdims=True), (X + X).std(keepdims=True))\n\n\ndef test_no_chunks_2d():\n X = np.arange(24).reshape((4, 6))\n x = da.from_array(X, chunks=(2, 2))\n x._chunks = ((np.nan, np.nan), (np.nan, np.nan, np.nan))\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning) # divide by zero\n assert_eq(da.log(x), np.log(X))\n assert_eq(x.T, X.T)\n assert_eq(x.sum(axis=0, keepdims=True), X.sum(axis=0, keepdims=True))\n assert_eq(x.sum(axis=1, keepdims=True), X.sum(axis=1, keepdims=True))\n assert_eq(x.dot(x.T + 1), X.dot(X.T + 1))\n\n\ndef test_no_chunks_yes_chunks():\n X = np.arange(24).reshape((4, 6))\n x = da.from_array(X, chunks=(2, 2))\n x._chunks = ((2, 2), (np.nan, np.nan, np.nan))\n\n assert (x + 1).chunks == ((2, 2), (np.nan, np.nan, np.nan))\n assert (x.T).chunks == ((np.nan, np.nan, np.nan), (2, 2))\n assert (x.dot(x.T)).chunks == ((2, 2), (2, 2))\n\n\ndef test_raise_informative_errors_no_chunks():\n X = np.arange(10)\n a = da.from_array(X, chunks=(5, 5))\n a._chunks = ((np.nan, np.nan),)\n\n b = da.from_array(X, chunks=(4, 4, 2))\n b._chunks = ((np.nan, np.nan, np.nan),)\n\n for op in [\n lambda: a + b,\n lambda: a[1],\n lambda: a[::2],\n lambda: a[-5],\n lambda: a.rechunk(3),\n lambda: a.reshape(2, 5),\n ]:\n with pytest.raises(ValueError) as e:\n op()\n if \"chunk\" not in str(e.value) or \"unknown\" not in str(e.value):\n op()\n\n\ndef test_no_chunks_slicing_2d():\n X = np.arange(24).reshape((4, 6))\n x = da.from_array(X, chunks=(2, 2))\n x._chunks = ((2, 2), (np.nan, np.nan, np.nan))\n\n assert_eq(x[0], X[0])\n\n for op in [lambda: x[:, 4], lambda: x[:, ::2], lambda: x[0, 2:4]]:\n with pytest.raises(ValueError, match=\"chunk sizes are unknown\"):\n op()\n\n\ndef test_index_array_with_array_1d():\n x = np.arange(10)\n dx = da.from_array(x, chunks=(5,))\n dx._chunks = ((np.nan, np.nan),)\n\n assert_eq(x[x > 6], dx[dx > 6])\n assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])\n\n dy = da.ones(11, chunks=(3,))\n\n with pytest.raises(ValueError):\n dx[dy > 5]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_2d_test_setitem_extended_API_2d.assert_eq_x_dx_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_index_array_with_array_2d_test_setitem_extended_API_2d.assert_eq_x_dx_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3830, "end_line": 3978, "span_ids": ["test_index_array_with_array_3d_2d", "test_setitem_extended_API_1d", 
"test_index_array_with_array_2d", "test_setitem_extended_API_0d", "test_setitem_extended_API_2d", "test_setitem_1d", "test_setitem_2d"], "tokens": 1442}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_array_with_array_2d():\n x = np.arange(24).reshape((4, 6))\n dx = da.from_array(x, chunks=(2, 2))\n\n assert_eq(x[x > 6], dx[dx > 6])\n assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])\n\n # Test with unknown chunks\n dx._chunks = ((2, 2), (np.nan, np.nan, np.nan))\n\n with pytest.warns(UserWarning, match=\"different ordering\") as record:\n assert sorted(x[x % 2 == 0].tolist()) == sorted(\n dx[dx % 2 == 0].compute().tolist()\n )\n assert sorted(x[x > 6].tolist()) == sorted(dx[dx > 6].compute().tolist())\n\n assert len(record) == 2\n\n\n@pytest.mark.xfail(reason=\"Chunking does not align well\")\ndef test_index_array_with_array_3d_2d():\n x = np.arange(4**3).reshape((4, 4, 4))\n dx = da.from_array(x, chunks=(2, 2, 2))\n\n ind = np.random.random((4, 4)) > 0.5\n ind = np.arange(4**2).reshape((4, 4)) % 2 == 0\n dind = da.from_array(ind, (2, 2))\n\n assert_eq(x[ind], dx[dind])\n assert_eq(x[:, ind], dx[:, dind])\n\n\ndef test_setitem_1d():\n x = np.arange(10)\n dx = da.from_array(x.copy(), chunks=(5,))\n\n x[x > 6] = -1\n x[x % 2 == 0] = -2\n\n dx[dx > 6] = -1\n dx[dx % 2 == 0] = -2\n\n assert_eq(x, dx)\n\n\ndef test_setitem_2d():\n x = np.arange(24).reshape((4, 6))\n dx = da.from_array(x.copy(), chunks=(2, 2))\n\n x[x > 6] = -1\n x[x % 2 == 0] = -2\n\n dx[dx > 6] = -1\n dx[dx % 2 == 0] = -2\n\n assert_eq(x, dx)\n\n\ndef test_setitem_extended_API_0d():\n # 0-d array\n x = np.array(9)\n dx = da.from_array(9)\n\n x[()] = -1\n dx[()] = -1\n assert_eq(x, dx.compute())\n\n x[...] = -11\n dx[...] 
= -11\n assert_eq(x, dx.compute())\n\n\n@pytest.mark.parametrize(\n \"index, value\",\n [\n [Ellipsis, -1],\n [slice(2, 8, 2), -2],\n [slice(8, None, 2), -3],\n [slice(8, None, 2), [-30]],\n [slice(1, None, -2), -4],\n [slice(1, None, -2), [-40]],\n [slice(3, None, 2), -5],\n [slice(-3, None, -2), -6],\n [slice(1, None, -2), -4],\n [slice(3, None, 2), -5],\n [slice(3, None, 2), [10, 11, 12, 13]],\n [slice(-4, None, -2), [14, 15, 16, 17]],\n ],\n)\ndef test_setitem_extended_API_1d(index, value):\n # 1-d array\n x = np.arange(10)\n dx = da.from_array(x, chunks=(4, 6))\n dx[index] = value\n x[index] = value\n assert_eq(x, dx.compute())\n\n\n@pytest.mark.parametrize(\n \"index, value\",\n [\n [Ellipsis, -1],\n [(slice(None, None, 2), slice(None, None, -1)), -1],\n [slice(1, None, 2), -1],\n [[4, 3, 1], -1],\n [(Ellipsis, 4), -1],\n [5, -1],\n [(slice(None), 2), range(6)],\n [3, range(10)],\n [(slice(None), [3, 5, 6]), [-30, -31, -32]],\n [([-1, 0, 1], 2), [-30, -31, -32]],\n [(slice(None, 2), slice(None, 3)), [-50, -51, -52]],\n [(slice(None), [6, 1, 3]), [-60, -61, -62]],\n [(slice(1, 3), slice(1, 4)), [[-70, -71, -72]]],\n [(slice(None), [9, 8, 8]), [-80, -81, 91]],\n [([True, False, False, False, True, False], 2), -1],\n [(3, [True, True, False, True, True, False, True, False, True, True]), -1],\n [(np.array([False, False, True, True, False, False]), slice(5, 7)), -1],\n [\n (\n 4,\n da.from_array(\n [False, False, True, True, False, False, True, False, False, True]\n ),\n ),\n -1,\n ],\n [\n (\n slice(2, 4),\n da.from_array(\n [False, False, True, True, False, False, True, False, False, True]\n ),\n ),\n [[-100, -101, -102, -103], [-200, -201, -202, -203]],\n ],\n [slice(5, None, 2), -99],\n [slice(5, None, 2), range(1, 11)],\n [slice(1, None, -2), -98],\n [slice(1, None, -2), range(11, 21)],\n ],\n)\ndef test_setitem_extended_API_2d(index, value):\n # 2-d array\n x = np.ma.arange(60).reshape((6, 10))\n dx = da.from_array(x, chunks=(2, 3))\n dx[index] = value\n x[index] = value\n assert_eq(x, dx.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs_test_setitem_errs.None_22.dx_0_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs_test_setitem_errs.None_22.dx_0_0", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 3921, "end_line": 4152, "span_ids": ["test_setitem_errs", "test_setitem_extended_API_2d_rhs_func_of_lhs", "test_setitem_on_read_only_blocks", "test_setitem_extended_API_2d_mask"], "tokens": 2182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem_extended_API_2d_rhs_func_of_lhs():\n # Cases:\n # * RHS and/or indices are a function of the LHS\n # * Indices have unknown chunk sizes\n # * RHS has extra 
leading size 1 dimensions compared to LHS\n x = np.arange(60).reshape((6, 10))\n chunks = (2, 3)\n\n dx = da.from_array(x, chunks=chunks)\n dx[2:4, dx[0] > 3] = -5\n x[2:4, x[0] > 3] = -5\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[2, dx[0] < -2] = -7\n x[2, x[0] < -2] = -7\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[dx % 2 == 0] = -8\n x[x % 2 == 0] = -8\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[dx % 2 == 0] = -8\n x[x % 2 == 0] = -8\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[3:5, 5:1:-2] = -dx[:2, 4:1:-2]\n x[3:5, 5:1:-2] = -x[:2, 4:1:-2]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[0, 1:3] = -dx[0, 4:2:-1]\n x[0, 1:3] = -x[0, 4:2:-1]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[...] = dx\n x[...] = x\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[...] = dx[...]\n x[...] = x[...]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[0] = dx[-1]\n x[0] = x[-1]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[0, :] = dx[-2, :]\n x[0, :] = x[-2, :]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[:, 1] = dx[:, -3]\n x[:, 1] = x[:, -3]\n assert_eq(x, dx.compute())\n\n index = da.from_array([0, 2], chunks=(2,))\n dx = da.from_array(x, chunks=chunks)\n dx[index, 8] = [99, 88]\n x[[0, 2], 8] = [99, 88]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[:, index] = dx[:, :2]\n x[:, [0, 2]] = x[:, :2]\n assert_eq(x, dx.compute())\n\n index = da.where(da.arange(3, chunks=(1,)) < 2)[0]\n dx = da.from_array(x, chunks=chunks)\n dx[index, 7] = [-23, -33]\n x[index.compute(), 7] = [-23, -33]\n assert_eq(x, dx.compute())\n\n index = da.where(da.arange(3, chunks=(1,)) < 2)[0]\n dx = da.from_array(x, chunks=chunks)\n dx[(index,)] = -34\n x[(index.compute(),)] = -34\n assert_eq(x, dx.compute())\n\n index = index - 4\n dx = da.from_array(x, chunks=chunks)\n dx[index, 7] = [-43, -53]\n x[index.compute(), 7] = [-43, -53]\n assert_eq(x, dx.compute())\n\n index = da.from_array([0, -1], chunks=(1,))\n x[[0, -1]] = 9999\n dx[(index,)] = 9999\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=(-1, -1))\n dx[...] = da.from_array(x, chunks=chunks)\n assert_eq(x, dx.compute())\n\n # RHS has extra leading size 1 dimensions compared to LHS\n dx = da.from_array(x.copy(), chunks=(2, 3))\n v = x.reshape((1, 1) + x.shape)\n x[...] = v\n dx[...] 
= v\n assert_eq(x, dx.compute())\n\n index = da.where(da.arange(3, chunks=(1,)) < 2)[0]\n v = -np.arange(12).reshape(1, 1, 6, 2)\n x[:, [0, 1]] = v\n dx[:, index] = v\n assert_eq(x, dx.compute())\n\n\n@pytest.mark.parametrize(\n \"index, value\",\n [\n [(1, slice(1, 7, 2)), np.ma.masked],\n [(slice(1, 5, 2), [7, 5]), np.ma.masked_all((2, 2))],\n ],\n)\ndef test_setitem_extended_API_2d_mask(index, value):\n x = np.ma.arange(60).reshape((6, 10))\n dx = da.from_array(x.data, chunks=(2, 3))\n dx[index] = value\n x[index] = value\n dx = dx.persist()\n assert_eq(x, dx.compute())\n assert_eq(x.mask, da.ma.getmaskarray(dx).compute())\n\n\ndef test_setitem_on_read_only_blocks():\n # Outputs of broadcast_trick-style functions contain read-only\n # arrays\n dx = da.empty((4, 6), dtype=float, chunks=(2, 2))\n dx[0] = 99\n\n assert_eq(dx[0, 0], 99.0)\n\n dx[0:2] = 88\n\n assert_eq(dx[0, 0], 88.0)\n\n\ndef test_setitem_errs():\n x = da.ones((4, 4), chunks=(2, 2))\n\n with pytest.raises(ValueError):\n x[x > 1] = x\n\n # Shape mismatch\n with pytest.raises(ValueError):\n x[[True, True, False, False], 0] = [2, 3, 4]\n\n with pytest.raises(ValueError):\n x[[True, True, True, False], 0] = [2, 3]\n\n with pytest.raises(ValueError):\n x[0, [True, True, True, False]] = [2, 3]\n\n with pytest.raises(ValueError):\n x[0, [True, True, True, False]] = [1, 2, 3, 4, 5]\n\n with pytest.raises(ValueError):\n x[da.from_array([True, True, True, False]), 0] = [1, 2, 3, 4, 5]\n\n with pytest.raises(ValueError):\n x[0, da.from_array([True, False, False, True])] = [1, 2, 3, 4, 5]\n\n with pytest.raises(ValueError):\n x[:, 0] = [2, 3, 4]\n\n with pytest.raises(ValueError):\n x[0, :] = [1, 2, 3, 4, 5]\n\n x = da.ones((4, 4), chunks=(2, 2))\n\n # Too many indices\n with pytest.raises(IndexError):\n x[:, :, :] = 2\n\n # 2-d boolean indexing a single dimension\n with pytest.raises(IndexError):\n x[[[True, True, False, False]], 0] = 5\n\n # Too many/not enough booleans\n with pytest.raises(IndexError):\n x[[True, True, False]] = 5\n\n with pytest.raises(IndexError):\n x[[False, True, True, True, False]] = 5\n\n # 2-d indexing a single dimension\n with pytest.raises(IndexError):\n x[[[1, 2, 3]], 0] = 5\n\n # Multiple 1-d boolean/integer arrays\n with pytest.raises(NotImplementedError):\n x[[1, 2], [2, 3]] = 6\n\n with pytest.raises(NotImplementedError):\n x[[True, True, False, False], [2, 3]] = 5\n\n with pytest.raises(NotImplementedError):\n x[[True, True, False, False], [False, True, False, False]] = 7\n\n # scalar boolean indexing\n with pytest.raises(NotImplementedError):\n x[True] = 5\n\n with pytest.raises(NotImplementedError):\n x[np.array(True)] = 5\n\n with pytest.raises(NotImplementedError):\n x[0, da.from_array(True)] = 5\n\n # Scalar arrays\n y = da.from_array(np.array(1))\n with pytest.raises(IndexError):\n y[:] = 2\n\n # RHS has non-broadcastable extra leading dimensions\n x = np.arange(12).reshape((3, 4))\n dx = da.from_array(x, chunks=(2, 2))\n with pytest.raises(ValueError):\n dx[...] 
= np.arange(24).reshape((2, 1, 3, 4))\n\n # RHS doesn't have chunks set\n dx = da.unique(da.random.random([10]))\n with pytest.raises(ValueError, match=\"Arrays chunk sizes are unknown\"):\n dx[0] = 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructors_chunks_dict_test_pandas_from_dask_array.if_PANDAS_GT_130_and_not_.else_.assert_eq_s_values_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_constructors_chunks_dict_test_pandas_from_dask_array.if_PANDAS_GT_130_and_not_.else_.assert_eq_s_values_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4367, "end_line": 4417, "span_ids": ["test_from_array_chunks_dict", "test_normalize_chunks_object_dtype", "test_pandas_from_dask_array", "test_normalize_chunks_nan", "test_constructors_chunks_dict", "test_normalize_chunks_tuples_of_tuples"], "tokens": 542}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_constructors_chunks_dict():\n x = da.ones((20, 20), chunks={0: 10, 1: 5})\n assert x.chunks == ((10, 10), (5, 5, 5, 5))\n\n x = da.ones((20, 20), chunks={0: 10, 1: \"auto\"})\n assert x.chunks == ((10, 10), (20,))\n\n\ndef test_from_array_chunks_dict():\n with dask.config.set({\"array.chunk-size\": \"128kiB\"}):\n x = np.empty((100, 100, 100))\n y = da.from_array(x, chunks={0: 10, 1: -1, 2: \"auto\"})\n z = da.from_array(x, chunks=(10, 100, 10))\n assert y.chunks == z.chunks\n\n\n@pytest.mark.parametrize(\"dtype\", [object, [(\"a\", object), (\"b\", int)]])\ndef test_normalize_chunks_object_dtype(dtype):\n x = np.array([\"a\", \"abc\"], dtype=object)\n with pytest.raises(NotImplementedError):\n da.from_array(x, chunks=\"auto\")\n\n\ndef test_normalize_chunks_tuples_of_tuples():\n result = normalize_chunks(((2, 3, 5), \"auto\"), (10, 10), limit=10, dtype=np.uint8)\n expected = ((2, 3, 5), (2, 2, 2, 2, 2))\n assert result == expected\n\n\ndef test_normalize_chunks_nan():\n with pytest.raises(ValueError) as info:\n normalize_chunks(\"auto\", (np.nan,), limit=10, dtype=np.uint8)\n assert \"auto\" in str(info.value)\n with pytest.raises(ValueError) as info:\n normalize_chunks(((np.nan, np.nan), \"auto\"), (10, 10), limit=10, dtype=np.uint8)\n assert \"auto\" in str(info.value)\n\n\ndef test_pandas_from_dask_array():\n pd = pytest.importorskip(\"pandas\")\n from dask.dataframe._compat import PANDAS_GT_130, PANDAS_GT_131\n\n a = da.ones((12,), chunks=4)\n s = pd.Series(a, index=range(12))\n\n if PANDAS_GT_130 and not PANDAS_GT_131:\n # https://github.com/pandas-dev/pandas/issues/38645\n assert s.dtype != a.dtype\n else:\n assert s.dtype == a.dtype\n assert_eq(s.values, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_stack_functions_require_sequence_of_arrays_test_array_function_fft.assert_eq_res_y_res_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_stack_functions_require_sequence_of_arrays_test_array_function_fft.assert_eq_res_y_res_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 76, "span_ids": ["test_array_function_fft", "test_stack_functions_require_sequence_of_arrays"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: np.dstack(x),\n lambda x: np.hstack(x),\n lambda x: np.vstack(x),\n ],\n)\ndef test_stack_functions_require_sequence_of_arrays(func):\n x = np.random.random((100, 100))\n y = da.from_array(x, chunks=(50, 50))\n\n with pytest.raises(\n NotImplementedError, match=\"expects a sequence of arrays as the first argument\"\n ):\n func(y)\n\n\n@pytest.mark.parametrize(\"func\", [np.fft.fft, np.fft.fft2])\ndef test_array_function_fft(func):\n x = np.random.random((100, 100))\n y = da.from_array(x, chunks=(100, 100))\n res_x = func(x)\n res_y = func(y)\n\n if func.__module__ != \"mkl_fft._numpy_fft\":\n assert isinstance(res_y, da.Array)\n assert_eq(res_y, res_x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_notimpl_function_dask_test_array_function_sparse.assert_eq_func_x_func_y": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_array_notimpl_function_dask_test_array_function_sparse.assert_eq_func_x_func_y", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 107, "span_ids": ["test_array_function_sparse", "test_array_notimpl_function_dask"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [\n lambda x: np.min_scalar_type(x),\n lambda x: np.linalg.det(x),\n lambda x: np.linalg.eigvals(x),\n ],\n)\ndef test_array_notimpl_function_dask(func):\n x = np.random.random((100, 100))\n y = da.from_array(x, chunks=(50, 50))\n\n with pytest.warns(\n FutureWarning, match=\"The `.*` function is not implemented by Dask\"\n ):\n func(y)\n\n\n@pytest.mark.parametrize(\n 
\"func\", [lambda x: np.real(x), lambda x: np.imag(x), lambda x: np.transpose(x)]\n)\ndef test_array_function_sparse(func):\n sparse = pytest.importorskip(\"sparse\")\n x = da.random.random((500, 500), chunks=(100, 100))\n x[x < 0.9] = 0\n\n y = x.map_blocks(sparse.COO)\n\n assert_eq(func(x), func(y))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_non_existent_func_test_non_existent_func.with_pytest_warns_.assert_list_np_sort_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_non_existent_func_test_non_existent_func.with_pytest_warns_.assert_list_np_sort_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 187, "span_ids": ["test_non_existent_func"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_non_existent_func():\n # Regression test for __array_function__ becoming default in numpy 1.17\n # dask has no sort function, so ensure that this still calls np.sort\n x = da.from_array(np.array([1, 2, 4, 3]), chunks=(2,))\n with pytest.warns(\n FutureWarning, match=\"The `numpy.sort` function is not implemented by Dask\"\n ):\n assert list(np.sort(x)) == [1, 2, 3, 4]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_binary_function_type_precedence_test_binary_function_type_precedence.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_binary_function_type_precedence_test_binary_function_type_precedence.assert_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 190, "end_line": 222, "span_ids": ["test_binary_function_type_precedence"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"func\",\n [\n np.equal,\n np.matmul,\n np.dot,\n lambda x, y: np.stack([x, y]),\n ],\n)\n@pytest.mark.parametrize(\n \"arr_upcast, arr_downcast\",\n [\n (\n WrappedArray(np.random.random((10, 10))),\n da.random.random((10, 10), chunks=(5, 5)),\n ),\n (\n da.random.random((10, 10), chunks=(5, 5)),\n 
EncapsulateNDArray(np.random.random((10, 10))),\n ),\n (\n WrappedArray(np.random.random((10, 10))),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n ],\n)\ndef test_binary_function_type_precedence(func, arr_upcast, arr_downcast):\n \"\"\"Test proper dispatch on binary NumPy functions\"\"\"\n assert (\n type(func(arr_upcast, arr_downcast))\n == type(func(arr_downcast, arr_upcast))\n == type(arr_upcast)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_like_raises_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_function.py_test_like_raises_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_function.py", "file_name": "test_array_function.py", "file_type": "text/x-python", "category": "test", "start_line": 225, "end_line": 246, "span_ids": ["test_like_with_numpy_func", "test_like_with_numpy_func_and_dtype", "test_like_raises"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [da.array, da.asarray, da.asanyarray, da.tri])\ndef test_like_raises(func):\n if _numpy_120:\n assert_eq(func(1, like=func(1)), func(1))\n else:\n with pytest.raises(\n RuntimeError, match=\"The use of ``like`` required NumPy >= 1.20\"\n ):\n func(1, like=func(1))\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"func\", [np.array, np.asarray, np.asanyarray])\ndef test_like_with_numpy_func(func):\n assert_eq(func(1, like=da.array(1)), func(1))\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"func\", [np.array, np.asarray, np.asanyarray])\ndef test_like_with_numpy_func_and_dtype(func):\n assert_eq(func(1, dtype=float, like=da.array(1)), func(1, dtype=float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_np_kroncompare_examples_test_tile_np_kroncompare_examples.assert_eq_np_tile_x_reps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_tile_np_kroncompare_examples_test_tile_np_kroncompare_examples.assert_eq_np_tile_x_reps", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 642, "end_line": 650, "span_ids": ["test_tile_np_kroncompare_examples"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape\", [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]\n)\n@pytest.mark.parametrize(\"reps\", [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)])\ndef test_tile_np_kroncompare_examples(shape, reps):\n x = np.random.random(shape)\n d = da.asarray(x)\n\n assert_eq(np.tile(x, reps), da.tile(d, reps))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_np_functions._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_np_functions._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 108, "span_ids": ["imports"], "tokens": 1031}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\nfrom packaging.version import parse as parse_version\n\npytestmark = pytest.mark.gpu\n\nimport dask\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq\nfrom dask.sizeof import sizeof\n\ncupy = pytest.importorskip(\"cupy\")\ncupy_version = parse_version(cupy.__version__)\n\n\nfunctions = [\n lambda x: x,\n lambda x: da.expm1(x),\n lambda x: 2 * x,\n lambda x: x / 2,\n lambda x: x**2,\n lambda x: x + x,\n lambda x: x * x,\n lambda x: x[0],\n lambda x: x[:, 1],\n lambda x: x[:1, None, 1:3],\n lambda x: x.T,\n lambda x: da.transpose(x, (1, 2, 0)),\n lambda x: x.sum(),\n lambda x: da.empty_like(x),\n lambda x: da.ones_like(x),\n lambda x: da.zeros_like(x),\n lambda x: da.full_like(x, 5),\n pytest.param(\n lambda x: x.mean(),\n marks=pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ \"\n \"(with https://github.com/cupy/cupy/pull/2418)\",\n ),\n ),\n pytest.param(lambda x: x.moment(order=0)),\n lambda x: x.moment(order=2),\n pytest.param(\n lambda x: x.std(),\n marks=pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ \"\n \"(with https://github.com/cupy/cupy/pull/2418)\",\n ),\n ),\n pytest.param(\n lambda x: x.var(),\n marks=pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ \"\n \"(with https://github.com/cupy/cupy/pull/2418)\",\n ),\n ),\n pytest.param(\n lambda x: x.dot(np.arange(x.shape[-1])),\n marks=pytest.mark.xfail(reason=\"cupy.dot(numpy) fails\"),\n ),\n pytest.param(\n lambda x: x.dot(np.eye(x.shape[-1])),\n marks=pytest.mark.xfail(reason=\"cupy.dot(numpy) fails\"),\n ),\n pytest.param(\n lambda x: da.tensordot(x, np.ones(x.shape[:2]), axes=[(0, 1), (0, 1)]),\n marks=pytest.mark.xfail(reason=\"cupy.dot(numpy) fails\"),\n ),\n lambda x: x.sum(axis=0),\n lambda x: x.max(axis=0),\n lambda x: x.sum(axis=(1, 2)),\n lambda x: x.astype(np.complex128),\n lambda x: 
x.map_blocks(lambda x: x * 2),\n pytest.param(lambda x: x.round(1)),\n lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),\n # Rechunking here is required, see https://github.com/dask/dask/issues/2561\n lambda x: (x.rechunk(x.shape)).reshape((x.shape[1], x.shape[0], x.shape[2])),\n lambda x: x.reshape((x.shape[0], x.shape[1], x.shape[2] / 2, x.shape[2] / 2)),\n lambda x: abs(x),\n lambda x: x > 0.5,\n lambda x: x.rechunk((4, 4, 4)),\n lambda x: x.rechunk((2, 2, 1)),\n pytest.param(lambda x: da.einsum(\"ijk,ijk\", x, x)),\n lambda x: np.isneginf(x),\n lambda x: np.isposinf(x),\n lambda x: np.isreal(x),\n lambda x: np.iscomplex(x),\n lambda x: np.real(x),\n lambda x: np.imag(x),\n lambda x: np.exp(x),\n lambda x: np.fix(x),\n lambda x: np.i0(x.reshape((24,))),\n lambda x: np.sinc(x),\n lambda x: np.nan_to_num(x),\n lambda x: np.max(x),\n lambda x: np.min(x),\n lambda x: np.prod(x),\n lambda x: np.any(x),\n lambda x: np.all(x),\n lambda x: np.nansum(x),\n lambda x: np.nanprod(x),\n lambda x: np.nanmin(x),\n lambda x: np.nanmax(x),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_basic_test_basic.if_next_iter_ddc_dask_key.else_.assert_eq_ddc_ddn_check": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_basic_test_basic.if_next_iter_ddc_dask_key.else_.assert_eq_ddc_ddn_check", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 117, "end_line": 134, "span_ids": ["test_basic"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", functions)\ndef test_basic(func):\n c = cupy.random.random((2, 3, 4))\n n = c.get()\n dc = da.from_array(c, chunks=(1, 2, 2), asarray=False)\n dn = da.from_array(n, chunks=(1, 2, 2))\n\n ddc = func(dc)\n ddn = func(dn)\n\n assert type(ddc._meta) is cupy.ndarray\n\n if next(iter(ddc.dask.keys()))[0].startswith(\"empty\"):\n # We can't verify for data correctness when testing empty_like\n assert type(ddc._meta) is type(ddc.compute())\n else:\n assert_eq(ddc, ddc) # Check that _meta and computed arrays match types\n assert_eq(ddc, ddn, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_sizeof_test_asanyarray.if_isinstance_like_np_nd.else_.assert_type_a_is_type_li": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_sizeof_test_asanyarray.if_isinstance_like_np_nd.else_.assert_type_a_is_type_li", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 159, "span_ids": ["test_sizeof", "test_asanyarray"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [\"f4\", \"f8\"])\ndef test_sizeof(dtype):\n c = cupy.random.random((2, 3, 4), dtype=dtype)\n\n assert sizeof(c) == c.nbytes\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\n \"arr\", [np.arange(5), cupy.arange(5), da.arange(5), da.from_array(cupy.arange(5))]\n)\n@pytest.mark.parametrize(\n \"like\", [np.arange(5), cupy.arange(5), da.arange(5), da.from_array(cupy.arange(5))]\n)\ndef test_asanyarray(arr, like):\n if isinstance(like, np.ndarray) and isinstance(\n da.utils.meta_from_array(arr), cupy.ndarray\n ):\n with pytest.raises(TypeError):\n a = da.utils.asanyarray_safe(arr, like=like)\n else:\n a = da.utils.asanyarray_safe(arr, like=like)\n assert type(a) is type(like)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_vindex_test_vindex.assert_eq_res_np_res_cp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_vindex_test_vindex.assert_eq_res_np_res_cp_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 162, "end_line": 178, "span_ids": ["test_vindex"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_vindex():\n x_np = np.arange(56).reshape((7, 8))\n x_cp = cupy.arange(56).reshape((7, 8))\n\n d_np = da.from_array(x_np, chunks=(3, 4))\n d_cp = da.from_array(x_cp, chunks=(3, 4))\n\n res_np = da.core._vindex(d_np, [0, 1, 6, 0], [0, 1, 0, 7])\n res_cp = da.core._vindex(d_cp, [0, 1, 6, 0], [0, 1, 0, 7])\n\n assert type(res_cp._meta) == cupy.ndarray\n assert_eq(\n res_cp, res_cp, check_type=False\n ) # Check that _meta and computed arrays match types\n\n assert_eq(res_np, res_cp, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_view_test_view.None_1.d_view_i4_order_asdf_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_view_test_view.None_1.d_view_i4_order_asdf_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 215, "span_ids": ["test_view"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_view():\n x = np.arange(56).reshape((7, 8))\n d = da.from_array(cupy.array(x), chunks=(2, 3))\n\n result = d.view()\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, x.view(), check_type=False)\n\n result = d.view(\"i4\")\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, x.view(\"i4\"), check_type=False)\n\n result = d.view(\"i2\")\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, x.view(\"i2\"), check_type=False)\n assert all(isinstance(s, int) for s in d.shape)\n\n x = np.arange(8, dtype=\"i1\")\n d = da.from_array(cupy.array(x), chunks=(4,))\n result = d.view(\"i4\")\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(x.view(\"i4\"), d.view(\"i4\"), check_type=False)\n\n with pytest.raises(ValueError):\n x = np.arange(8, dtype=\"i1\")\n d = da.from_array(cupy.array(x), chunks=(3,))\n d.view(\"i4\")\n\n with pytest.raises(ValueError):\n d.view(\"i4\", order=\"asdf\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_view_fortran_test_getter.assert_eq_result_np_aran": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_view_fortran_test_getter.assert_eq_result_np_aran", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 218, "end_line": 239, "span_ids": ["test_view_fortran", "test_getter"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_view_fortran():\n x = np.asfortranarray(np.arange(64).reshape((8, 8)))\n d = da.from_array(cupy.asfortranarray(cupy.array(x)), chunks=(2, 3))\n\n result = d.view(\"i4\", order=\"F\")\n assert type(result._meta) == 
cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, x.T.view(\"i4\").T, check_type=False)\n\n result = d.view(\"i2\", order=\"F\")\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, x.T.view(\"i2\").T, check_type=False)\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_getter():\n result = da.core.getter(cupy.arange(5), (None, slice(None, None)))\n\n assert type(result) == cupy.ndarray\n assert_eq(result, np.arange(5)[None, :], check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_store_kwargs_test_store_kwargs.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_store_kwargs_test_store_kwargs.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 242, "end_line": 268, "span_ids": ["test_store_kwargs"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_store_kwargs():\n d = da.from_array(cupy.ones((10, 10)), chunks=(2, 2))\n a = d + 1\n\n called = [False]\n\n def get_func(*args, **kwargs):\n assert kwargs.pop(\"foo\") == \"test kwarg\"\n r = dask.get(*args, **kwargs)\n called[0] = True\n return r\n\n called[0] = False\n at = cupy.zeros(shape=(10, 10))\n da.core.store([a], [at], scheduler=get_func, foo=\"test kwarg\")\n assert called[0]\n\n called[0] = False\n at = cupy.zeros(shape=(10, 10))\n a.store(at, scheduler=get_func, foo=\"test kwarg\")\n assert called[0]\n\n called[0] = False\n at = cupy.zeros(shape=(10, 10))\n da.core.store([a], [at], scheduler=get_func, return_stored=True, foo=\"test kwarg\")\n assert called[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_test_setitem_extended_API_2d.assert_eq_x_dx_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_test_setitem_extended_API_2d.assert_eq_x_dx_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 347, "end_line": 428, "span_ids": ["test_setitem_extended_API_2d"], "tokens": 757}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"index, value\",\n [\n [Ellipsis, -1],\n [(slice(None, None, 2), slice(None, None, -1)), -1],\n [slice(1, None, 2), -1],\n [[4, 3, 1], -1],\n [(Ellipsis, 4), -1],\n [5, -1],\n pytest.param(\n (slice(None), 2),\n range(6),\n marks=pytest.mark.skip(\n reason=\"Assigning `range` to CuPy array is not supported\"\n ),\n ),\n pytest.param(\n 3,\n range(10),\n marks=pytest.mark.skip(\n reason=\"Assigning `range` to CuPy array is not supported\"\n ),\n ),\n [(slice(None), [3, 5, 6]), [-30, -31, -32]],\n [([-1, 0, 1], 2), [-30, -31, -32]],\n pytest.param(\n (slice(None, 2), slice(None, 3)),\n [-50, -51, -52],\n marks=pytest.mark.skip(reason=\"Unsupported assigning `list` to CuPy array\"),\n ),\n [(slice(None), [6, 1, 3]), [-60, -61, -62]],\n pytest.param(\n (slice(1, 3), slice(1, 4)),\n [[-70, -71, -72]],\n marks=pytest.mark.skip(reason=\"Unsupported assigning `list` to CuPy array\"),\n ),\n pytest.param(\n (slice(None), [9, 8, 8]),\n [[-80, -81, 91]],\n marks=pytest.mark.flaky(reruns=10),\n ),\n [([True, False, False, False, True, False], 2), -1],\n [(3, [True, True, False, True, True, False, True, False, True, True]), -1],\n [(np.array([False, False, True, True, False, False]), slice(5, 7)), -1],\n [(cupy.array([False, False, True, True, False, False]), slice(5, 7)), -1],\n pytest.param(\n (\n 4,\n da.from_array(\n [False, False, True, True, False, False, True, False, False, True]\n ),\n ),\n -1,\n marks=pytest.mark.skip(\n reason=\"Unsupported assigning Dask Array to CuPy array\"\n ),\n ),\n [slice(5, None, 2), -99],\n pytest.param(\n slice(5, None, 2),\n range(1, 11),\n marks=pytest.mark.skip(\n reason=\"Assigning `range` to CuPy array is not supported\"\n ),\n ),\n [slice(1, None, -2), -98],\n pytest.param(\n slice(1, None, -2),\n range(11, 21),\n marks=pytest.mark.skip(\n reason=\"Assigning `range` to CuPy array is not supported\"\n ),\n ),\n ],\n)\ndef test_setitem_extended_API_2d(index, value):\n # 2-d array\n x = cupy.arange(60).reshape((6, 10))\n dx = da.from_array(x, chunks=(2, 3))\n dx[index] = value\n x[index] = value\n assert_eq(x, dx.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs_test_setitem_extended_API_2d_rhs_func_of_lhs.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs_test_setitem_extended_API_2d_rhs_func_of_lhs.None_13", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 397, "end_line": 475, "span_ids": ["test_setitem_extended_API_2d_rhs_func_of_lhs"], "tokens": 822}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem_extended_API_2d_rhs_func_of_lhs():\n # Cases:\n # * RHS and/or indices are a function of the LHS\n # * Indices have unknown chunk sizes\n # * RHS has extra leading size 1 dimensions compared to LHS\n x = cupy.arange(60).reshape((6, 10))\n chunks = (2, 3)\n\n dx = da.from_array(x, chunks=chunks)\n dx[2:4, dx[0] > 3] = -5\n x[2:4, x[0] > 3] = -5\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[2, dx[0] < -2] = -7\n x[2, x[0] < -2] = -7\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[dx % 2 == 0] = -8\n x[x % 2 == 0] = -8\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[dx % 2 == 0] = -8\n x[x % 2 == 0] = -8\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[3:5, 5:1:-2] = -dx[:2, 4:1:-2]\n x[3:5, 5:1:-2] = -x[:2, 4:1:-2]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[0, 1:3] = -dx[0, 4:2:-1]\n x[0, 1:3] = -x[0, 4:2:-1]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[...] = dx\n x[...] = x\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[...] = dx[...]\n x[...] = x[...]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[0] = dx[-1]\n x[0] = x[-1]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[0, :] = dx[-2, :]\n x[0, :] = x[-2, :]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[:, 1] = dx[:, -3]\n x[:, 1] = x[:, -3]\n assert_eq(x, dx.compute())\n\n index = da.from_array([0, 2], chunks=(2,))\n dx = da.from_array(x, chunks=chunks)\n dx[index, 8] = [99, 88]\n x[[0, 2], 8] = [99, 88]\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=chunks)\n dx[:, index] = dx[:, :2]\n x[:, [0, 2]] = x[:, :2]\n assert_eq(x, dx.compute())\n\n index = da.where(da.arange(3, chunks=(1,)) < 2)[0]\n dx = da.from_array(x, chunks=chunks)\n dx[index, 7] = [-23, -33]\n x[index.compute(), 7] = [-23, -33]\n assert_eq(x, dx.compute())\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs.index_18_test_setitem_on_read_only_blocks.assert_eq_dx_0_0_88_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_2d_rhs_func_of_lhs.index_18_test_setitem_on_read_only_blocks.assert_eq_dx_0_0_88_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 477, "end_line": 524, "span_ids": ["test_setitem_extended_API_2d_rhs_func_of_lhs", "test_setitem_on_read_only_blocks"], "tokens": 487}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem_extended_API_2d_rhs_func_of_lhs():\n # ... 
other code\n\n index = da.where(da.arange(3, chunks=(1,)) < 2)[0]\n dx = da.from_array(x, chunks=chunks)\n dx[(index,)] = -34\n x[(index.compute(),)] = -34\n assert_eq(x, dx.compute())\n\n index = index - 4\n dx = da.from_array(x, chunks=chunks)\n dx[index, 7] = [-43, -53]\n x[index.compute(), 7] = [-43, -53]\n assert_eq(x, dx.compute())\n\n index = da.from_array([0, -1], chunks=(1,))\n x[[0, -1]] = 9999\n dx[(index,)] = 9999\n assert_eq(x, dx.compute())\n\n dx = da.from_array(x, chunks=(-1, -1))\n dx[...] = da.from_array(x, chunks=chunks)\n assert_eq(x, dx.compute())\n\n # Both tests below fail in CuPy due to leading singular dimensions\n if False:\n # RHS has extra leading size 1 dimensions compared to LHS\n dx = da.from_array(x.copy(), chunks=(2, 3))\n v = x.reshape((1, 1) + x.shape)\n x[...] = v\n dx[...] = v\n assert_eq(x, dx.compute())\n\n index = da.where(da.arange(3, chunks=(1,)) < 2)[0]\n v = -cupy.arange(12).reshape(1, 1, 6, 2)\n x[:, [0, 1]] = v\n dx[:, index] = v\n assert_eq(x, dx.compute())\n\n\ndef test_setitem_on_read_only_blocks():\n # Outputs of broadcast_trick-style functions contain read-only\n # arrays\n dx = da.empty_like(cupy.array(()), shape=(4, 6), dtype=float, chunks=(2, 2))\n dx[0] = 99\n\n assert_eq(dx[0, 0], 99.0)\n\n dx[0:2] = 88\n\n assert_eq(dx[0, 0], 88.0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_errs_test_setitem_errs.dx_6.da_from_array_x_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_errs_test_setitem_errs.dx_6.da_from_array_x_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 527, "end_line": 612, "span_ids": ["test_setitem_errs"], "tokens": 788}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem_errs():\n x = da.ones_like(cupy.array(()), shape=(4, 4), chunks=(2, 2))\n\n with pytest.raises(ValueError):\n x[x > 1] = x\n\n # Shape mismatch\n with pytest.raises(ValueError):\n x[[True, True, False, False], 0] = [2, 3, 4]\n\n with pytest.raises(ValueError):\n x[[True, True, True, False], 0] = [2, 3]\n\n with pytest.raises(ValueError):\n x[0, [True, True, True, False]] = [2, 3]\n\n with pytest.raises(ValueError):\n x[0, [True, True, True, False]] = [1, 2, 3, 4, 5]\n\n with pytest.raises(ValueError):\n x[da.from_array([True, True, True, False]), 0] = [1, 2, 3, 4, 5]\n\n with pytest.raises(ValueError):\n x[0, da.from_array([True, False, False, True])] = [1, 2, 3, 4, 5]\n\n with pytest.raises(ValueError):\n x[:, 0] = [2, 3, 4]\n\n with pytest.raises(ValueError):\n x[0, :] = [1, 2, 3, 4, 5]\n\n x = da.ones((4, 4), chunks=(2, 2))\n\n # Too many indices\n with pytest.raises(IndexError):\n x[:, :, :] = 2\n\n # 2-d boolean indexing a single dimension\n with pytest.raises(IndexError):\n x[[[True, True, False, False]], 0] = 5\n\n # Too 
many/not enough booleans\n with pytest.raises(IndexError):\n x[[True, True, False]] = 5\n\n with pytest.raises(IndexError):\n x[[False, True, True, True, False]] = 5\n\n # 2-d indexing a single dimension\n with pytest.raises(IndexError):\n x[[[1, 2, 3]], 0] = 5\n\n # Multiple 1-d boolean/integer arrays\n with pytest.raises(NotImplementedError):\n x[[1, 2], [2, 3]] = 6\n\n with pytest.raises(NotImplementedError):\n x[[True, True, False, False], [2, 3]] = 5\n\n with pytest.raises(NotImplementedError):\n x[[True, True, False, False], [False, True, False, False]] = 7\n\n # scalar boolean indexing\n with pytest.raises(NotImplementedError):\n x[True] = 5\n\n with pytest.raises(NotImplementedError):\n x[cupy.array(True)] = 5\n\n with pytest.raises(NotImplementedError):\n x[0, da.from_array(True)] = 5\n\n # Scalar arrays\n y = da.from_array(cupy.array(1))\n with pytest.raises(IndexError):\n y[:] = 2\n\n # RHS has non-broadcastable extra leading dimensions\n x = cupy.arange(12).reshape((3, 4))\n dx = da.from_array(x, chunks=(2, 2))\n with pytest.raises(ValueError):\n dx[...] = cupy.arange(24).reshape((2, 1, 3, 4))\n\n # RHS has extra leading size 1 dimensions compared to LHS\n x = cupy.arange(12).reshape((3, 4))\n dx = da.from_array(x, chunks=(2, 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_array_like_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_array_like_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 615, "end_line": 628, "span_ids": ["test_array_like"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"xp\", [np, da])\n@pytest.mark.parametrize(\"orig_arr\", [np.array, da.array])\n@pytest.mark.parametrize(\"array_func\", [\"array\", \"asarray\", \"asanyarray\"])\ndef test_array_like(xp, orig_arr, array_func):\n cp_func = getattr(cupy, array_func)\n xp_func = getattr(xp, array_func)\n\n cp_a = cp_func([1, 2, 3])\n xp_a = xp_func(orig_arr([1, 2, 3]), like=da.from_array(cupy.array(())))\n assert isinstance(xp_a, da.Array)\n assert isinstance(xp_a._meta, cupy.ndarray)\n assert_eq(xp_a, cp_a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_np_test_diag.assert_eq_da_diag_dx_cu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_np_test_diag.assert_eq_da_diag_dx_cu", "embedding": null, "metadata": {"file_path":
"dask/array/tests/test_cupy_creation.py", "file_name": "test_cupy_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 32, "span_ids": ["imports", "test_diag"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq\n\ncupy = pytest.importorskip(\"cupy\")\n\n\ndef test_diag():\n v = cupy.arange(11)\n dv = da.from_array(v, chunks=(4,), asarray=False)\n assert type(dv._meta) == cupy.ndarray\n assert_eq(dv, dv) # Check that _meta and computed arrays match types\n assert_eq(da.diag(dv), cupy.diag(v))\n\n v = v + v + 3\n dv = dv + dv + 3\n darr = da.diag(dv)\n cupyarr = cupy.diag(v)\n assert type(darr._meta) == cupy.ndarray\n assert_eq(darr, darr) # Check that _meta and computed arrays match types\n assert_eq(darr, cupyarr)\n\n x = cupy.arange(64).reshape((8, 8))\n dx = da.from_array(x, chunks=(4, 4), asarray=False)\n assert type(dx._meta) == cupy.ndarray\n assert_eq(dx, dx) # Check that _meta and computed arrays match types\n assert_eq(da.diag(dx), cupy.diag(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_diagonal_test_diagonal.None_11": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_diagonal_test_diagonal.None_11", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_creation.py", "file_name": "test_cupy_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 90, "span_ids": ["test_diagonal"], "tokens": 613}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diagonal():\n v = cupy.arange(11)\n with pytest.raises(ValueError):\n da.diagonal(v)\n\n v = cupy.arange(4).reshape((2, 2))\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=0, axis2=0)\n\n with pytest.raises(np.AxisError):\n da.diagonal(v, axis1=-4)\n\n with pytest.raises(np.AxisError):\n da.diagonal(v, axis2=-4)\n\n v = cupy.arange(4 * 5 * 6).reshape((4, 5, 6))\n v = da.from_array(v, chunks=2, asarray=False)\n assert_eq(da.diagonal(v), np.diagonal(v))\n # Empty diagonal.\n assert_eq(da.diagonal(v, offset=10), np.diagonal(v, offset=10))\n assert_eq(da.diagonal(v, offset=-10), np.diagonal(v, offset=-10))\n assert isinstance(da.diagonal(v).compute(), cupy.ndarray)\n\n with pytest.raises(ValueError):\n da.diagonal(v, axis1=-2)\n\n # Negative axis.\n assert_eq(da.diagonal(v, axis1=-1), np.diagonal(v, axis1=-1))\n assert_eq(da.diagonal(v, offset=1, axis1=-1), np.diagonal(v, offset=1, axis1=-1))\n\n # 
Heterogeneous chunks.\n v = cupy.arange(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6))\n v = da.from_array(\n v, chunks=(1, (1, 2), (1, 2, 1), (2, 1, 2), (5, 1)), asarray=False\n )\n\n assert_eq(da.diagonal(v), np.diagonal(v))\n assert_eq(\n da.diagonal(v, offset=2, axis1=3, axis2=1),\n np.diagonal(v, offset=2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=1),\n np.diagonal(v, offset=-2, axis1=3, axis2=1),\n )\n\n assert_eq(\n da.diagonal(v, offset=-2, axis1=3, axis2=4),\n np.diagonal(v, offset=-2, axis1=3, axis2=4),\n )\n\n assert_eq(da.diagonal(v, 1), np.diagonal(v, 1))\n assert_eq(da.diagonal(v, -1), np.diagonal(v, -1))\n # Positional arguments\n assert_eq(da.diagonal(v, 1, 2, 1), np.diagonal(v, 1, 2, 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_pad_test_pad.if_mode_empty_.else_.assert_eq_np_r_da_r_che", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_creation.py", "file_name": "test_cupy_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 142, "span_ids": ["test_pad"], "tokens": 628}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\n \"shape, chunks, pad_width, mode, kwargs\",\n [\n ((10,), (3,), 1, \"constant\", {}),\n ((10,), (3,), 2, \"constant\", {\"constant_values\": -1}),\n ((10,), (3,), ((2, 3)), \"constant\", {\"constant_values\": (-1, -2)}),\n (\n (10, 11),\n (4, 5),\n ((1, 4), (2, 3)),\n \"constant\",\n {\"constant_values\": ((-1, -2), (2, 1))},\n ),\n ((10,), (3,), 3, \"edge\", {}),\n ((10,), (3,), 3, \"linear_ramp\", {}),\n ((10,), (3,), 3, \"linear_ramp\", {\"end_values\": 0}),\n (\n (10, 11),\n (4, 5),\n ((1, 4), (2, 3)),\n \"linear_ramp\",\n {\"end_values\": ((-1, -2), (4, 3))},\n ),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"reflect\", {}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"symmetric\", {}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"wrap\", {}),\n ((10,), (3,), ((2, 3)), \"maximum\", {\"stat_length\": (1, 2)}),\n ((10, 11), (4, 5), ((1, 4), (2, 3)), \"mean\", {\"stat_length\": ((3, 4), (2, 1))}),\n ((10,), (3,), ((2, 3)), \"minimum\", {\"stat_length\": (2, 3)}),\n ((10,), (3,), 1, \"empty\", {}),\n ],\n)\ndef test_pad(shape, chunks, pad_width, mode, kwargs):\n np_a = np.random.random(shape)\n da_a = da.from_array(cupy.array(np_a), chunks=chunks)\n\n np_r = np.pad(np_a, pad_width, mode, **kwargs)\n da_r = da.pad(da_a, pad_width, mode, **kwargs)\n\n assert isinstance(da_r._meta, cupy.ndarray)\n assert isinstance(da_r.compute(), cupy.ndarray)\n\n if mode == \"empty\":\n # empty pads lead to undefined values which may be different\n assert_eq(\n np_r[pad_width:-pad_width], da_r[pad_width:-pad_width], 
check_type=False\n )\n else:\n assert_eq(np_r, da_r, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_tri_like_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_creation.py_test_tri_like_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_creation.py", "file_name": "test_cupy_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 145, "end_line": 172, "span_ids": ["test_tri_like"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"xp\", [np, da])\n@pytest.mark.parametrize(\n \"N, M, k, dtype, chunks\",\n [\n (3, None, 0, float, \"auto\"),\n (4, None, 0, float, \"auto\"),\n (3, 4, 0, bool, \"auto\"),\n (3, None, 1, int, \"auto\"),\n (3, None, -1, int, \"auto\"),\n (3, None, 2, int, 1),\n (6, 8, -2, int, (3, 4)),\n (6, 8, 0, int, (3, \"auto\")),\n ],\n)\ndef test_tri_like(xp, N, M, k, dtype, chunks):\n xp_tri = getattr(xp, \"tri\")\n\n args = [N, M, k, dtype]\n\n cp_a = cupy.tri(*args)\n\n if xp is da:\n args.append(chunks)\n xp_a = xp_tri(*args, like=da.from_array(cupy.array(())))\n\n assert_eq(xp_a, cp_a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_gufunc.py_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_gufunc.py_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_gufunc.py", "file_name": "test_cupy_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 25, "span_ids": ["test_apply_gufunc_axis", "imports"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom dask.array.gufunc import apply_gufunc\nfrom dask.array.utils import assert_eq\n\ncupy = pytest.importorskip(\"cupy\")\n\n\ndef test_apply_gufunc_axis():\n def mydiff(x):\n return np.diff(x)\n\n a = cupy.random.randn(3, 6, 4)\n da_ = da.from_array(a, chunks=2, asarray=False)\n\n m = np.diff(a, axis=1)\n dm = apply_gufunc(\n mydiff, \"(i)->(i)\", da_, axis=1, output_sizes={\"i\": 5}, allow_rechunk=True\n )\n assert_eq(m, dm)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_np_test_tsqr._full_matrix_returned": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_np_test_tsqr._full_matrix_returned", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 79, "span_ids": ["imports", "test_tsqr"], "tokens": 833}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\nfrom packaging.version import parse as parse_version\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq\n\ncupy = pytest.importorskip(\"cupy\")\ncupy_version = parse_version(cupy.__version__)\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.skipif(\n cupy_version < parse_version(\"6.1.0\"),\n reason=\"Requires CuPy 6.1.0+ (with https://github.com/cupy/cupy/pull/2209)\",\n)\n@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), ValueError), # short-fat regular blocks\n (10, 40, (10, 15), ValueError), # short-fat irregular blocks\n (\n 10,\n 40,\n (10, (15, 5, 5, 8, 7)),\n ValueError,\n ), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_tsqr(m, n, chunks, error_type):\n mat = cupy.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\", asarray=False)\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr.if_error_type_is_None__test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr.if_error_type_is_None__test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 81, "end_line": 108, "span_ids": ["test_tsqr"], "tokens": 1012}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.skipif(\n cupy_version < parse_version(\"6.1.0\"),\n reason=\"Requires CuPy 6.1.0+ (with https://github.com/cupy/cupy/pull/2209)\",\n)\n@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), ValueError), # short-fat regular blocks\n (10, 40, (10, 15), ValueError), # short-fat irregular blocks\n (\n 10,\n 40,\n (10, (15, 5, 5, 8, 7)),\n ValueError,\n ), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_tsqr(m, n, chunks, error_type):\n # ... 
other code\n\n if error_type is None:\n # test QR\n q, r = da.linalg.tsqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(cupy.eye(n_q, n_q), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, np.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n\n # test SVD\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(\n np.eye(n_u, n_u), da.dot(u.T, u), check_type=False\n ) # u must be orthonormal\n assert_eq(\n np.eye(d_vh, d_vh), da.dot(vh, vh.T), check_type=False\n ) # vh must be orthonormal\n assert_eq(mat, da.dot(da.dot(u, da.diag(s)), vh[:n_q])) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = da.linalg.tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain_test_tsqr_uncertain.if_vary_rows_.m.mat_shape_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 206, "span_ids": ["test_tsqr_uncertain"], "tokens": 761}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 
10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n mat = cupy.random.rand(m_min * 2, n_max)\n m, n = m_min * 2, n_max\n mat[0:m_min, 0] += 1\n _c0 = mat[:, 0]\n _r0 = mat[0, :]\n c0 = da.from_array(_c0, chunks=m_min, name=\"c\", asarray=False)\n r0 = da.from_array(_r0, chunks=n_max, name=\"r\", asarray=False)\n data = da.from_array(mat, chunks=chunks, name=\"A\", asarray=False)\n if vary_rows:\n data = data[c0 > 0.5, :]\n mat = mat[_c0 > 0.5, :]\n m = mat.shape[0]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain.if_vary_cols__test_tsqr_uncertain._full_matrix_returned", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 207, "end_line": 224, "span_ids": ["test_tsqr_uncertain"], "tokens": 731}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... 
other code\n if vary_cols:\n data = data[:, r0 > 0.5]\n mat = mat[:, _r0 > 0.5]\n n = mat.shape[1]\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_tsqr_uncertain.if_error_type_is_None__test_tsqr_uncertain.if_error_type_is_None_.else_.None_1.u_s_vh_da_linalg_tsqr", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 262, "span_ids": ["test_tsqr_uncertain"], "tokens": 1018}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m_min,n_max,chunks,vary_rows,vary_cols,error_type\",\n [\n (10, 5, (10, 5), True, False, None), # single block tall\n (10, 5, (10, 5), False, True, None), # single block tall\n (10, 5, (10, 5), True, True, None), # single block tall\n (40, 5, (10, 5), True, False, None), # multiple blocks tall\n (40, 5, (10, 5), False, True, None), # multiple blocks tall\n (40, 5, (10, 5), True, True, None), # multiple blocks tall\n (\n 300,\n 10,\n (40, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n False,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n False,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n (\n 300,\n 10,\n (40, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n True,\n True,\n None,\n ), # tall-skinny regular thin layers; recursion_depth=4\n ],\n)\ndef test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):\n # ... 
other code\n\n if error_type is None:\n # test QR\n q, r = da.linalg.tsqr(data)\n q = q.compute() # because uncertainty\n r = r.compute()\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, np.dot(q, r)) # accuracy check\n assert_eq(\n np.eye(n_q, n_q), np.dot(q.T, q), check_type=False\n ) # q must be orthonormal\n assert_eq(r, np.triu(r)) # r must be upper triangular\n\n # test SVD\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)\n u = u.compute() # because uncertainty\n s = s.compute()\n vh = vh.compute()\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(\n np.eye(n_u, n_u), np.dot(u.T, u), check_type=False\n ) # u must be orthonormal\n assert_eq(\n np.eye(d_vh, d_vh), np.dot(vh, vh.T), check_type=False\n ) # vh must be orthonormal\n assert_eq(\n mat, np.dot(np.dot(u, np.diag(s)), vh[:n_q]), check_type=False\n ) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = da.linalg.tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = da.linalg.tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_da_linalg_sfqr_dat": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_sfqr_test_sfqr.if_error_type_is_None_.else_.with_pytest_raises_error_.q_r_da_linalg_sfqr_dat", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 346, "span_ids": ["test_sfqr"], "tokens": 797}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, ValueError), # tall-skinny regular blocks\n (20, 10, (3, 10), ValueError), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), ValueError), # tall-skinny irregular fat layers\n (\n 40,\n 10,\n ((15, 5, 5, 8, 7), 10),\n ValueError,\n ), # tall-skinny non-uniform chunks (why?)\n (\n 128,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 300,\n 10,\n (40, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=2\n (\n 300,\n 10,\n (30, 10),\n ValueError,\n ), # tall-skinny regular thin layers; recursion_depth=3\n (\n 300,\n 10,\n (20, 10),\n ValueError,\n ), # 
tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), None), # short-fat regular blocks\n (10, 40, (10, 15), None), # short-fat irregular blocks\n (10, 40, (10, (15, 5, 5, 8, 7)), None), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_sfqr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n m_qtq = n_q\n\n if error_type is None:\n q, r = da.linalg.sfqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n else:\n with pytest.raises(error_type):\n q, r = da.linalg.sfqr(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_lstsq_test_lstsq.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py_test_lstsq_test_lstsq.None_6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 349, "end_line": 396, "span_ids": ["test_lstsq"], "tokens": 620}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"iscomplex\", [False, True])\n@pytest.mark.parametrize((\"nrow\", \"ncol\", \"chunk\"), [(20, 10, 5), (100, 10, 10)])\ndef test_lstsq(nrow, ncol, chunk, iscomplex):\n cupy.random.seed(1)\n A = cupy.random.randint(1, 20, (nrow, ncol))\n b = cupy.random.randint(1, 20, nrow)\n if iscomplex:\n A = A + 1.0j * cupy.random.randint(1, 20, A.shape)\n b = b + 1.0j * cupy.random.randint(1, 20, b.shape)\n\n dA = da.from_array(A, (chunk, ncol))\n db = da.from_array(b, chunk)\n\n x, r, rank, s = cupy.linalg.lstsq(A, b, rcond=-1)\n dx, dr, drank, ds = da.linalg.lstsq(dA, db)\n\n assert_eq(dx, x)\n assert_eq(dr, r)\n assert drank.compute() == rank\n assert_eq(ds, s)\n\n # reduce rank causes multicollinearity, only compare rank\n A[:, 1] = A[:, 2]\n dA = da.from_array(A, (chunk, ncol))\n db = da.from_array(b, chunk)\n x, r, rank, s = cupy.linalg.lstsq(\n A, b, rcond=cupy.finfo(cupy.double).eps * max(nrow, ncol)\n )\n assert rank == ncol - 1\n dx, dr, drank, ds = da.linalg.lstsq(dA, db)\n assert drank.compute() == rank\n\n # 2D case\n A = cupy.random.randint(1, 20, (nrow, ncol))\n b2D = cupy.random.randint(1, 20, (nrow, ncol // 2))\n if iscomplex:\n A = A + 1.0j * cupy.random.randint(1, 20, A.shape)\n b2D = b2D + 1.0j * cupy.random.randint(1, 20, b2D.shape)\n dA = da.from_array(A, (chunk, ncol))\n db2D = 
da.from_array(b2D, (chunk, ncol // 2))\n x, r, rank, s = cupy.linalg.lstsq(A, b2D, rcond=-1)\n dx, dr, drank, ds = da.linalg.lstsq(dA, db2D)\n\n assert_eq(dx, x)\n assert_eq(dr, r)\n assert drank.compute() == rank\n assert_eq(ds, s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py__get_symmat_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_linalg.py__get_symmat_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_linalg.py", "file_name": "test_cupy_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 399, "end_line": 427, "span_ids": ["_get_symmat", "test_cholesky"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_symmat(size):\n cupy.random.seed(1)\n A = cupy.random.randint(1, 21, (size, size))\n lA = cupy.tril(A)\n return lA.dot(lA.T)\n\n\n@pytest.mark.parametrize((\"shape\", \"chunk\"), [(20, 10), (12, 3), (30, 3), (30, 6)])\ndef test_cholesky(shape, chunk):\n scipy_linalg = pytest.importorskip(\"scipy.linalg\")\n\n A = _get_symmat(shape)\n dA = da.from_array(A, (chunk, chunk))\n\n # Need to take the transpose because default in `cupy.linalg.cholesky` is\n # to return lower triangle\n assert_eq(\n da.linalg.cholesky(dA),\n cupy.linalg.cholesky(A).T,\n check_graph=False,\n check_chunks=False,\n )\n assert_eq(\n da.linalg.cholesky(dA, lower=True).map_blocks(cupy.asnumpy),\n scipy_linalg.cholesky(cupy.asnumpy(A), lower=True),\n check_graph=False,\n check_chunks=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_np_test_overlap_internal.assert_same_keys_da_overl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_np_test_overlap_internal.assert_same_keys_da_overl", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_overlap.py", "file_name": "test_cupy_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 39, "span_ids": ["imports", "test_overlap_internal"], "tokens": 567}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\nfrom packaging.version import parse as parse_version\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq, same_keys\n\ncupy = pytest.importorskip(\"cupy\")\ncupy_version = 
parse_version(cupy.__version__)\n\n\ndef test_overlap_internal():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), asarray=False)\n\n g = da.overlap.overlap_internal(d, {0: 2, 1: 1})\n assert g.chunks == ((6, 6), (5, 5))\n\n expected = np.array(\n [\n [0, 1, 2, 3, 4, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 12, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 59, 60, 61, 62, 63],\n ]\n )\n\n assert_eq(g, expected, check_type=False)\n assert same_keys(da.overlap.overlap_internal(d, {0: 2, 1: 1}), g)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_trim_internal_test_periodic.assert_eq_e_0_d_2_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_overlap.py", "file_name": "test_cupy_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 59, "span_ids": ["test_periodic", "test_trim_internal"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_trim_internal():\n x = cupy.ones((40, 60))\n d = da.from_array(x, chunks=(10, 10), asarray=False)\n e = da.overlap.trim_internal(d, axes={0: 1, 1: 2}, boundary=\"reflect\")\n\n assert e.chunks == ((8, 8, 8, 8), (6, 6, 6, 6, 6, 6))\n\n\ndef test_periodic():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), asarray=False)\n\n e = da.overlap.periodic(d, axis=0, depth=2)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], d[-1, :])\n assert_eq(e[0, :], d[-2, :])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_reflect_test_reflect.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_reflect_test_reflect.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_overlap.py", "file_name": "test_cupy_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 72, "span_ids": ["test_reflect"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reflect():\n x = cupy.arange(10)\n d = da.from_array(x, chunks=(5, 5), asarray=False)\n\n e = da.overlap.reflect(d, axis=0, depth=2)\n expected = np.array([1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8])\n assert_eq(e, expected, check_type=False)\n\n e = da.overlap.reflect(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_nearest_test_nearest.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_nearest_test_nearest.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_overlap.py", "file_name": "test_cupy_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 75, "end_line": 85, "span_ids": ["test_nearest"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nearest():\n x = cupy.arange(10)\n d = da.from_array(x, chunks=(5, 5), asarray=False)\n\n e = da.overlap.nearest(d, axis=0, depth=2)\n expected = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9])\n assert_eq(e, expected, check_type=False)\n\n e = da.overlap.nearest(d, axis=0, depth=1)\n expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n assert_eq(e, expected, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_constant_test_constant.assert_eq_e_1_np_on", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_overlap.py", "file_name": "test_cupy_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 88, "end_line": 101, "span_ids": ["test_constant"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ (with https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_constant():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), 
asarray=False)\n\n e = da.overlap.constant(d, axis=0, depth=2, value=10)\n assert e.shape[0] == d.shape[0] + 4\n assert e.shape[1] == d.shape[1]\n\n assert_eq(e[1, :], np.ones(8, dtype=x.dtype) * 10, check_type=False)\n assert_eq(e[-1, :], np.ones(8, dtype=x.dtype) * 10, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_boundaries_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_overlap.py_test_boundaries_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_overlap.py", "file_name": "test_cupy_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 104, "end_line": 131, "span_ids": ["test_boundaries"], "tokens": 521}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ (with https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_boundaries():\n x = cupy.arange(64).reshape((8, 8))\n d = da.from_array(x, chunks=(4, 4), asarray=False)\n\n e = da.overlap.boundaries(d, {0: 2, 1: 1}, {0: 0, 1: \"periodic\"})\n\n expected = np.array(\n [\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [7, 0, 1, 2, 3, 4, 5, 6, 7, 0],\n [15, 8, 9, 10, 11, 12, 13, 14, 15, 8],\n [23, 16, 17, 18, 19, 20, 21, 22, 23, 16],\n [31, 24, 25, 26, 27, 28, 29, 30, 31, 24],\n [39, 32, 33, 34, 35, 36, 37, 38, 39, 32],\n [47, 40, 41, 42, 43, 44, 45, 46, 47, 40],\n [55, 48, 49, 50, 51, 52, 53, 54, 55, 48],\n [63, 56, 57, 58, 59, 60, 61, 62, 63, 56],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n assert_eq(e, expected, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_np_test_percentile.assert_not_same_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_np_test_percentile.assert_not_same_keys_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_percentile.py", "file_name": "test_cupy_percentile.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 30, "span_ids": ["test_percentile", "imports"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom 
dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq, same_keys\n\ncupy = pytest.importorskip(\"cupy\")\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_percentile():\n d = da.from_array(cupy.ones((16,)), chunks=(4,))\n qs = np.array([0, 50, 100])\n\n result = da.percentile(d, qs, method=\"midpoint\")\n assert_eq(result, np.array([1, 1, 1], dtype=d.dtype), check_type=False)\n\n x = cupy.array([0, 0, 5, 5, 5, 5, 20, 20])\n d = da.from_array(x, chunks=(3,))\n\n result = da.percentile(d, qs, method=\"midpoint\")\n assert_eq(result, np.array([0, 5, 20], dtype=result.dtype), check_type=False)\n\n assert not same_keys(\n da.percentile(d, qs, \"midpoint\"),\n da.percentile(d, [0, 50], \"midpoint\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentile_tokenize_test_percentile_tokenize.assert_same_keys_da_perce": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentile_tokenize_test_percentile_tokenize.assert_same_keys_da_perce", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_percentile.py", "file_name": "test_cupy_percentile.py", "file_type": "text/x-python", "category": "test", "start_line": 33, "end_line": 41, "span_ids": ["test_percentile_tokenize"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n reason=\"Non-deterministic tokenize(cupy.array(...)), \"\n \"see https://github.com/dask/dask/issues/6718\"\n)\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_percentile_tokenize():\n d = da.from_array(cupy.ones((16,)), chunks=(4,))\n qs = np.array([0, 50, 100])\n assert same_keys(da.percentile(d, qs), da.percentile(d, qs))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_empty_arrays_test_percentiles_with_empty_arrays.assert_eq_result_np_arra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_empty_arrays_test_percentiles_with_empty_arrays.assert_eq_result_np_arra", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_percentile.py", "file_name": "test_cupy_percentile.py", "file_type": "text/x-python", "category": "test", "start_line": 44, "end_line": 50, "span_ids": ["test_percentiles_with_empty_arrays"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_percentiles_with_empty_arrays():\n x = da.from_array(cupy.ones(10), chunks=((5, 0, 5),))\n result = da.percentile(x, [10, 50, 90], method=\"midpoint\")\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, np.array([1, 1, 1], dtype=x.dtype), check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_empty_q_test_percentiles_with_empty_q.assert_eq_result_np_arra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_empty_q_test_percentiles_with_empty_q.assert_eq_result_np_arra", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_percentile.py", "file_name": "test_cupy_percentile.py", "file_type": "text/x-python", "category": "test", "start_line": 53, "end_line": 59, "span_ids": ["test_percentiles_with_empty_q"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_percentiles_with_empty_q():\n x = da.from_array(cupy.ones(10), chunks=((5, 0, 5),))\n result = da.percentile(x, [], method=\"midpoint\")\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, np.array([], dtype=x.dtype), check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_scaler_percentile_test_percentiles_with_scaler_percentile.assert_eq_result_np_arra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_scaler_percentile_test_percentiles_with_scaler_percentile.assert_eq_result_np_arra", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_percentile.py", "file_name": "test_cupy_percentile.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 71, "span_ids": ["test_percentiles_with_scaler_percentile"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, 
reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"q\", [5, 5.0, np.int64(5), np.float64(5)])\ndef test_percentiles_with_scaler_percentile(q):\n # Regression test to ensure da.percentile works with scalar percentiles\n # See #3020\n d = da.from_array(cupy.ones((16,)), chunks=(4,))\n result = da.percentile(d, q, method=\"midpoint\")\n assert type(result._meta) == cupy.ndarray\n assert_eq(result, result) # Check that _meta and computed arrays match types\n assert_eq(result, np.array([1], dtype=d.dtype), check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_unknown_chunk_sizes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_percentile.py_test_percentiles_with_unknown_chunk_sizes_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_percentile.py", "file_name": "test_cupy_percentile.py", "file_type": "text/x-python", "category": "test", "start_line": 74, "end_line": 90, "span_ids": ["test_percentiles_with_unknown_chunk_sizes"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_percentiles_with_unknown_chunk_sizes():\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n x = rs.random(1000, chunks=(100,))\n x._chunks = ((np.nan,) * 10,)\n\n result = da.percentile(x, 50, method=\"midpoint\").compute()\n assert type(result) == cupy.ndarray\n assert 0.1 < result < 0.9\n\n a, b = da.percentile(x, [40, 60], method=\"midpoint\").compute()\n assert type(a) == cupy.ndarray\n assert type(b) == cupy.ndarray\n assert 0.1 < a < 0.9\n assert 0.1 < b < 0.9\n assert a < b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_random.py_pytest_test_random_all.rnd_test_rs_standard_t_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_random.py_pytest_test_random_all.rnd_test_rs_standard_t_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_random.py", "file_name": "test_cupy_random.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 58, "span_ids": ["imports", "test_random_all"], "tokens": 782}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom 
dask.array.utils import assert_eq\n\ncupy = pytest.importorskip(\"cupy\")\n\n\ndef test_random_all():\n def rnd_test(func, *args, **kwargs):\n a = func(*args, **kwargs)\n assert type(a._meta) == cupy.ndarray\n assert_eq(a, a) # Check that _meta and computed arrays match types\n\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n\n rnd_test(rs.beta, 1, 2, size=5, chunks=3)\n rnd_test(rs.binomial, 10, 0.5, size=5, chunks=3)\n rnd_test(rs.chisquare, 1, size=5, chunks=3)\n rnd_test(rs.exponential, 1, size=5, chunks=3)\n rnd_test(rs.f, 1, 2, size=5, chunks=3)\n rnd_test(rs.gamma, 5, 1, size=5, chunks=3)\n rnd_test(rs.geometric, 1, size=5, chunks=3)\n rnd_test(rs.gumbel, 1, size=5, chunks=3)\n rnd_test(rs.hypergeometric, 1, 2, 3, size=5, chunks=3)\n rnd_test(rs.laplace, size=5, chunks=3)\n rnd_test(rs.logistic, size=5, chunks=3)\n rnd_test(rs.lognormal, size=5, chunks=3)\n rnd_test(rs.logseries, 0.5, size=5, chunks=3)\n # No RandomState for multinomial in CuPy\n # rnd_test(rs.multinomial, 20, [1 / 6.] * 6, size=5, chunks=3)\n rnd_test(rs.negative_binomial, 5, 0.5, size=5, chunks=3)\n rnd_test(rs.noncentral_chisquare, 2, 2, size=5, chunks=3)\n\n rnd_test(rs.noncentral_f, 2, 2, 3, size=5, chunks=3)\n rnd_test(rs.normal, 2, 2, size=5, chunks=3)\n rnd_test(rs.pareto, 1, size=5, chunks=3)\n rnd_test(rs.poisson, size=5, chunks=3)\n\n rnd_test(rs.power, 1, size=5, chunks=3)\n rnd_test(rs.rayleigh, size=5, chunks=3)\n rnd_test(rs.random_sample, size=5, chunks=3)\n\n rnd_test(rs.triangular, 1, 2, 3, size=5, chunks=3)\n rnd_test(rs.uniform, size=5, chunks=3)\n rnd_test(rs.vonmises, 2, 3, size=5, chunks=3)\n rnd_test(rs.wald, 1, 2, size=5, chunks=3)\n\n rnd_test(rs.weibull, 2, size=5, chunks=3)\n rnd_test(rs.zipf, 2, size=5, chunks=3)\n\n rnd_test(rs.standard_cauchy, size=5, chunks=3)\n rnd_test(rs.standard_exponential, size=5, chunks=3)\n rnd_test(rs.standard_gamma, 2, size=5, chunks=3)\n rnd_test(rs.standard_normal, size=5, chunks=3)\n rnd_test(rs.standard_t, 2, size=5, chunks=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_random.py_test_random_shapes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_random.py_test_random_shapes_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_random.py", "file_name": "test_cupy_random.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 70, "span_ids": ["test_random_shapes"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"shape\", [(2, 3), (2, 3, 4), (2, 3, 4, 2)])\ndef test_random_shapes(shape):\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n\n x = rs.poisson(size=shape, chunks=3)\n assert type(x._meta) == cupy.ndarray\n assert_eq(x, x) # Check that _meta and computed arrays match types\n assert x._meta.shape == (0,) * len(shape)\n assert x.shape == shape", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_reductions.py_test_nanarg_reductions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_reductions.py_test_nanarg_reductions_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_reductions.py", "file_name": "test_cupy_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 50, "end_line": 73, "span_ids": ["test_nanarg_reductions"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\n [\"dfunc\", \"func\"], [(da.nanargmin, np.nanargmin), (da.nanargmax, np.nanargmax)]\n)\ndef test_nanarg_reductions(dfunc, func):\n x = cupy.random.random((10, 10, 10))\n x[5] = cupy.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning) # All-NaN slice encountered\n with pytest.raises(ValueError):\n dfunc(a, 1).compute()\n\n with pytest.raises(ValueError):\n dfunc(a, 2).compute()\n\n x[:] = cupy.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n with pytest.raises(ValueError):\n dfunc(a).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_np_test_bincount.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_np_test_bincount.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 28, "span_ids": ["test_bincount", "imports"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\nfrom packaging.version import parse as parse_version\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq, same_keys\n\ncupy = pytest.importorskip(\"cupy\")\ncupy_version = parse_version(cupy.__version__)\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ (with https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_bincount():\n x = cupy.array([2, 1, 5, 2, 
1])\n d = da.from_array(x, chunks=2, asarray=False)\n e = da.bincount(d, minlength=6)\n assert_eq(e, np.bincount(x, minlength=6))\n assert same_keys(da.bincount(d, minlength=6), e)\n\n assert da.bincount(d, minlength=6).name != da.bincount(d, minlength=7).name\n assert da.bincount(d, minlength=6).name == da.bincount(d, minlength=6).name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_compress_test_compress.assert_eq_np_compress_c_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_compress_test_compress.assert_eq_np_compress_c_t", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 31, "end_line": 43, "span_ids": ["test_compress"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\ndef test_compress():\n carr = cupy.random.randint(0, 3, size=(10, 10))\n\n darr = da.from_array(carr, chunks=(20, 5))\n\n c = cupy.asarray([True])\n res = da.compress(c, darr, axis=0)\n\n # cupy.compress is not implemented, but the dask implementation does not\n # rely on np.compress -- move the original data back to host and\n # compare da.compress with np.compress\n assert_eq(np.compress(c.tolist(), carr.tolist(), axis=0), res, check_type=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_test_diff.assert_eq_da_diff_a_n_a", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 46, "end_line": 55, "span_ids": ["test_diff"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"shape, axis\",\n [[(10, 15, 20), 0], [(10, 15, 20), 1], [(10, 15, 20), 2], [(10, 15, 20), -1]],\n)\n@pytest.mark.parametrize(\"n\", [0, 1, 2])\ndef test_diff(shape, n, axis):\n x = cupy.random.randint(0, 10, shape)\n a = da.from_array(x, chunks=(len(shape) * (5,)))\n\n assert_eq(da.diff(a, n, axis), cupy.diff(x, n, axis))", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_prepend_test_diff_prepend.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_prepend_cup": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_prepend_test_diff_prepend.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_prepend_cup", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 58, "end_line": 84, "span_ids": ["test_diff_prepend"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"n\", [0, 1, 2])\ndef test_diff_prepend(n):\n x = cupy.arange(5) + 1\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, prepend=0), cupy.diff(x, n, prepend=0))\n assert_eq(da.diff(a, n, prepend=[0]), cupy.diff(x, n, prepend=[0]))\n assert_eq(da.diff(a, n, prepend=[-1, 0]), cupy.diff(x, n, prepend=[-1, 0]))\n\n x = cupy.arange(16).reshape(4, 4)\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, axis=1, prepend=0), cupy.diff(x, n, axis=1, prepend=0))\n assert_eq(\n da.diff(a, n, axis=1, prepend=[[0], [0], [0], [0]]),\n cupy.diff(x, n, axis=1, prepend=[[0], [0], [0], [0]]),\n )\n assert_eq(da.diff(a, n, axis=0, prepend=0), cupy.diff(x, n, axis=0, prepend=0))\n assert_eq(\n da.diff(a, n, axis=0, prepend=[[0, 0, 0, 0]]),\n cupy.diff(x, n, axis=0, prepend=[[0, 0, 0, 0]]),\n )\n\n if n > 0:\n # When order is 0 the result is the icupyut array, it doesn't raise\n # an error\n with pytest.raises(ValueError):\n da.diff(a, n, prepend=cupy.zeros((3, 3)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_append_test_diff_append.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_append_cupy": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_diff_append_test_diff_append.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_append_cupy", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 113, "span_ids": ["test_diff_append"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"n\", [0, 1, 2])\ndef test_diff_append(n):\n x = cupy.arange(5) + 1\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, append=0), cupy.diff(x, n, append=0))\n assert_eq(da.diff(a, n, append=[0]), cupy.diff(x, n, append=[0]))\n assert_eq(da.diff(a, n, append=[-1, 0]), cupy.diff(x, n, append=[-1, 0]))\n\n x = cupy.arange(16).reshape(4, 4)\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, axis=1, append=0), cupy.diff(x, n, axis=1, append=0))\n assert_eq(\n da.diff(a, n, axis=1, append=[[0], [0], [0], [0]]),\n cupy.diff(x, n, axis=1, append=[[0], [0], [0], [0]]),\n )\n assert_eq(da.diff(a, n, axis=0, append=0), cupy.diff(x, n, axis=0, append=0))\n assert_eq(\n da.diff(a, n, axis=0, append=[[0, 0, 0, 0]]),\n cupy.diff(x, n, axis=0, append=[[0, 0, 0, 0]]),\n )\n\n if n > 0:\n with pytest.raises(ValueError):\n # When order is 0 the result is the icupyut array, it doesn't raise\n # an error\n da.diff(a, n, append=cupy.zeros((3, 3)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_digitize_test_digitize.for_chunks_in_10_10_.for_right_in_False_True.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 141, "span_ids": ["test_digitize"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"bins_type\", [np, cupy])\ndef test_digitize(bins_type):\n x = cupy.array([2, 4, 5, 6, 1])\n bins = bins_type.array([1, 2, 3, 4, 5])\n for chunks in [2, 4]:\n for right in [False, True]:\n d = da.from_array(x, chunks=chunks)\n bins_cupy = cupy.array(bins)\n assert_eq(\n da.digitize(d, bins, right=right),\n np.digitize(x, bins_cupy, right=right),\n check_type=False,\n )\n\n x = cupy.random.random(size=(100, 100))\n bins = bins_type.random.random(size=13)\n bins.sort()\n for chunks in [(10, 10), (10, 20), (13, 17), (87, 54)]:\n for right in [False, True]:\n d = da.from_array(x, chunks=chunks)\n bins_cupy = cupy.array(bins)\n assert_eq(\n da.digitize(d, bins, right=right),\n np.digitize(x, bins_cupy, right=right),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_25_20_9_.assert_eq_da_tril_dA_k_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_tril_triu_test_tril_triu.for_chk_in_5_4_.for_k_in_25_20_9_.assert_eq_da_tril_dA_k_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 144, "end_line": 159, "span_ids": ["test_tril_triu"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ (with https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_tril_triu():\n A = cupy.random.randn(20, 20)\n for chk in [5, 4]:\n dA = da.from_array(A, (chk, chk), asarray=False)\n\n assert_eq(da.triu(dA), np.triu(A))\n assert_eq(da.tril(dA), np.tril(A))\n\n for k in [-25, -20, -9, -1, 1, 8, 19, 21]:\n assert_eq(da.triu(dA, k), np.triu(A, k))\n assert_eq(da.tril(dA, k), np.tril(A, k))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_tril_triu_non_square_arrays_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_tril_triu_non_square_arrays_test_tril_triu_non_square_arrays.assert_eq_da_tril_dA_np", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 162, "end_line": 171, "span_ids": ["test_tril_triu_non_square_arrays"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.skipif(\n cupy_version < parse_version(\"6.4.0\"),\n reason=\"Requires CuPy 6.4.0+ (with https://github.com/cupy/cupy/pull/2418)\",\n)\ndef test_tril_triu_non_square_arrays():\n A = cupy.random.randint(0, 11, (30, 35))\n dA = da.from_array(A, chunks=(5, 5), asarray=False)\n assert_eq(da.triu(dA), np.triu(A))\n assert_eq(da.tril(dA), np.tril(A))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_unique_kwargs_test_unique_kwargs.if_any_kwargs_values_.with_pytest_raises_ValueE._test_unique_kwargs_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_unique_kwargs_test_unique_kwargs.if_any_kwargs_values_.with_pytest_raises_ValueE._test_unique_kwargs_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 174, "end_line": 211, "span_ids": ["test_unique_kwargs"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"return_index\", [False, True])\n@pytest.mark.parametrize(\"return_inverse\", [False, True])\n@pytest.mark.parametrize(\"return_counts\", [False, True])\ndef test_unique_kwargs(return_index, return_inverse, return_counts):\n kwargs = dict(\n return_index=return_index,\n return_inverse=return_inverse,\n return_counts=return_counts,\n )\n\n a = cupy.array([1, 2, 4, 4, 5, 2])\n d = da.from_array(a, chunks=(3,))\n\n def _test_unique_kwargs():\n r_a = np.unique(a, **kwargs)\n r_d = da.unique(d, **kwargs)\n\n if not any([return_index, return_inverse, return_counts]):\n assert isinstance(r_a, cupy.ndarray)\n assert isinstance(r_d, da.Array)\n\n r_a = (r_a,)\n r_d = (r_d,)\n\n assert len(r_a) == len(r_d)\n\n if return_inverse:\n i = 1 + int(return_index)\n assert (d.size,) == r_d[i].shape\n\n for e_r_a, e_r_d in zip(r_a, r_d):\n assert_eq(e_r_d, e_r_a)\n\n # `return_index`, `return_inverse` and `return_counts` are currently\n # unsupported on CuPy-backed Dask arrays.\n if any(kwargs.values()):\n with pytest.raises(ValueError):\n _test_unique_kwargs()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_unique_rand_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_routines.py_test_unique_rand_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_routines.py", "file_name": "test_cupy_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 229, "span_ids": ["test_unique_rand"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"seed\", [23, 796])\n@pytest.mark.parametrize(\"low, high\", [[0, 10]])\n@pytest.mark.parametrize(\n \"shape, chunks\",\n [[(10,), (5,)], [(10,), (3,)], [(4, 5), (3, 2)], [(20, 20), (4, 5)]],\n)\ndef test_unique_rand(seed, low, high, shape, chunks):\n cupy.random.seed(seed)\n\n a = cupy.random.randint(low, high, size=shape)\n d = da.from_array(a, chunks=chunks)\n\n r_a = np.unique(a)\n r_d = da.unique(d)\n assert_eq(r_d, r_a)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_np_test_index_with_int_dask_array.assert_eq_x_T_idx_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_np_test_index_with_int_dask_array.assert_eq_x_T_idx_ex", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 31, "span_ids": ["imports", "test_index_with_int_dask_array"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pytest\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq\n\ncupy = pytest.importorskip(\"cupy\")\n\n\n@pytest.mark.parametrize(\"idx_chunks\", [None, 3, 2, 1])\n@pytest.mark.parametrize(\"x_chunks\", [(3, 5), (2, 3), (1, 2), (1, 1)])\ndef test_index_with_int_dask_array(x_chunks, idx_chunks):\n # test data is crafted to stress use cases:\n # - pick from different chunks of x out of order\n # - a chunk of x contains no matches\n # - only one chunk of x\n x = cupy.array(\n [[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]\n )\n idx = cupy.array([3, 0, 1])\n expect = cupy.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])\n\n x = da.from_array(x, chunks=x_chunks)\n if idx_chunks is not None:\n idx = da.from_array(idx, chunks=idx_chunks)\n\n assert_eq(x[:, idx], expect)\n assert_eq(x.T[idx, :], expect.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_0d_test_index_with_int_dask_array_0d.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 69, "end_line": 81, "span_ids": ["test_index_with_int_dask_array_0d"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"chunks\", [1, 2, 3])\ndef test_index_with_int_dask_array_0d(chunks):\n # Slice by 0-dimensional array\n x = da.from_array(cupy.array([[10, 
20, 30], [40, 50, 60]]), chunks=chunks)\n idx0 = da.from_array(1, chunks=1)\n assert_eq(x[idx0, :], x[1, :])\n assert_eq(x[:, idx0], x[:, 1])\n\n # CuPy index\n idx0 = da.from_array(cupy.array(1), chunks=1)\n assert_eq(x[idx0, :], x[1, :])\n assert_eq(x[:, idx0], x[:, 1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nanchunks_test_index_with_int_dask_array_nanchunks.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 93, "span_ids": ["test_index_with_int_dask_array_nanchunks"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.skip(\"dask.Array.nonzero() doesn't support non-NumPy arrays yet\")\n@pytest.mark.parametrize(\"chunks\", [1, 2, 3, 4, 5])\ndef test_index_with_int_dask_array_nanchunks(chunks):\n # Slice by array with nan-sized chunks\n a = da.from_array(cupy.arange(-2, 3), chunks=chunks)\n assert_eq(a[a.nonzero()], cupy.array([-2, -1, 1, 2]))\n # Edge case: the nan-sized chunks resolve to size 0\n a = da.zeros_like(cupy.array(()), shape=5, chunks=chunks)\n assert_eq(a[a.nonzero()], cupy.array([]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_negindex.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_negindex_test_index_with_int_dask_array_negindex.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 96, "end_line": 105, "span_ids": ["test_index_with_int_dask_array_negindex"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"chunks\", [2, 4])\ndef test_index_with_int_dask_array_negindex(chunks):\n a = 
da.arange(4, chunks=chunks, like=cupy.array(()))\n idx = da.from_array([-1, -4], chunks=1)\n assert_eq(a[idx], cupy.array([3, 0]))\n\n # CuPy index\n idx = da.from_array(cupy.array([-1, -4]), chunks=1)\n assert_eq(a[idx], cupy.array([3, 0]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_indexerror_test_index_with_int_dask_array_indexerror.None_3.a_idx_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_indexerror_test_index_with_int_dask_array_indexerror.None_3.a_idx_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 108, "end_line": 125, "span_ids": ["test_index_with_int_dask_array_indexerror"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"chunks\", [2, 4])\ndef test_index_with_int_dask_array_indexerror(chunks):\n a = da.arange(4, chunks=chunks, like=cupy.array(()))\n idx = da.from_array([4], chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()\n idx = da.from_array([-5], chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()\n\n # CuPy indices\n idx = da.from_array(cupy.array([4]), chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()\n idx = da.from_array(cupy.array([-5]), chunks=1)\n with pytest.raises(IndexError):\n a[idx].compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_dtypes.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_dtypes_test_index_with_int_dask_array_dtypes.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 128, "end_line": 139, "span_ids": ["test_index_with_int_dask_array_dtypes"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\n \"dtype\", [\"int8\", 
\"int16\", \"int32\", \"int64\", \"uint8\", \"uint16\", \"uint32\", \"uint64\"]\n)\ndef test_index_with_int_dask_array_dtypes(dtype):\n a = da.from_array(cupy.array([10, 20, 30, 40]), chunks=-1)\n idx = da.from_array(np.array([1, 2]).astype(dtype), chunks=1)\n assert_eq(a[idx], cupy.array([20, 30]))\n\n # CuPy index\n idx = da.from_array(cupy.array([1, 2]).astype(dtype), chunks=1)\n assert_eq(a[idx], cupy.array([20, 30]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nocompute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nocompute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 155, "span_ids": ["test_index_with_int_dask_array_nocompute"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_with_int_dask_array_nocompute():\n \"\"\"Test that when the indices are a dask array\n they are not accidentally computed\n \"\"\"\n\n def crash():\n raise NotImplementedError()\n\n x = da.arange(5, chunks=-1, like=cupy.array(()))\n idx = da.Array({(\"x\", 0): (crash,)}, name=\"x\", chunks=((2,),), dtype=np.int64)\n result = x[idx]\n with pytest.raises(NotImplementedError):\n result.compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_pytest_test_sparse_hstack_vstack_csr.assert_eq_x_y_todense_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_pytest_test_sparse_hstack_vstack_csr.assert_eq_x_y_todense_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_sparse.py", "file_name": "test_cupy_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 22, "span_ids": ["test_sparse_hstack_vstack_csr", "imports"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytestmark = pytest.mark.gpu\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq\n\ncupy = pytest.importorskip(\"cupy\")\ncupyx = pytest.importorskip(\"cupyx\")\n\n\ndef test_sparse_hstack_vstack_csr():\n pytest.importorskip(\"cupyx\")\n x = cupy.arange(24, dtype=cupy.float32).reshape(4, 6)\n\n sp = 
da.from_array(x, chunks=(2, 3), asarray=False, fancy=False)\n sp = sp.map_blocks(cupyx.scipy.sparse.csr_matrix, dtype=cupy.float32)\n\n y = sp.compute()\n\n assert cupyx.scipy.sparse.isspmatrix(y)\n assert_eq(x, y.todense())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_test_sparse_concatenate_test_sparse_concatenate.assert_z_toarray_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_test_sparse_concatenate_test_sparse_concatenate.assert_z_toarray_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_sparse.py", "file_name": "test_cupy_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 25, "end_line": 51, "span_ids": ["test_sparse_concatenate"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [0, 1])\ndef test_sparse_concatenate(axis):\n pytest.importorskip(\"cupyx\")\n\n rs = da.random.RandomState(RandomState=cupy.random.RandomState)\n meta = cupyx.scipy.sparse.csr_matrix((0, 0))\n\n xs = []\n ys = []\n for i in range(2):\n x = rs.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n xs.append(x)\n ys.append(x.map_blocks(cupyx.scipy.sparse.csr_matrix, meta=meta))\n\n z = da.concatenate(ys, axis=axis)\n z = z.compute()\n\n if axis == 0:\n sp_concatenate = cupyx.scipy.sparse.vstack\n elif axis == 1:\n sp_concatenate = cupyx.scipy.sparse.hstack\n z_expected = sp_concatenate(\n [cupyx.scipy.sparse.csr_matrix(e.compute()) for e in xs]\n )\n\n assert (z.toarray() == z_expected.toarray()).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_test_sparse_dot_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_sparse.py_test_sparse_dot_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_sparse.py", "file_name": "test_cupy_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 54, "end_line": 80, "span_ids": ["test_sparse_dot"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"sp_format\", [\"csr\", \"csc\"])\ndef test_sparse_dot(sp_format):\n pytest.importorskip(\"cupyx\")\n\n if sp_format == \"csr\":\n sp_matrix = cupyx.scipy.sparse.csr_matrix\n elif sp_format == \"csc\":\n sp_matrix = cupyx.scipy.sparse.csc_matrix\n dtype = \"f\"\n 
density = 0.3\n x_shape, x_chunks = (4, 8), (2, 4)\n y_shape, y_chunks = (8, 6), (4, 3)\n x = cupy.random.random(x_shape, dtype=dtype)\n y = cupy.random.random(y_shape, dtype=dtype)\n x[x < 1 - density] = 0\n y[y < 1 - density] = 0\n z = x.dot(y)\n\n da_x = da.from_array(x, chunks=x_chunks, asarray=False, fancy=False)\n da_y = da.from_array(y, chunks=y_chunks, asarray=False, fancy=False)\n da_x = da_x.map_blocks(sp_matrix, dtype=dtype)\n da_y = da_y.map_blocks(sp_matrix, dtype=dtype)\n da_z = da.dot(da_x, da_y).compute()\n\n assert cupyx.scipy.sparse.isspmatrix(da_z)\n assert_eq(z, da_z.todense())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vectorize_whitespace_test_gufunc_vectorize_whitespace.gufoo_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_gufunc.py_test_gufunc_vectorize_whitespace_test_gufunc_vectorize_whitespace.gufoo_a_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_gufunc.py", "file_name": "test_gufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 335, "end_line": 358, "span_ids": ["test_gufunc_vectorize_whitespace"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_gufunc_vectorize_whitespace():\n # Regression test for https://github.com/dask/dask/issues/7972.\n # NumPy versions before https://github.com/numpy/numpy/pull/19627\n # would not ignore whitespace characters in `signature` like they\n # are supposed to. We remove the whitespace in Dask as a workaround.\n\n def foo(x, y):\n return (x + y).sum(axis=1)\n\n a = da.ones((8, 3, 5), chunks=(2, 3, 5), dtype=int)\n b = np.ones(5, dtype=int)\n x = apply_gufunc(foo, \"(m, n),(n)->(m)\", a, b, vectorize=True)\n\n assert_eq(x, np.full((8, 3), 10, dtype=int))\n\n a = da.random.random((6, 5, 5))\n\n @da.as_gufunc(signature=\"(n, n)->(n, n)\", output_dtypes=float, vectorize=True)\n def gufoo(x):\n return np.linalg.inv(x)\n\n # Previously calling `gufoo` would raise an error due to the whitespace\n # in its `signature`. 
Let's make sure it doesn't raise here.\n gufoo(a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_sys_test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_sys_test_tsqr.if_error_type_is_None_.else_.None_1.u_s_vh_tsqr_data_com", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 102, "span_ids": ["imports", "test_tsqr"], "tokens": 1127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nimport pytest\n\npytest.importorskip(\"numpy\")\npytest.importorskip(\"scipy\")\n\nimport numpy as np\nimport scipy.linalg\nfrom packaging.version import parse as parse_version\n\nimport dask.array as da\nfrom dask.array.linalg import qr, sfqr, svd, svd_compressed, tsqr\nfrom dask.array.numpy_compat import _np_version\nfrom dask.array.utils import assert_eq, same_keys, svd_flip\n\n\n@pytest.mark.parametrize(\n \"m,n,chunks,error_type\",\n [\n (20, 10, 10, None), # tall-skinny regular blocks\n (20, 10, (3, 10), None), # tall-skinny regular fat layers\n (20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers\n (40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)\n (128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1\n (\n 129,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2\n (\n 130,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (\n 131,\n 2,\n (16, 2),\n None,\n ), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next\n (300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2\n (300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3\n (300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4\n (10, 5, 10, None), # single block tall\n (5, 10, 10, None), # single block short\n (10, 10, 10, None), # single block square\n (10, 40, (10, 10), ValueError), # short-fat regular blocks\n (10, 40, (10, 15), ValueError), # short-fat irregular blocks\n (\n 10,\n 40,\n (10, (15, 5, 5, 8, 7)),\n ValueError,\n ), # short-fat non-uniform chunks (why?)\n (20, 20, 10, ValueError), # 2x2 regular blocks\n ],\n)\ndef test_tsqr(m, n, chunks, error_type):\n mat = np.random.rand(m, n)\n data = da.from_array(mat, chunks=chunks, name=\"A\")\n\n # qr\n m_q = m\n n_q = min(m, n)\n m_r = n_q\n n_r = n\n\n # svd\n m_u = m\n n_u = min(m, n)\n n_s = n_q\n m_vh = n_q\n n_vh = n\n d_vh = max(m_vh, n_vh) # full matrix returned\n\n if error_type is None:\n # test QR\n q, r = tsqr(data)\n assert_eq((m_q, n_q), q.shape) # shape check\n assert_eq((m_r, n_r), r.shape) # shape check\n assert_eq(mat, da.dot(q, r)) # accuracy check\n assert_eq(np.eye(n_q, n_q), da.dot(q.T, 
q)) # q must be orthonormal\n assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular\n\n # test SVD\n u, s, vh = tsqr(data, compute_svd=True)\n s_exact = np.linalg.svd(mat)[1]\n assert_eq(s, s_exact) # s must contain the singular values\n assert_eq((m_u, n_u), u.shape) # shape check\n assert_eq((n_s,), s.shape) # shape check\n assert_eq((d_vh, d_vh), vh.shape) # shape check\n assert_eq(np.eye(n_u, n_u), da.dot(u.T, u)) # u must be orthonormal\n assert_eq(np.eye(d_vh, d_vh), da.dot(vh, vh.T)) # vh must be orthonormal\n assert_eq(mat, da.dot(da.dot(u, da.diag(s)), vh[:n_q])) # accuracy check\n else:\n with pytest.raises(error_type):\n q, r = tsqr(data)\n with pytest.raises(error_type):\n u, s, vh = tsqr(data, compute_svd=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_roots_annotations_test_fuse_roots_annotations.assert_eq_za_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_fuse_roots_annotations_test_fuse_roots_annotations.assert_eq_za_z_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 426, "end_line": 438, "span_ids": ["test_fuse_roots_annotations"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fuse_roots_annotations():\n x = da.ones(10, chunks=(2,))\n y = da.zeros(10, chunks=(2,))\n\n with dask.annotate(foo=\"bar\"):\n y = y**2\n\n z = (x + 1) + (2 * y)\n hlg = dask.blockwise.optimize_blockwise(z.dask)\n assert len(hlg.layers) == 3\n assert {\"foo\": \"bar\"} in [l.annotations for l in hlg.layers.values()]\n za = da.Array(hlg, z.name, z.chunks, z.dtype)\n assert_eq(za, z)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_blockwise_duplicate_dependency_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_optimization.py_test_optimize_blockwise_duplicate_dependency_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_optimization.py", "file_name": "test_optimization.py", "file_type": "text/x-python", "category": "test", "start_line": 460, "end_line": 471, "span_ids": ["test_optimize_blockwise_duplicate_dependency"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"optimize_graph\", [True, False])\ndef test_optimize_blockwise_duplicate_dependency(optimize_graph):\n # Two blockwise operations in a row with duplicate name\n # (See: https://github.com/dask/dask/issues/8535)\n xx = da.from_array(np.array([[1, 1], [2, 2]]), chunks=1)\n xx = xx * 2\n z = da.matmul(xx, xx)\n\n # Compare to known answer\n result = z.compute(optimize_graph=optimize_graph)\n assert assert_eq(result, [[12, 12], [24, 24]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_different_depths_test_map_overlap_multiarray_uneven_numblocks_exception.with_pytest_raises_ValueE.da_map_overlap_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_multiarray_different_depths_test_map_overlap_multiarray_uneven_numblocks_exception.with_pytest_raises_ValueE.da_map_overlap_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 392, "end_line": 429, "span_ids": ["test_map_overlap_multiarray_uneven_numblocks_exception", "test_map_overlap_multiarray_different_depths"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_multiarray_different_depths():\n x = da.ones(5, dtype=\"int\")\n y = da.ones(5, dtype=\"int\")\n\n def run(depth):\n return da.map_overlap(\n lambda x, y: x.sum() + y.sum(),\n x,\n y,\n depth=depth,\n chunks=(0,),\n trim=False,\n boundary=\"reflect\",\n ).compute()\n\n # Check that the number of elements added\n # to arrays in overlap works as expected\n # when depths differ for each array\n assert run([0, 0]) == 10\n assert run([0, 1]) == 12\n assert run([1, 1]) == 14\n assert run([1, 2]) == 16\n assert run([0, 5]) == 20\n assert run([5, 5]) == 30\n\n # Ensure that depth > chunk size results in error\n with pytest.raises(ValueError):\n run([0, 6])\n\n\ndef test_map_overlap_multiarray_uneven_numblocks_exception():\n x = da.arange(10, chunks=(10,))\n y = da.arange(10, chunks=(5, 5))\n with pytest.raises(ValueError):\n # Fail with chunk alignment explicitly disabled\n da.map_overlap(\n lambda x, y: x + y, x, y, align_arrays=False, boundary=\"none\"\n ).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_trim_using_drop_axis_and_different_depths_test_map_overlap_trim_using_drop_axis_and_different_depths.assert_array_almost_equal": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_trim_using_drop_axis_and_different_depths_test_map_overlap_trim_using_drop_axis_and_different_depths.assert_array_almost_equal", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 479, "end_line": 517, "span_ids": ["test_map_overlap_trim_using_drop_axis_and_different_depths"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"drop_axis\",\n (\n (0,),\n (1,),\n (2,),\n (0, 1),\n (1, 2),\n (2, 0),\n 1,\n (-3,),\n (-2,),\n (-1,),\n (-3, -2),\n (-2, -1),\n (-1, -3),\n -2,\n ),\n)\ndef test_map_overlap_trim_using_drop_axis_and_different_depths(drop_axis):\n x = da.random.standard_normal((5, 10, 8), chunks=(2, 5, 4))\n\n def _mean(x):\n return x.mean(axis=drop_axis)\n\n expected = _mean(x)\n\n # unique boundary and depth value per axis\n boundary = (0, \"reflect\", \"nearest\")\n depth = (1, 3, 2)\n # to match expected result, dropped axes must have depth 0\n _drop_axis = (drop_axis,) if np.isscalar(drop_axis) else drop_axis\n _drop_axis = [d % x.ndim for d in _drop_axis]\n depth = tuple(0 if i in _drop_axis else d for i, d in enumerate(depth))\n\n y = da.map_overlap(\n _mean, x, depth=depth, boundary=boundary, drop_axis=drop_axis, dtype=float\n ).compute()\n assert_array_almost_equal(expected, y)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_pytest_test_percentile.if_internal_method_td.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_pytest_test_percentile.if_internal_method_td.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 57, "span_ids": ["test_percentile", "imports"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\npytest.importorskip(\"numpy\")\n\nimport numpy as np\n\nimport dask.array as da\nfrom dask.array.utils import assert_eq, same_keys\n\ntry:\n import crick\nexcept ImportError:\n crick = None\n\n\npercentile_internal_methods = pytest.mark.parametrize(\n \"internal_method\",\n [\n pytest.param(\n \"tdigest\", marks=pytest.mark.skipif(not crick, reason=\"Requires crick\")\n ),\n \"dask\",\n ],\n)\n\n\n@percentile_internal_methods\ndef test_percentile(internal_method):\n d = da.ones((16,), chunks=(4,))\n qs = [0, 50, 100]\n\n assert_eq(\n da.percentile(d, qs, 
internal_method=internal_method),\n np.array([1, 1, 1], dtype=d.dtype),\n )\n\n x = np.array([0, 0, 5, 5, 5, 5, 20, 20])\n d = da.from_array(x, chunks=(3,))\n\n result = da.percentile(d, qs, internal_method=internal_method)\n assert_eq(result, np.array([0, 5, 20], dtype=result.dtype))\n\n assert same_keys(\n da.percentile(d, qs, internal_method=internal_method),\n da.percentile(d, qs, internal_method=internal_method),\n )\n assert not same_keys(\n da.percentile(d, qs, internal_method=internal_method),\n da.percentile(d, [0, 50], internal_method=internal_method),\n )\n\n if internal_method != \"tdigest\":\n x = np.array([\"a\", \"a\", \"d\", \"d\", \"d\", \"e\"])\n d = da.from_array(x, chunks=(3,))\n assert_eq(\n da.percentile(d, [0, 50, 100]), np.array([\"a\", \"d\", \"e\"], dtype=x.dtype)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_empty_arrays_test_percentiles_with_empty_q.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_empty_arrays_test_percentiles_with_empty_q.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 94, "span_ids": ["test_percentiles_with_empty_q", "test_percentiles_with_empty_arrays"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@percentile_internal_methods\ndef test_percentiles_with_empty_arrays(internal_method):\n x = da.ones(10, chunks=((5, 0, 5),))\n assert_eq(\n da.percentile(x, [10, 50, 90], internal_method=internal_method),\n np.array([1, 1, 1], dtype=x.dtype),\n )\n\n\n@percentile_internal_methods\ndef test_percentiles_with_empty_q(internal_method):\n x = da.ones(10, chunks=((5, 0, 5),))\n assert_eq(\n da.percentile(x, [], internal_method=internal_method),\n np.array([], dtype=x.dtype),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_scaler_percentile_test_percentiles_with_scaler_percentile.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_percentiles.py_test_percentiles_with_scaler_percentile_test_percentiles_with_scaler_percentile.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_percentiles.py", "file_name": "test_percentiles.py", "file_type": "text/x-python", "category": "test", "start_line": 97, "end_line": 106, "span_ids": ["test_percentiles_with_scaler_percentile"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@percentile_internal_methods\n@pytest.mark.parametrize(\"q\", [5, 5.0, np.int64(5), np.float64(5)])\ndef test_percentiles_with_scaler_percentile(internal_method, q):\n # Regression test to ensure da.percentile works with scalar percentiles\n # See #3020\n d = da.ones((16,), chunks=(4,))\n assert_eq(\n da.percentile(d, q, internal_method=internal_method),\n np.array([1], dtype=d.dtype),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_0d_test_test_reductions_0D.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_0d_test_test_reductions_0D.None_15", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 81, "span_ids": ["test_reductions_0D", "reduction_0d_test"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction_0d_test(da_func, darr, np_func, narr):\n expected = np_func(narr)\n actual = da_func(darr)\n\n assert_eq(actual, expected)\n assert_eq(da_func(narr), expected) # Ensure Dask reductions work with NumPy arrays\n assert actual.size == 1\n\n\ndef test_reductions_0D():\n x = np.int_(3) # np.int_ has a dtype attribute, np.int does not.\n a = da.from_array(x, chunks=(1,))\n\n reduction_0d_test(da.sum, a, np.sum, x)\n reduction_0d_test(da.prod, a, np.prod, x)\n reduction_0d_test(da.mean, a, np.mean, x)\n reduction_0d_test(da.var, a, np.var, x)\n reduction_0d_test(da.std, a, np.std, x)\n reduction_0d_test(da.min, a, np.min, x)\n reduction_0d_test(da.max, a, np.max, x)\n reduction_0d_test(da.any, a, np.any, x)\n reduction_0d_test(da.all, a, np.all, x)\n\n reduction_0d_test(da.nansum, a, np.nansum, x)\n reduction_0d_test(da.nanprod, a, np.nanprod, x)\n reduction_0d_test(da.nanmean, a, np.mean, x)\n reduction_0d_test(da.nanvar, a, np.var, x)\n reduction_0d_test(da.nanstd, a, np.std, x)\n reduction_0d_test(da.nanmin, a, np.nanmin, x)\n reduction_0d_test(da.nanmax, a, np.nanmax, x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_object_test_nan_object.with_warnings_catch_warni.assert_eq_np_array_getatt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nan_object_test_nan_object.with_warnings_catch_warni.assert_eq_np_array_getatt", 
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 425, "end_line": 454, "span_ids": ["test_nan_object"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"nansum\", \"sum\", \"nanmin\", \"min\", \"nanmax\", \"max\"])\ndef test_nan_object(func):\n with warnings.catch_warnings():\n if os.name == \"nt\" and func in {\"min\", \"max\"}:\n # RuntimeWarning: invalid value encountered in reduce in wrapreduction\n # from NumPy.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n\n x = np.array([[1, np.nan, 3, 4], [5, 6, 7, np.nan], [9, 10, 11, 12]]).astype(\n object\n )\n d = da.from_array(x, chunks=(2, 2))\n\n if func in {\"nanmin\", \"nanmax\"}:\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n\n assert_eq(getattr(np, func)(x, axis=()), getattr(da, func)(d, axis=()))\n\n if func in {\"nanmin\", \"nanmax\"}:\n warnings.simplefilter(\"default\", RuntimeWarning)\n\n if func in {\"min\", \"max\"}:\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(getattr(np, func)(x, axis=0), getattr(da, func)(d, axis=0))\n if os.name != \"nt\" and func in {\"min\", \"max\"}:\n warnings.simplefilter(\"default\", RuntimeWarning)\n\n assert_eq(getattr(np, func)(x, axis=1), getattr(da, func)(d, axis=1))\n # wrap the scalar in a numpy array since the dask version cannot know dtype\n assert_eq(np.array(getattr(np, func)(x)).astype(object), getattr(da, func)(d))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_does_not_rechunk_if_whole_axis_in_one_chunk_test_object_reduction.assert_result_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_median_does_not_rechunk_if_whole_axis_in_one_chunk_test_object_reduction.assert_result_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 737, "end_line": 757, "span_ids": ["test_object_reduction", "test_median_does_not_rechunk_if_whole_axis_in_one_chunk"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"median\", \"nanmedian\"])\n@pytest.mark.parametrize(\"axis\", [0, [0, 2], 1])\ndef test_median_does_not_rechunk_if_whole_axis_in_one_chunk(axis, func):\n x = np.arange(100).reshape((2, 5, 10))\n d = da.from_array(x, chunks=(2, 1, 10))\n\n actual = getattr(da, func)(d, axis=axis)\n expected = getattr(np, func)(x, axis=axis)\n assert_eq(actual, expected)\n does_rechunk 
= \"rechunk\" in str(dict(actual.__dask_graph__()))\n if axis == 1:\n assert does_rechunk\n else:\n assert not does_rechunk\n\n\n@pytest.mark.parametrize(\"method\", [\"sum\", \"mean\", \"prod\"])\ndef test_object_reduction(method):\n arr = da.ones(1).astype(object)\n result = getattr(arr, method)().compute()\n assert result == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_empty_chunk_nanmin_nanmax_test_empty_chunk_nanmin_nanmax.assert_eq_getattr_da_fun": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_empty_chunk_nanmin_nanmax_test_empty_chunk_nanmin_nanmax.assert_eq_getattr_da_fun", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 760, "end_line": 773, "span_ids": ["test_empty_chunk_nanmin_nanmax"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"nanmin\", \"nanmax\"])\ndef test_empty_chunk_nanmin_nanmax(func):\n # see https://github.com/dask/dask/issues/8352\n x = np.arange(10).reshape(2, 5)\n d = da.from_array(x, chunks=2)\n x = x[x > 4]\n d = d[d > 4]\n block_lens = np.array([len(x.compute()) for x in d.blocks])\n assert 0 in block_lens\n with pytest.raises(ValueError) as err:\n getattr(da, func)(d)\n assert \"Arrays chunk sizes are unknown\" in str(err)\n d = d.compute_chunk_sizes()\n assert_eq(getattr(da, func)(d), getattr(np, func)(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_empty_chunk_nanmin_nanmax_raise_test_empty_chunk_nanmin_nanmax_raise.assert_str_err_np_value_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_empty_chunk_nanmin_nanmax_raise_test_empty_chunk_nanmin_nanmax_raise.assert_str_err_np_value_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 776, "end_line": 789, "span_ids": ["test_empty_chunk_nanmin_nanmax_raise"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"func\", [\"nanmin\", \"nanmax\"])\ndef test_empty_chunk_nanmin_nanmax_raise(func):\n # see https://github.com/dask/dask/issues/8352\n x = 
np.arange(10).reshape(2, 5)\n d = da.from_array(x, chunks=2)\n d = d[d > 9]\n x = x[x > 9]\n d = d.compute_chunk_sizes()\n with pytest.raises(ValueError) as err_np:\n getattr(np, func)(x)\n with pytest.raises(ValueError) as err_da:\n d = getattr(da, func)(d)\n d.compute()\n assert str(err_np.value) == str(err_da.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_prepend_test_diff_prepend.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_prepend_np_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_prepend_test_diff_prepend.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_prepend_np_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 521, "end_line": 546, "span_ids": ["test_diff_prepend"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [0, 1, 2])\ndef test_diff_prepend(n):\n x = np.arange(5) + 1\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, prepend=0), np.diff(x, n, prepend=0))\n assert_eq(da.diff(a, n, prepend=[0]), np.diff(x, n, prepend=[0]))\n assert_eq(da.diff(a, n, prepend=[-1, 0]), np.diff(x, n, prepend=[-1, 0]))\n\n x = np.arange(16).reshape(4, 4)\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, axis=1, prepend=0), np.diff(x, n, axis=1, prepend=0))\n assert_eq(\n da.diff(a, n, axis=1, prepend=[[0], [0], [0], [0]]),\n np.diff(x, n, axis=1, prepend=[[0], [0], [0], [0]]),\n )\n assert_eq(da.diff(a, n, axis=0, prepend=0), np.diff(x, n, axis=0, prepend=0))\n assert_eq(\n da.diff(a, n, axis=0, prepend=[[0, 0, 0, 0]]),\n np.diff(x, n, axis=0, prepend=[[0, 0, 0, 0]]),\n )\n\n if n > 0:\n # When order is 0 the result is the input array, it doesn't raise\n # an error\n with pytest.raises(ValueError):\n da.diff(a, n, prepend=np.zeros((3, 3)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_append_test_diff_append.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_append_np_z": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_append_test_diff_append.if_n_0_.with_pytest_raises_ValueE.da_diff_a_n_append_np_z", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 549, "end_line": 574, "span_ids": ["test_diff_append"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"n\", [0, 1, 2])\ndef test_diff_append(n):\n x = np.arange(5) + 1\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, append=0), np.diff(x, n, append=0))\n assert_eq(da.diff(a, n, append=[0]), np.diff(x, n, append=[0]))\n assert_eq(da.diff(a, n, append=[-1, 0]), np.diff(x, n, append=[-1, 0]))\n\n x = np.arange(16).reshape(4, 4)\n a = da.from_array(x, chunks=2)\n assert_eq(da.diff(a, n, axis=1, append=0), np.diff(x, n, axis=1, append=0))\n assert_eq(\n da.diff(a, n, axis=1, append=[[0], [0], [0], [0]]),\n np.diff(x, n, axis=1, append=[[0], [0], [0], [0]]),\n )\n assert_eq(da.diff(a, n, axis=0, append=0), np.diff(x, n, axis=0, append=0))\n assert_eq(\n da.diff(a, n, axis=0, append=[[0, 0, 0, 0]]),\n np.diff(x, n, axis=0, append=[[0, 0, 0, 0]]),\n )\n\n if n > 0:\n with pytest.raises(ValueError):\n # When order is 0 the result is the input array, it doesn't raise\n # an error\n da.diff(a, n, append=np.zeros((3, 3)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_negative_order_test_ediff1d.assert_eq_da_ediff1d_a_t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_diff_negative_order_test_ediff1d.assert_eq_da_ediff1d_a_t", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 577, "end_line": 588, "span_ids": ["test_ediff1d", "test_diff_negative_order"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diff_negative_order():\n with pytest.raises(ValueError):\n da.diff(da.arange(10), -1)\n\n\n@pytest.mark.parametrize(\"shape\", [(10,), (10, 15)])\n@pytest.mark.parametrize(\"to_end, to_begin\", [[None, None], [0, 0], [[1, 2], [3, 4]]])\ndef test_ediff1d(shape, to_end, to_begin):\n x = np.random.randint(0, 10, shape)\n a = da.from_array(x, chunks=(len(shape) * (5,)))\n\n assert_eq(da.ediff1d(a, to_end, to_begin), np.ediff1d(x, to_end, to_begin))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_bins_test_histogram_delayed_n_bins_raises_with_density.with_pytest_raises_.da_histogram_data_bins_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram_delayed_bins_test_histogram_delayed_n_bins_raises_with_density.with_pytest_raises_.da_histogram_data_bins_d", 
"embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 859, "end_line": 899, "span_ids": ["test_histogram_delayed_bins", "test_histogram_delayed_n_bins_raises_with_density"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"density\", [True, False])\n@pytest.mark.parametrize(\"weighted\", [True, False])\ndef test_histogram_delayed_bins(density, weighted):\n n = 100\n v = np.random.random(n)\n bins = np.array([0, 0.2, 0.5, 0.8, 1])\n\n vd = da.from_array(v, chunks=10)\n bins_d = da.from_array(bins, chunks=2)\n\n if weighted:\n weights = np.random.random(n)\n weights_d = da.from_array(weights, chunks=vd.chunks)\n\n hist_d, bins_d2 = da.histogram(\n vd,\n bins=bins_d,\n range=[bins_d[0], bins_d[-1]],\n density=density,\n weights=weights_d if weighted else None,\n )\n\n hist, bins = np.histogram(\n v,\n bins=bins,\n range=[bins[0], bins[-1]],\n density=density,\n weights=weights if weighted else None,\n )\n\n assert bins_d is bins_d2\n assert_eq(hist_d, hist)\n assert_eq(bins_d2, bins)\n\n\ndef test_histogram_delayed_n_bins_raises_with_density():\n data = da.random.random(10, chunks=2)\n with pytest.raises(\n NotImplementedError, match=\"`bins` cannot be a scalar Dask object\"\n ):\n da.histogram(data, bins=da.array(10), range=[0, 1], density=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram2d_test_histogram2d.assert_a1_compute_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram2d_test_histogram2d.assert_a1_compute_shape", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 902, "end_line": 931, "span_ids": ["test_histogram2d"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"weights\", [True, False])\n@pytest.mark.parametrize(\"density\", [True, False])\n@pytest.mark.parametrize(\"bins\", [(5, 6), 5])\ndef test_histogram2d(weights, density, bins):\n n = 800\n b = bins\n r = ((0, 1), (0, 1))\n x = da.random.uniform(0, 1, size=(n,), chunks=(200,))\n y = da.random.uniform(0, 1, size=(n,), chunks=(200,))\n w = da.random.uniform(0.2, 1.1, size=(n,), chunks=(200,)) if weights else None\n a1, b1x, b1y = da.histogram2d(x, y, bins=b, range=r, density=density, weights=w)\n a2, b2x, b2y = np.histogram2d(x, y, bins=b, range=r, density=density, weights=w)\n a3, b3x, b3y = np.histogram2d(\n x.compute(),\n y.compute(),\n 
bins=b,\n range=r,\n density=density,\n weights=w.compute() if weights else None,\n )\n assert_eq(a1, a2)\n assert_eq(a1, a3)\n if not (weights or density):\n assert a1.sum() == n\n assert a2.sum() == n\n assert same_keys(\n da.histogram2d(x, y, bins=b, range=r, density=density, weights=w)[0],\n a1,\n )\n assert a1.compute().shape == a3.shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram2d_array_bins_test_histogram2d_array_bins.assert_a1_compute_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogram2d_array_bins_test_histogram2d_array_bins.assert_a1_compute_shape", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 934, "end_line": 962, "span_ids": ["test_histogram2d_array_bins"], "tokens": 370}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"weights\", [True, False])\n@pytest.mark.parametrize(\"density\", [True, False])\ndef test_histogram2d_array_bins(weights, density):\n n = 800\n xbins = [0.0, 0.2, 0.6, 0.9, 1.0]\n ybins = [0.0, 0.1, 0.4, 0.5, 1.0]\n b = [xbins, ybins]\n x = da.random.uniform(0, 1, size=(n,), chunks=(200,))\n y = da.random.uniform(0, 1, size=(n,), chunks=(200,))\n w = da.random.uniform(0.2, 1.1, size=(n,), chunks=(200,)) if weights else None\n a1, b1x, b1y = da.histogram2d(x, y, bins=b, density=density, weights=w)\n a2, b2x, b2y = np.histogram2d(x, y, bins=b, density=density, weights=w)\n a3, b3x, b3y = np.histogram2d(\n x.compute(),\n y.compute(),\n bins=b,\n density=density,\n weights=w.compute() if weights else None,\n )\n assert_eq(a1, a2)\n assert_eq(a1, a3)\n if not (weights or density):\n assert a1.sum() == n\n assert a2.sum() == n\n assert same_keys(\n da.histogram2d(x, y, bins=b, density=density, weights=w)[0],\n a1,\n )\n assert a1.compute().shape == a3.shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_test_histogramdd.assert_a1_compute_shape": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_histogramdd_test_histogramdd.assert_a1_compute_shape", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 965, "end_line": 977, "span_ids": ["test_histogramdd"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_histogramdd():\n n1, n2 = 800, 3\n x = da.random.uniform(0, 1, size=(n1, n2), chunks=(200, 3))\n bins = [[0, 0.5, 1], [0, 0.25, 0.85, 1], [0, 0.5, 0.8, 1]]\n (a1, b1) = da.histogramdd(x, bins=bins)\n (a2, b2) = np.histogramdd(x, bins=bins)\n (a3, b3) = np.histogramdd(x.compute(), bins=bins)\n assert_eq(a1, a2)\n assert_eq(a1, a3)\n assert a1.sum() == n1\n assert a2.sum() == n1\n assert same_keys(da.histogramdd(x, bins=bins)[0], a1)\n assert a1.compute().shape == a3.shape", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_test_roll.if__maybe_len_shift__.else_.assert_eq_np_roll_x_shif": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_test_roll.if__maybe_len_shift__.else_.assert_eq_np_roll_x_shif", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1317, "end_line": 1328, "span_ids": ["test_roll"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", [(4, 6), (2, 6)])\n@pytest.mark.parametrize(\"shift\", [3, 7, 9, (3, 9), (7, 2)])\n@pytest.mark.parametrize(\"axis\", [None, 0, 1, -1, (0, 1), (1, 0)])\ndef test_roll(chunks, shift, axis):\n x = np.random.randint(10, size=(4, 6))\n a = da.from_array(x, chunks=chunks)\n\n if _maybe_len(shift) != _maybe_len(axis):\n with pytest.raises(TypeError if axis is None else ValueError):\n da.roll(a, shift, axis)\n else:\n assert_eq(np.roll(x, shift, axis), da.roll(a, shift, axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_itertools_getitem": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_itertools_getitem", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 26, "span_ids": ["imports"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import itertools\nimport warnings\n\nimport pytest\nfrom tlz import merge\n\nnp = pytest.importorskip(\"numpy\")\n\nimport dask\nimport 
dask.array as da\nfrom dask import config\nfrom dask.array.slicing import (\n _sanitize_index_element,\n _slice_1d,\n make_block_sorted_slices,\n new_blockdim,\n normalize_index,\n sanitize_index,\n shuffle_slice,\n slice_array,\n slicing_plan,\n take,\n)\nfrom dask.array.utils import assert_eq, same_keys\n\nfrom ..chunk import getitem", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_random_if_sparse_.pytest_importorskip_numb": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_random_if_sparse_.pytest_importorskip_numb", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 17, "span_ids": ["imports"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import random\n\nimport numpy as np\nimport pytest\nfrom packaging.version import parse as parse_version\n\nimport dask\nimport dask.array as da\nfrom dask.array.utils import assert_eq\n\nsparse = pytest.importorskip(\"sparse\")\nSPARSE_VERSION = parse_version(sparse.__version__)\nif sparse:\n # Test failures on older versions of Numba.\n # Conda-Forge provides 0.35.0 on windows right now, causing failures like\n # searchsorted() got an unexpected keyword argument 'side'\n pytest.importorskip(\"numba\", minversion=\"0.40.0\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_functions_functions._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_functions_functions._", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 20, "end_line": 86, "span_ids": ["imports"], "tokens": 684}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "functions = [\n lambda x: x,\n lambda x: da.expm1(x),\n lambda x: 2 * x,\n lambda x: x / 2,\n lambda x: x**2,\n lambda x: x + x,\n lambda x: x * x,\n lambda x: x[0],\n lambda x: x[:, 1],\n lambda x: x[:1, None, 1:3],\n lambda x: x.T,\n lambda x: da.transpose(x, (1, 2, 0)),\n lambda x: x.sum(),\n lambda x: x.moment(order=0),\n pytest.param(\n lambda x: x.mean(),\n marks=pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/7169\"),\n ),\n pytest.param(\n lambda x: x.std(),\n 
marks=pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/7169\"),\n ),\n pytest.param(\n lambda x: x.var(),\n marks=pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/7169\"),\n ),\n lambda x: x.dot(np.arange(x.shape[-1])),\n lambda x: x.dot(np.eye(x.shape[-1])),\n lambda x: da.tensordot(x, np.ones(x.shape[:2]), axes=[(0, 1), (0, 1)]),\n lambda x: x.sum(axis=0),\n lambda x: x.max(axis=0),\n lambda x: x.sum(axis=(1, 2)),\n lambda x: x.astype(np.complex128),\n lambda x: x.map_blocks(lambda x: x * 2),\n lambda x: x.map_overlap(lambda x: x * 2, depth=0, trim=True, boundary=\"none\"),\n lambda x: x.map_overlap(lambda x: x * 2, depth=0, trim=False, boundary=\"none\"),\n lambda x: x.round(1),\n lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),\n lambda x: abs(x),\n lambda x: x > 0.5,\n lambda x: x.rechunk((4, 4, 4)),\n lambda x: x.rechunk((2, 2, 1)),\n lambda x: np.isneginf(x),\n lambda x: np.isposinf(x),\n pytest.param(\n lambda x: np.zeros_like(x),\n marks=pytest.mark.xfail(\n SPARSE_VERSION < parse_version(\"0.13.0\"),\n reason=\"https://github.com/pydata/xarray/issues/5654\",\n ),\n ),\n pytest.param(\n lambda x: np.ones_like(x),\n marks=pytest.mark.xfail(\n SPARSE_VERSION < parse_version(\"0.13.0\"),\n reason=\"https://github.com/pydata/xarray/issues/5654\",\n ),\n ),\n pytest.param(\n lambda x: np.full_like(x, fill_value=2),\n marks=pytest.mark.xfail(\n SPARSE_VERSION < parse_version(\"0.13.0\"),\n reason=\"https://github.com/pydata/xarray/issues/5654\",\n ),\n ),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_metadata_test_metadata.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_sparse.py_test_metadata_test_metadata.None_13", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_sparse.py", "file_name": "test_sparse.py", "file_type": "text/x-python", "category": "test", "start_line": 178, "end_line": 196, "span_ids": ["test_metadata"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_metadata():\n y = da.random.random((10, 10), chunks=(5, 5))\n y[y < 0.8] = 0\n z = sparse.COO.from_numpy(y.compute())\n y = y.map_blocks(sparse.COO.from_numpy)\n\n assert isinstance(y._meta, sparse.COO)\n assert isinstance((y + 1)._meta, sparse.COO)\n assert isinstance(y.sum(axis=0)._meta, sparse.COO)\n assert isinstance(y.var(axis=0)._meta, sparse.COO)\n assert isinstance(y[:5, ::2]._meta, sparse.COO)\n assert isinstance(y.rechunk((2, 2))._meta, sparse.COO)\n assert isinstance((y - z)._meta, sparse.COO)\n assert isinstance(y.persist()._meta, sparse.COO)\n assert isinstance(np.concatenate([y, y])._meta, sparse.COO)\n assert isinstance(np.concatenate([y, y[:0], y])._meta, sparse.COO)\n assert isinstance(np.stack([y, y])._meta, sparse.COO)\n assert isinstance(np.stack([y[:0], y[:0]])._meta, sparse.COO)\n assert isinstance(np.concatenate([y[:0], y[:0]])._meta, sparse.COO)", "start_char_idx": null, 
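The test_metadata chunk above exercises one pattern worth calling out: once map_blocks swaps each numpy chunk for a sparse.COO, the array's `_meta` placeholder records the new chunk type and subsequent operations propagate it. A minimal sketch of that pattern, assuming the `sparse` package is installed (the variable names are illustrative, not from the docstore):

import numpy as np
import sparse
import dask.array as da

y = da.random.random((10, 10), chunks=(5, 5))
y[y < 0.8] = 0                            # zero out most entries so the COO chunks stay small
y = y.map_blocks(sparse.COO.from_numpy)   # each 5x5 chunk becomes a sparse.COO
assert isinstance(y._meta, sparse.COO)              # meta records the chunk type
assert isinstance((y + 1)._meta, sparse.COO)        # elementwise ops preserve it
assert isinstance(y.sum(axis=0)._meta, sparse.COO)  # reductions do too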
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_dtype_kwarg_test_dtype_kwarg.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_dtype_kwarg_test_dtype_kwarg.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 479, "end_line": 492, "span_ids": ["test_dtype_kwarg"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dt\", [\"float64\", \"float32\", \"int32\", \"int64\"])\ndef test_dtype_kwarg(dt):\n arr1 = np.array([1, 2, 3])\n arr2 = np.array([4, 5, 6])\n\n darr1 = da.from_array(arr1)\n darr2 = da.from_array(arr2)\n\n expected = np.add(arr1, arr2, dtype=dt)\n result = np.add(darr1, darr2, dtype=dt)\n assert_eq(expected, result)\n\n result = da.add(darr1, darr2, dtype=dt)\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_test_ufunc_where.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_test_ufunc_where.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 495, "end_line": 518, "span_ids": ["test_ufunc_where"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"dtype\", [None, \"f8\"])\n@pytest.mark.parametrize(\"left_is_da\", [False, True])\n@pytest.mark.parametrize(\"right_is_da\", [False, True])\n@pytest.mark.parametrize(\"where_kind\", [True, False, \"numpy\", \"dask\"])\ndef test_ufunc_where(dtype, left_is_da, right_is_da, where_kind):\n left = np.arange(12).reshape((3, 4))\n right = np.arange(4)\n out = np.zeros_like(left, dtype=dtype)\n d_out = da.zeros_like(left, dtype=dtype)\n\n if where_kind in (True, False):\n d_where = where = where_kind\n else:\n d_where = where = np.array([False, True, True, False])\n if where_kind == \"dask\":\n d_where = da.from_array(where, chunks=2)\n\n d_left = da.from_array(left, chunks=2) if left_is_da else left\n d_right = da.from_array(right, chunks=2) if right_is_da else right\n\n expected = np.add(left, right, where=where, out=out, dtype=dtype)\n result = 
da.add(d_left, d_right, where=d_where, out=d_out, dtype=dtype)\n assert result is d_out\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_broadcasts_test_ufunc_where_broadcasts.assert_eq_expected_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_broadcasts_test_ufunc_where_broadcasts.assert_eq_expected_resul", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 521, "end_line": 538, "span_ids": ["test_ufunc_where_broadcasts"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"left_is_da\", [False, True])\n@pytest.mark.parametrize(\"right_is_da\", [False, True])\n@pytest.mark.parametrize(\"where_is_da\", [False, True])\ndef test_ufunc_where_broadcasts(left_is_da, right_is_da, where_is_da):\n left = np.arange(4)\n right = np.arange(4, 8)\n where = np.array([[0, 1, 1, 0], [1, 0, 0, 1], [0, 1, 0, 1]]).astype(\"bool\")\n out = np.zeros(where.shape, dtype=left.dtype)\n\n d_out = da.zeros(where.shape, dtype=left.dtype)\n d_where = da.from_array(where, chunks=2) if where_is_da else where\n d_left = da.from_array(left, chunks=2) if left_is_da else left\n d_right = da.from_array(right, chunks=2) if right_is_da else right\n\n expected = np.add(left, right, where=where, out=out)\n result = da.add(d_left, d_right, where=d_where, out=d_out)\n assert result is d_out\n assert_eq(expected, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_no_out_test_ufunc_where_no_out.assert_not_np_equal_resul": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_no_out_test_ufunc_where_no_out.assert_not_np_equal_resul", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 541, "end_line": 563, "span_ids": ["test_ufunc_where_no_out"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ufunc_where_no_out():\n left = np.arange(4)\n right = np.arange(4, 8)\n where = np.array([[0, 1, 1, 0], [1, 0, 0, 1], [0, 1, 0, 1]]).astype(\"bool\")\n\n d_where = 
da.from_array(where, chunks=2)\n    d_left = da.from_array(left, chunks=2)\n    d_right = da.from_array(right, chunks=2)\n\n    expected = np.add(left, right, where=where)\n    result = da.add(d_left, d_right, where=d_where)\n\n    # If no `out` is provided, numpy leaves elements that don't match `where`\n    # uninitialized, so they effectively may be any random value. We test that\n    # the set values match, and that the unset values aren't equal to what\n    # they would be if `where` wasn't provided (to test that `where` was\n    # actually passed).\n\n    expected_masked = np.where(where, expected, 0)\n    result_masked = np.where(where, result, 0)\n    assert_eq(expected_masked, result_masked)\n\n    expected_no_where = np.add(left, right)\n    assert not np.equal(result.compute(), expected_no_where).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_doesnt_mutate_out_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_ufunc.py_test_ufunc_where_doesnt_mutate_out_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_ufunc.py", "file_name": "test_ufunc.py", "file_type": "text/x-python", "category": "test", "start_line": 566, "end_line": 580, "span_ids": ["test_ufunc_where_doesnt_mutate_out"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_ufunc_where_doesnt_mutate_out():\n    \"\"\"Dask arrays are immutable; ensure that the backing numpy array for\n    `out` isn't actually mutated\"\"\"\n    left = da.from_array(np.arange(4, dtype=\"i8\"), chunks=2)\n    right = da.from_array(np.arange(4, 8, dtype=\"i8\"), chunks=2)\n    where = da.from_array(np.array([1, 0, 0, 1], dtype=\"bool\"), chunks=2)\n    out_np = np.zeros(4, dtype=\"i8\")\n    out = da.from_array(out_np, chunks=2)\n    result = da.add(left, right, where=where, out=out)\n    assert out is result\n    assert_eq(out, np.array([4, 0, 0, 10], dtype=\"i8\"))\n\n    # Check that original `out` array isn't mutated\n    assert np.equal(out_np, 0).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_contextlib_meta_from_array.return.meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_contextlib_meta_from_array.return.meta", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 115, "span_ids": ["imports", "meta_from_array", "normalize_to_array"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nimport functools\nimport itertools\nimport math\nimport numbers\nimport warnings\n\nimport numpy as np\nfrom tlz import concat, frequencies\n\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import has_keyword, is_arraylike, is_cupy_type\nfrom .core import Array\n\n\ndef normalize_to_array(x):\n if is_cupy_type(x):\n return x.get()\n else:\n return x\n\n\ndef meta_from_array(x, ndim=None, dtype=None):\n \"\"\"Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n \"\"\"\n # If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)\n # implement a _meta attribute that are incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n elif dtype is None and hasattr(x, \"dtype\"):\n dtype = x.dtype\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n try:\n meta = meta.astype(dtype)\n except ValueError as e:\n if (\n any(\n s in str(e)\n for s in [\n \"invalid literal\",\n \"could not convert string to float\",\n ]\n )\n and meta.dtype.kind in \"SU\"\n ):\n meta = np.array([]).astype(dtype)\n else:\n raise e\n\n return meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_scipy_linalg_safe_solve_triangular_safe.return.scipy_linalg_safe_solve_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_scipy_linalg_safe_solve_triangular_safe.return.scipy_linalg_safe_solve_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 507, "end_line": 524, "span_ids": ["solve_triangular_safe", "scipy_linalg_safe"], 
"tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def scipy_linalg_safe(func_name, *args, **kwargs):\n # need to evaluate at least the first input array\n # for gpu/cpu checking\n a = args[0]\n if is_cupy_type(a):\n import cupyx.scipy.linalg\n\n func = getattr(cupyx.scipy.linalg, func_name)\n else:\n import scipy.linalg\n\n func = getattr(scipy.linalg, func_name)\n\n return func(*args, **kwargs)\n\n\ndef solve_triangular_safe(a, b, lower=False):\n return scipy_linalg_safe(\"solve_triangular\", a, b, lower=lower)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py___getattr___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py___getattr___", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 527, "end_line": 539, "span_ids": ["__getattr__"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def __getattr__(name):\n # Can't use the @_deprecated decorator as it would not work on `except AxisError`\n if name == \"AxisError\":\n warnings.warn(\n \"AxisError was deprecated after version 2021.10.0 and will be removed in a \"\n \"future release. 
Please use numpy.AxisError instead.\",\n category=FutureWarning,\n stacklevel=2,\n )\n return np.AxisError\n else:\n raise AttributeError(f\"module {__name__} has no attribute {name}\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap__broadcast_trick_inner.return.np_broadcast_to_func_meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_wrap__broadcast_trick_inner.return.np_broadcast_to_func_meta", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 109, "end_line": 137, "span_ids": ["wrap", "_broadcast_trick_inner", "impl:2"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@curry\ndef wrap(wrap_func, func, func_like=None, **kwargs):\n if func_like is None:\n f = partial(wrap_func, func, **kwargs)\n else:\n f = partial(wrap_func, func_like, **kwargs)\n template = \"\"\"\n Blocked variant of %(name)s\n\n Follows the signature of %(name)s exactly except that it also features\n optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.\n\n Original signature follows below.\n \"\"\"\n if func.__doc__ is not None:\n f.__doc__ = template % {\"name\": func.__name__} + func.__doc__\n f.__name__ = \"blocked_\" + func.__name__\n return f\n\n\nw = wrap(wrap_func_shape_as_first_arg)\n\n\n@curry\ndef _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):\n # cupy-specific hack. 
numpy is happy with hardcoded shape=().\n    null_shape = () if shape == () else 1\n\n    return np.broadcast_to(func(meta, shape=null_shape, *args, **kwargs), shape)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_broadcast_trick_broadcast_trick.return.inner": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_broadcast_trick_broadcast_trick.return.inner", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 135, "end_line": 157, "span_ids": ["broadcast_trick"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def broadcast_trick(func):\n    \"\"\"\n    Provide a decorator to wrap common numpy functions with a broadcast trick.\n\n    Dask arrays are currently immutable; thus when we know an array is uniform,\n    we can replace the actual data by a single value and have all elements point\n    to it, thus reducing the size.\n\n    >>> x = np.broadcast_to(1, (100,100,100))\n    >>> x.base.nbytes\n    8\n\n    Those arrays are not only more efficient locally, but dask serialisation is\n    aware of the _real_ size of those arrays and thus can send them around\n    efficiently and schedule accordingly.\n\n    Note that those arrays are read-only and numpy will refuse to assign to them,\n    so this should be safe.\n    \"\"\"\n    inner = _broadcast_trick_inner(func)\n    inner.__doc__ = func.__doc__\n    inner.__name__ = func.__name__\n    return inner", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_ones__full.__doc__._full___doc___replace_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_ones__full.__doc__._full___doc___replace_", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 165, "end_line": 185, "span_ids": ["impl:4"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "ones = w(broadcast_trick(np.ones_like), dtype=\"f8\")\nzeros = w(broadcast_trick(np.zeros_like), dtype=\"f8\")\nempty = w(broadcast_trick(np.empty_like), dtype=\"f8\")\n\n\nw_like = wrap(wrap_func_like)\n\n\nempty_like = w_like(np.empty, func_like=np.empty_like)\n\n\n# full and full_like require special casing due to argument check on fill_value\n# Generate wrapped functions only once\n_full = 
w(broadcast_trick(np.full_like))\n_full_like = w_like(np.full, func_like=np.full_like)\n\n# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472\n_full.__doc__ = _full.__doc__.replace(\n    \"array([0.1,  0.1,  0.1,  0.1,  0.1,  0.1])\",\n    \"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\",\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_full_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/wrap.py_full_", "embedding": null, "metadata": {"file_path": "dask/array/wrap.py", "file_name": "wrap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 218, "span_ids": ["impl:20", "full", "full_like"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def full(shape, fill_value, *args, **kwargs):\n    # np.isscalar has somewhat strange behavior:\n    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html\n    if np.ndim(fill_value) != 0:\n        raise ValueError(\n            f\"fill_value must be scalar. Received {type(fill_value).__name__} instead.\"\n        )\n    if \"dtype\" not in kwargs:\n        if hasattr(fill_value, \"dtype\"):\n            kwargs[\"dtype\"] = fill_value.dtype\n        else:\n            kwargs[\"dtype\"] = type(fill_value)\n    return _full(shape=shape, fill_value=fill_value, *args, **kwargs)\n\n\ndef full_like(a, fill_value, *args, **kwargs):\n    if np.ndim(fill_value) != 0:\n        raise ValueError(\n            f\"fill_value must be scalar. Received {type(fill_value).__name__} instead.\"\n        )\n    return _full_like(\n        a=a,\n        fill_value=fill_value,\n        *args,\n        **kwargs,\n    )\n\n\nfull.__doc__ = _full.__doc__\nfull_like.__doc__ = _full_like.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_task_lazify_task.if_not_start_and_head_in_.else_.return._head_tuple_lazify_ta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_lazify_task_lazify_task.if_not_start_and_head_in_.else_.return._head_tuple_lazify_ta", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 70, "end_line": 95, "span_ids": ["lazify_task"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def lazify_task(task, start=True):\n    \"\"\"\n    Given a task, remove unnecessary calls to ``list`` and ``reify``.\n\n    This traverses tasks and small lists. 
We choose not to traverse down lists\n    of size >= 50 because it is unlikely that sequences this long contain other\n    sequences in practice.\n\n    Examples\n    --------\n    >>> def inc(x):\n    ...     return x + 1\n    >>> task = (sum, (list, (map, inc, [1, 2, 3])))\n    >>> lazify_task(task)  # doctest: +ELLIPSIS\n    (<built-in function sum>, (<built-in function map>, <function inc at ...>, [1, 2, 3]))\n    \"\"\"\n    if type(task) is list and len(task) < 50:\n        return [lazify_task(arg, False) for arg in task]\n    if not istask(task):\n        return task\n    head, tail = task[0], task[1:]\n    if not start and head in (list, reify):\n        task = task[1]\n        return lazify_task(*tail, start=False)\n    else:\n        return (head,) + tuple(lazify_task(arg, False) for arg in tail)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item.to_delayed_Item.to_delayed.return.Delayed_self_key_dsk_la": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Item.to_delayed_Item.to_delayed.return.Delayed_self_key_dsk_la", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 414, "end_line": 428, "span_ids": ["Item.to_delayed"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Item(DaskMethodsMixin):\n\n    def to_delayed(self, optimize_graph=True):\n        \"\"\"Convert into a ``dask.delayed`` object.\n\n        Parameters\n        ----------\n        optimize_graph : bool, optional\n            If True [default], the graph is optimized before converting into\n            ``dask.delayed`` objects.\n        \"\"\"\n        from dask.delayed import Delayed\n\n        dsk = self.__dask_graph__()\n        if optimize_graph:\n            dsk = self.__dask_optimize__(dsk, self.__dask_keys__())\n        return Delayed(self.key, dsk, layer=self._layer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_dataframe_Bag.to_dataframe.return.dd_DataFrame_dsk_dfs_nam": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_dataframe_Bag.to_dataframe.return.dd_DataFrame_dsk_dfs_nam", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1534, "end_line": 1611, "span_ids": ["Bag.to_dataframe"], "tokens": 741}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n    def to_dataframe(self, meta=None, columns=None, optimize_graph=True):\n        \"\"\"Create a Dask DataFrame from a Dask 
Bag.\n\n Bag should contain tuples, dict records, or scalars.\n\n Index will not be particularly meaningful. Use ``reindex`` afterwards\n if necessary.\n\n Parameters\n ----------\n meta : pd.DataFrame, dict, iterable, optional\n An empty ``pd.DataFrame`` that matches the dtypes and column names\n of the output. This metadata is necessary for many algorithms in\n dask dataframe to work. For ease of use, some alternative inputs\n are also available. Instead of a ``DataFrame``, a ``dict`` of\n ``{name: dtype}`` or iterable of ``(name, dtype)`` can be provided.\n If not provided or a list, a single element from the first\n partition will be computed, triggering a potentially expensive call\n to ``compute``. This may lead to unexpected results, so providing\n ``meta`` is recommended. For more information, see\n ``dask.dataframe.utils.make_meta``.\n columns : sequence, optional\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the columns.\n Otherwise this argument indicates the order of the columns in the\n result (any names not found in the data will become all-NA\n columns). Note that if ``meta`` is provided, column names will be\n taken from there and this parameter is invalid.\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n :class:`dask.dataframe.DataFrame`.\n\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},\n ... {'name': 'Bob', 'balance': 200},\n ... {'name': 'Charlie', 'balance': 300}],\n ... npartitions=2)\n >>> df = b.to_dataframe()\n\n >>> df.compute()\n name balance\n 0 Alice 100\n 1 Bob 200\n 0 Charlie 300\n \"\"\"\n import pandas as pd\n\n import dask.dataframe as dd\n\n if meta is None:\n head = self.take(1, warn=False)\n if len(head) == 0:\n raise ValueError(\n \"`dask.bag.Bag.to_dataframe` failed to \"\n \"properly infer metadata, please pass in \"\n \"metadata via the `meta` keyword\"\n )\n meta = pd.DataFrame(list(head), columns=columns)\n elif columns is not None:\n raise ValueError(\"Can't specify both `meta` and `columns`\")\n else:\n meta = dd.utils.make_meta(meta, parent_meta=pd.DataFrame())\n # Serializing the columns and dtypes is much smaller than serializing\n # the empty frame\n cols = list(meta.columns)\n dtypes = meta.dtypes.to_dict()\n\n dfs = self.map_partitions(to_dataframe, cols, dtypes)\n if optimize_graph:\n dsk = self.__dask_optimize__(dfs.dask, dfs.__dask_keys__())\n else:\n dsk = dfs.dask\n\n divisions = [None] * (self.npartitions + 1)\n return dd.DataFrame(dsk, dfs.name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_delayed_Bag.to_delayed.return._Delayed_k_dsk_layer_la": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_Bag.to_delayed_Bag.to_delayed.return._Delayed_k_dsk_layer_la", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1613, "end_line": 1635, "span_ids": ["Bag.to_delayed"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Bag(DaskMethodsMixin):\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a list of ``dask.delayed`` objects, one per partition.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.bag.from_delayed\n \"\"\"\n from dask.delayed import Delayed\n\n keys = self.__dask_keys__()\n dsk = self.__dask_graph__()\n layer = self.name\n if optimize_graph:\n dsk = self.__dask_optimize__(dsk, keys)\n layer = \"delayed-\" + layer\n dsk = HighLevelGraph.from_collections(layer, dsk, dependencies=())\n return [Delayed(k, dsk, layer=layer) for k in keys]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map_bag_map.npartitions_7.npartitions_pop_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/core.py_bag_map_bag_map.npartitions_7.npartitions_pop_", "embedding": null, "metadata": {"file_path": "dask/bag/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2079, "end_line": 2168, "span_ids": ["bag_map"], "tokens": 743}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bag_map(func, *args, **kwargs):\n \"\"\"Apply a function elementwise across one or more bags.\n\n Note that all ``Bag`` arguments must be partitioned identically.\n\n Parameters\n ----------\n func : callable\n *args, **kwargs : Bag, Item, Delayed, or object\n Arguments and keyword arguments to pass to ``func``. Non-Bag args/kwargs\n are broadcasted across all calls to ``func``.\n\n Notes\n -----\n For calls with multiple `Bag` arguments, corresponding partitions should\n have the same length; if they do not, the call will error at compute time.\n\n Examples\n --------\n >>> import dask.bag as db\n >>> b = db.from_sequence(range(5), npartitions=2)\n >>> b2 = db.from_sequence(range(5, 10), npartitions=2)\n\n Apply a function to all elements in a bag:\n\n >>> db.map(lambda x: x + 1, b).compute()\n [1, 2, 3, 4, 5]\n\n Apply a function with arguments from multiple bags:\n\n >>> from operator import add\n >>> db.map(add, b, b2).compute()\n [5, 7, 9, 11, 13]\n\n Non-bag arguments are broadcast across all calls to the mapped function:\n\n >>> db.map(add, b, 1).compute()\n [1, 2, 3, 4, 5]\n\n Keyword arguments are also supported, and have the same semantics as\n regular arguments:\n\n >>> def myadd(x, y=0):\n ... return x + y\n >>> db.map(myadd, b, y=b2).compute()\n [5, 7, 9, 11, 13]\n >>> db.map(myadd, b, y=1).compute()\n [1, 2, 3, 4, 5]\n\n Both arguments and keyword arguments can also be instances of\n ``dask.bag.Item`` or ``dask.delayed.Delayed``. 
Here we'll add the max value\n in the bag to each element:\n\n >>> db.map(myadd, b, b.max()).compute()\n [4, 5, 6, 7, 8]\n \"\"\"\n name = \"{}-{}\".format(funcname(func), tokenize(func, \"map\", *args, **kwargs))\n dependencies = []\n\n bags = []\n args2 = []\n for a in args:\n if isinstance(a, Bag):\n bags.append(a)\n args2.append(a)\n elif isinstance(a, (Item, Delayed)):\n dependencies.append(a)\n args2.append((itertools.repeat, a.key))\n else:\n args2.append((itertools.repeat, a))\n\n bag_kwargs = {}\n other_kwargs = {}\n for k, v in kwargs.items():\n if isinstance(v, Bag):\n bag_kwargs[k] = v\n bags.append(v)\n else:\n other_kwargs[k] = v\n\n other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)\n dependencies.extend(collections)\n\n if not bags:\n raise ValueError(\"At least one argument must be a Bag.\")\n\n npartitions = {b.npartitions for b in bags}\n if len(npartitions) > 1:\n raise ValueError(\"All bags must have the same number of partitions.\")\n npartitions = npartitions.pop()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_scheduler_func_test_args.assert_c_npartitions_d": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_groupby_with_scheduler_func_test_args.assert_c_npartitions_d", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 796, "end_line": 829, "span_ids": ["test_args", "test_concat", "test_concat_after_map", "test_flatten", "test_groupby_with_scheduler_func"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_with_scheduler_func():\n from dask.threaded import get\n\n with dask.config.set(scheduler=get):\n b.groupby(lambda x: x, npartitions=1).compute()\n\n\ndef test_concat():\n a = db.from_sequence([1, 2, 3])\n b = db.from_sequence([4, 5, 6])\n c = db.concat([a, b])\n assert list(c) == [1, 2, 3, 4, 5, 6]\n assert c.name == db.concat([a, b]).name\n\n\ndef test_flatten():\n b = db.from_sequence([[1], [2, 3]])\n assert list(b.flatten()) == [1, 2, 3]\n assert b.flatten().name == b.flatten().name\n\n\ndef test_concat_after_map():\n a = db.from_sequence([1, 2])\n b = db.from_sequence([4, 5])\n result = db.concat([a.map(inc), b])\n assert list(result) == [2, 3, 4, 5]\n\n\ndef test_args():\n c = b.map(lambda x: x + 1)\n d = Bag(*c._args)\n\n assert list(c) == list(d)\n assert c.npartitions == d.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_class_extend_test_bag_compute_forward_kwargs.x_compute_bogus_keyword_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_bag_class_extend_test_bag_compute_forward_kwargs.x_compute_bogus_keyword_1", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1077, "end_line": 1098, "span_ids": ["test_gh715", "test_bag_compute_forward_kwargs", "test_bag_class_extend"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_bag_class_extend():\n dictbag = BagOfDicts(*db.from_sequence([{\"a\": {\"b\": \"c\"}}])._args)\n assert dictbag.get(\"a\").get(\"b\").compute()[0] == \"c\"\n assert dictbag.get(\"a\").set(\"d\", \"EXTENSIBILITY!!!\").compute()[0] == {\n \"b\": \"c\",\n \"d\": \"EXTENSIBILITY!!!\",\n }\n assert isinstance(dictbag.get(\"a\").get(\"b\"), BagOfDicts)\n\n\ndef test_gh715():\n bin_data = \"\\u20ac\".encode()\n with tmpfile() as fn:\n with open(fn, \"wb\") as f:\n f.write(bin_data)\n a = db.read_text(fn)\n assert a.compute()[0] == bin_data.decode(\"utf-8\")\n\n\ndef test_bag_compute_forward_kwargs():\n x = db.from_sequence([1, 2, 3]).map(lambda a: a + 1)\n x.compute(bogus_keyword=10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.None_13": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_delayed_optimize_graph_test_to_delayed_optimize_graph.None_13", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1113, "end_line": 1138, "span_ids": ["test_to_delayed_optimize_graph"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_delayed_optimize_graph(tmpdir):\n b = db.from_sequence([1, 2, 3, 4, 5, 6], npartitions=1)\n b2 = b.map(inc).map(inc).map(inc)\n\n [d] = b2.to_delayed()\n text = str(dict(d.dask))\n assert text.count(\"reify\") == 1\n assert d.__dask_layers__() != b2.__dask_layers__()\n [d2] = b2.to_delayed(optimize_graph=False)\n assert dict(d2.dask) == dict(b2.dask)\n assert d2.__dask_layers__() == b2.__dask_layers__()\n assert d.compute() == d2.compute()\n\n x = b2.sum()\n d = x.to_delayed()\n text = str(dict(d.dask))\n assert d.__dask_layers__() == x.__dask_layers__()\n assert text.count(\"reify\") == 0\n d2 = x.to_delayed(optimize_graph=False)\n assert dict(d2.dask) == dict(x.dask)\n assert d2.__dask_layers__() == x.__dask_layers__()\n assert d.compute() == d2.compute()\n\n [d] = b2.to_textfiles(str(tmpdir), compute=False)\n text = str(dict(d.dask))\n 
assert text.count(\"reify\") <= 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_dask_layers_to_delayed_test_dask_layers_to_delayed.with_pytest_raises_ValueE.db_Item_arr_dask_arr_na": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_dask_layers_to_delayed_test_dask_layers_to_delayed.with_pytest_raises_ValueE.db_Item_arr_dask_arr_na", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1585, "end_line": 1614, "span_ids": ["test_dask_layers_to_delayed"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"optimize\", [False, True])\ndef test_dask_layers_to_delayed(optimize):\n # `da.Array.to_delayed` causes the layer name to not match the key.\n # Ensure the layer name is propagated between `Delayed` and `Item`.\n da = pytest.importorskip(\"dask.array\")\n i = db.Item.from_delayed(da.ones(1).to_delayed()[0])\n name = i.key[0]\n assert i.key[1:] == (0,)\n assert i.dask.layers.keys() == {\"delayed-\" + name}\n assert i.dask.dependencies == {\"delayed-\" + name: set()}\n assert i.__dask_layers__() == (\"delayed-\" + name,)\n\n arr = da.ones(1) + 1\n delayed = arr.to_delayed(optimize_graph=optimize)[0]\n i = db.Item.from_delayed(delayed)\n assert i.key == delayed.key\n assert i.dask is delayed.dask\n assert i.__dask_layers__() == delayed.__dask_layers__()\n\n back = i.to_delayed(optimize_graph=optimize)\n assert back.__dask_layers__() == i.__dask_layers__()\n\n if not optimize:\n assert back.dask is arr.dask\n # When not optimized, the key is not a layer in the graph, so using it should fail\n with pytest.raises(ValueError, match=\"not in\"):\n db.Item(back.dask, back.key)\n\n with pytest.raises(ValueError, match=\"not in\"):\n db.Item(arr.dask, (arr.name,), layer=\"foo\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_optimize_graph_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_dataframe_optimize_graph_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 1617, "end_line": 1651, "span_ids": ["test_to_dataframe_optimize_graph"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_to_dataframe_optimize_graph():\n pytest.importorskip(\"dask.dataframe\")\n from dask.dataframe.utils import assert_eq as assert_eq_df\n\n x = db.from_sequence(\n [{\"name\": \"test1\", \"v1\": 1}, {\"name\": \"test2\", \"v1\": 2}], npartitions=2\n )\n\n # linear `map` tasks will be fused by graph optimization\n with dask.annotate(foo=True):\n y = x.map(lambda a: dict(**a, v2=a[\"v1\"] + 1))\n y = y.map(lambda a: dict(**a, v3=a[\"v2\"] + 1))\n y = y.map(lambda a: dict(**a, v4=a[\"v3\"] + 1))\n\n # verifying the maps are not fused yet\n assert len(y.dask) == y.npartitions * 4\n\n # with optimizations\n d = y.to_dataframe()\n\n # All the `map` tasks have been fused\n assert len(d.dask) < len(y.dask)\n\n # no optimizations\n d2 = y.to_dataframe(optimize_graph=False)\n\n # Graph hasn't been fused. It contains all the original tasks,\n # plus one extra layer converting to DataFrame\n assert len(d2.dask) == len(y.dask) + d.npartitions\n\n # Annotations are still there\n assert hlg_layer_topological(d2.dask, 1).annotations == {\"foo\": True}\n\n assert_eq_df(d, d2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize_visualize._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize_visualize._", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 579, "end_line": 656, "span_ids": ["visualize"], "tokens": 822}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def visualize(\n *args, filename=\"mydask\", traverse=True, optimize_graph=False, maxval=None, **kwargs\n):\n \"\"\"\n Visualize several dask graphs simultaneously.\n\n Requires ``graphviz`` to be installed. All options that are not the dask\n graph(s) should be passed as keyword arguments.\n\n Parameters\n ----------\n args : object\n Any number of objects. If it is a dask collection (for example, a\n dask DataFrame, Array, Bag, or Delayed), its associated graph\n will be included in the output of visualize. By default, python builtin\n collections are also traversed to look for dask objects (for more\n information see the ``traverse`` keyword). Arguments lacking an\n associated graph will be ignored.\n filename : str or None, optional\n The name of the file to write to disk. If the provided `filename`\n doesn't include an extension, '.png' will be used by default.\n If `filename` is None, no file will be written, and we communicate\n with dot using only pipes.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n traverse : bool, optional\n By default, dask traverses builtin python collections looking for dask\n objects passed to ``visualize``. For large collections this can be\n expensive. 
If none of the arguments contain any dask objects, set\n        ``traverse=False`` to avoid doing this traversal.\n    optimize_graph : bool, optional\n        If True, the graph is optimized before rendering. Otherwise,\n        the graph is displayed as is. Default is False.\n    color : {None, 'order', 'ages', 'freed', 'memoryincreases', 'memorydecreases', 'memorypressure'}, optional\n        Options to color nodes:\n\n        - None, the default, no colors.\n        - 'order', colors the nodes' border based on the order they appear in the graph.\n        - 'ages', how long the data of a node is held.\n        - 'freed', the number of dependencies released after running a node.\n        - 'memoryincreases', how many more outputs are held after the lifetime of a node.\n          Large values may indicate nodes that should have run later.\n        - 'memorydecreases', how many fewer outputs are held after the lifetime of a node.\n          Large values may indicate nodes that should have run sooner.\n        - 'memorypressure', the number of data items held when the node is run (circle), or\n          when its data is released (rectangle).\n    maxval : {int, float}, optional\n        Maximum value for the colormap to normalize from 0 to 1.0. The default,\n        ``None``, uses the maximum of the computed values.\n    collapse_outputs : bool, optional\n        Whether to collapse output boxes, which often have empty labels.\n        Default is False.\n    verbose : bool, optional\n        Whether to label output and input boxes even if the data aren't chunked.\n        Beware: these labels can get very long. Default is False.\n    **kwargs\n        Additional keyword arguments to forward to ``to_graphviz``.\n\n    Examples\n    --------\n    >>> x.visualize(filename='dask.pdf')  # doctest: +SKIP\n    >>> x.visualize(filename='dask.pdf', color='order')  # doctest: +SKIP\n\n    Returns\n    -------\n    result : IPython.display.Image, IPython.display.SVG, or None\n        See dask.dot.dot_graph for more information.\n\n    See Also\n    --------\n    dask.dot.dot_graph\n\n    Notes\n    -----\n    For more information on optimization see here:\n\n    https://docs.dask.org/en/latest/optimize.html\n    \"\"\"\n    # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize.from_dask_dot_import_dot__visualize.return.dot_graph_dsk_filename_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_visualize.from_dask_dot_import_dot__visualize.return.dot_graph_dsk_filename_f", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 652, "end_line": 739, "span_ids": ["visualize"], "tokens": 670}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def visualize(\n *args, filename=\"mydask\", traverse=True, optimize_graph=False, maxval=None, **kwargs\n):\n from dask.dot import dot_graph\n\n args, _ = unpack_collections(*args, traverse=traverse)\n\n dsk = dict(collections_to_dsk(args, optimize_graph=optimize_graph))\n\n color = kwargs.get(\"color\")\n\n if color in {\n \"order\",\n \"order-age\",\n \"order-freed\",\n \"order-memoryincreases\",\n \"order-memorydecreases\",\n \"order-memorypressure\",\n \"age\",\n \"freed\",\n \"memoryincreases\",\n \"memorydecreases\",\n \"memorypressure\",\n }:\n import matplotlib.pyplot as plt\n\n from .order import diagnostics, order\n\n o = order(dsk)\n try:\n cmap = kwargs.pop(\"cmap\")\n except KeyError:\n cmap = plt.cm.RdBu\n if isinstance(cmap, str):\n import matplotlib.pyplot as plt\n\n cmap = getattr(plt.cm, cmap)\n\n def label(x):\n return str(values[x])\n\n data_values = None\n if color != \"order\":\n info = diagnostics(dsk, o)[0]\n if color.endswith(\"age\"):\n values = {key: val.age for key, val in info.items()}\n elif color.endswith(\"freed\"):\n values = {key: val.num_dependencies_freed for key, val in info.items()}\n elif color.endswith(\"memorypressure\"):\n values = {key: val.num_data_when_run for key, val in info.items()}\n data_values = {\n key: val.num_data_when_released for key, val in info.items()\n }\n elif color.endswith(\"memoryincreases\"):\n values = {\n key: max(0, val.num_data_when_released - val.num_data_when_run)\n for key, val in info.items()\n }\n else: # memorydecreases\n values = {\n key: max(0, val.num_data_when_run - val.num_data_when_released)\n for key, val in info.items()\n }\n\n if color.startswith(\"order-\"):\n\n def label(x):\n return str(o[x]) + \"-\" + str(values[x])\n\n else:\n values = o\n if maxval is None:\n maxval = max(1, max(values.values()))\n colors = {k: _colorize(cmap(v / maxval, bytes=True)) for k, v in values.items()}\n if data_values is None:\n data_values = values\n data_colors = colors\n else:\n data_colors = {\n k: _colorize(cmap(v / maxval, bytes=True))\n for k, v in data_values.items()\n }\n\n kwargs[\"function_attributes\"] = {\n k: {\"color\": v, \"label\": label(k)} for k, v in colors.items()\n }\n kwargs[\"data_attributes\"] = {k: {\"color\": v} for k, v in data_colors.items()}\n elif color:\n raise NotImplementedError(\"Unknown value color=%s\" % color)\n\n return dot_graph(dsk, filename=filename, **kwargs)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist_persist.keys_postpersists_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist_persist.keys_postpersists_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 742, "end_line": 826, "span_ids": ["persist"], "tokens": 768}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def persist(*args, traverse=True, optimize_graph=True, scheduler=None, **kwargs):\n \"\"\"Persist multiple Dask collections into memory\n\n This turns lazy Dask collections into Dask collections with the same\n metadata, but now with their results fully computed or actively computing\n in the background.\n\n For example a lazy dask.array built up from many lazy calls will now be a\n dask.array of the same shape, dtype, chunks, etc., but now with all of\n those previously lazy tasks either computed in memory as many small :class:`numpy.array`\n (in the single-machine case) or asynchronously running in the\n background on a cluster (in the distributed case).\n\n This function operates differently if a ``dask.distributed.Client`` exists\n and is connected to a distributed scheduler. In this case this function\n will return as soon as the task graph has been submitted to the cluster,\n but before the computations have completed. Computations will continue\n asynchronously in the background. When using this function with the single\n machine scheduler it blocks until the computations have finished.\n\n When using Dask on a single machine you should ensure that the dataset fits\n entirely within memory.\n\n Examples\n --------\n >>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP\n >>> df = df[df.name == 'Alice'] # doctest: +SKIP\n >>> df['in-debt'] = df.balance < 0 # doctest: +SKIP\n >>> df = df.persist() # triggers computation # doctest: +SKIP\n\n >>> df.value().min() # future computations are now fast # doctest: +SKIP\n -10\n >>> df.value().max() # doctest: +SKIP\n 100\n\n >>> from dask import persist # use persist function on multiple collections\n >>> a, b = persist(a, b) # doctest: +SKIP\n\n Parameters\n ----------\n *args: Dask collections\n scheduler : string, optional\n Which scheduler to use like \"threads\", \"synchronous\" or \"processes\".\n If not provided, the default is to check the global settings first,\n and then fall back to the collection defaults.\n traverse : bool, optional\n By default dask traverses builtin python collections looking for dask\n objects passed to ``persist``. For large collections this can be\n expensive. If none of the arguments contain any dask objects, set\n ``traverse=False`` to avoid doing this traversal.\n optimize_graph : bool, optional\n If True [default], the graph is optimized before computation.\n Otherwise the graph is run as is. 
This can be useful for debugging.\n **kwargs\n Extra keywords to forward to the scheduler function.\n\n Returns\n -------\n New dask collections backed by in-memory data\n \"\"\"\n collections, repack = unpack_collections(*args, traverse=traverse)\n if not collections:\n return args\n\n schedule = get_scheduler(scheduler=scheduler, collections=collections)\n\n if inspect.ismethod(schedule):\n try:\n from distributed.client import default_client\n except ImportError:\n pass\n else:\n try:\n client = default_client()\n except ValueError:\n pass\n else:\n if client.get == schedule:\n results = client.persist(\n collections, optimize_graph=optimize_graph, **kwargs\n )\n return repack(results)\n\n dsk = collections_to_dsk(collections, optimize_graph, **kwargs)\n keys, postpersists = [], []\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist.for_a_in_collections__persist.return.repack_results2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_persist.for_a_in_collections__persist.return.repack_results2_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 827, "end_line": 836, "span_ids": ["persist"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def persist(*args, traverse=True, optimize_graph=True, scheduler=None, **kwargs):\n # ... 
other code\n for a in collections:\n a_keys = list(flatten(a.__dask_keys__()))\n rebuild, state = a.__dask_postpersist__()\n keys.extend(a_keys)\n postpersists.append((rebuild, a_keys, state))\n\n results = schedule(dsk, keys, **kwargs)\n d = dict(zip(keys, results))\n results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]\n return repack(results2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py___normalize_set.return.normalize_token_sorted_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py___normalize_set.return.normalize_token_sorted_s_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 844, "end_line": 903, "span_ids": ["impl:4", "persist", "normalize_set", "impl:8", "tokenize", "normalize_dict", "normalize_ordered_dict"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "############\n# Tokenize #\n############\n\n# Pass `usedforsecurity=False` for Python 3.9+ to support FIPS builds of Python\nif _PY_VERSION >= parse_version(\"3.9\"):\n\n def _md5(x, _hashlib_md5=hashlib.md5):\n return _hashlib_md5(x, usedforsecurity=False)\n\nelse:\n _md5 = hashlib.md5\n\n\ndef tokenize(*args, **kwargs):\n \"\"\"Deterministic token\n\n >>> tokenize([1, 2, '3'])\n '7d6a880cd9ec03506eee6973ff551339'\n\n >>> tokenize('Hello') == tokenize('Hello')\n True\n \"\"\"\n hasher = _md5(str(tuple(map(normalize_token, args))).encode())\n if kwargs:\n hasher.update(str(normalize_token(kwargs)).encode())\n return hasher.hexdigest()\n\n\nnormalize_token = Dispatch()\nnormalize_token.register(\n (\n int,\n float,\n str,\n bytes,\n type(None),\n type,\n slice,\n complex,\n type(Ellipsis),\n datetime.date,\n ),\n identity,\n)\n\n\n@normalize_token.register(dict)\ndef normalize_dict(d):\n return normalize_token(sorted(d.items(), key=str))\n\n\n@normalize_token.register(OrderedDict)\ndef normalize_ordered_dict(d):\n return type(d).__name__, normalize_token(list(d.items()))\n\n\n@normalize_token.register(set)\ndef normalize_set(s):\n return normalize_token(sorted(s, key=str))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_function_cache_normalize_function.try_.except_TypeError_not_.return._normalize_function_func_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_function_cache_normalize_function.try_.except_TypeError_not_.return._normalize_function_func_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 959, "end_line": 976, "span_ids": ["impl:11", 
"normalize_function"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "function_cache: dict[Callable, Callable] = {}\nfunction_cache_lock = threading.Lock()\n\n\ndef normalize_function(func: Callable) -> Callable:\n try:\n return function_cache[func]\n except KeyError:\n result = _normalize_function(func)\n if len(function_cache) >= 500: # clear half of cache if full\n with function_cache_lock:\n if len(function_cache) >= 500:\n for k in list(function_cache)[::2]:\n del function_cache[k]\n function_cache[func] = result\n return result\n except TypeError: # not hashable\n return _normalize_function(func)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__init___Blockwise.__repr__.return.f_Blockwise_self_indices": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_Blockwise.__init___Blockwise.__repr__.return.f_Blockwise_self_indices", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 396, "end_line": 454, "span_ids": ["Blockwise.__repr__", "Blockwise.dims", "Blockwise.__init__"], "tokens": 522}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Blockwise(Layer):\n\n def __init__(\n self,\n output: str,\n output_indices: Iterable[str],\n dsk: Mapping[str, tuple],\n indices: Iterable[tuple[str | BlockwiseDep, Iterable[str] | None]],\n numblocks: Mapping[str, Sequence[int]],\n concatenate: bool | None = None,\n new_axes: Mapping[str, int] | None = None,\n output_blocks: set[tuple[int, ...]] | None = None,\n annotations: Mapping[str, Any] | None = None,\n io_deps: Mapping[str, BlockwiseDep] | None = None,\n ):\n super().__init__(annotations=annotations)\n self.output = output\n self.output_indices = tuple(output_indices)\n self.output_blocks = output_blocks\n self.dsk = dsk\n\n # Remove `BlockwiseDep` arguments from input indices\n # and add them to `self.io_deps`.\n # TODO: Remove `io_deps` and handle indexable objects\n # in `self.indices` throughout `Blockwise`.\n self.indices = []\n self.numblocks = numblocks\n self.io_deps = io_deps or {}\n for dep, ind in indices:\n name = dep\n if isinstance(dep, BlockwiseDep):\n name = tokenize(dep)\n self.io_deps[name] = dep\n self.numblocks[name] = dep.numblocks\n self.indices.append((name, tuple(ind) if ind is not None else ind))\n self.indices = tuple(self.indices)\n\n # optimize_blockwise won't merge where `concatenate` doesn't match, so\n # enforce a canonical value if there are no axes for reduction.\n output_indices_set = set(self.output_indices)\n if concatenate is not None and all(\n i in output_indices_set\n for name, 
ind in self.indices\n if ind is not None\n for i in ind\n ):\n concatenate = None\n self.concatenate = concatenate\n self.new_axes = new_axes or {}\n\n @property\n def dims(self):\n \"\"\"Returns a dictionary mapping between each index specified in\n `self.indices` and the number of output blocks for that index.\n \"\"\"\n if not hasattr(self, \"_dims\"):\n self._dims = _make_dims(self.indices, self.numblocks, self.new_axes)\n return self._dims\n\n def __repr__(self):\n return f\"Blockwise<{self.indices} -> {self.output}>\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__unique_dep_rewrite_blockwise.changed.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py__unique_dep_rewrite_blockwise.changed.True", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1326, "end_line": 1375, "span_ids": ["_unique_dep", "rewrite_blockwise"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _unique_dep(dep, ind):\n # Append blockwise index information to dependency name\n return dep + \"_\" + \"_\".join(str(i) for i in list(ind))\n\n\ndef rewrite_blockwise(inputs):\n \"\"\"Rewrite a stack of Blockwise expressions into a single blockwise expression\n\n Given a set of Blockwise layers, combine them into a single layer. The provided\n layers are expected to fit well together. That job is handled by\n ``optimize_blockwise``\n\n Parameters\n ----------\n inputs : list[Blockwise]\n\n Returns\n -------\n blockwise: Blockwise\n\n See Also\n --------\n optimize_blockwise\n \"\"\"\n if len(inputs) == 1:\n # Fast path: if there's only one input we can just use it as-is.\n return inputs[0]\n\n inputs = {inp.output: inp for inp in inputs}\n dependencies = {\n inp.output: {d for d, v in inp.indices if v is not None and d in inputs}\n for inp in inputs.values()\n }\n dependents = reverse_dict(dependencies)\n\n new_index_iter = (\n c + (str(d) if d else \"\") # A, B, ... A1, B1, ...\n for d in itertools.count()\n for c in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n )\n\n [root] = [k for k, v in dependents.items() if not v]\n\n # Our final results. These will change during fusion below\n indices = list(inputs[root].indices)\n new_axes = inputs[root].new_axes\n concatenate = inputs[root].concatenate\n dsk = dict(inputs[root].dsk)\n\n changed = True\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.while_changed__rewrite_blockwise.sub_12._blockwise_token_k_bloc": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.while_changed__rewrite_blockwise.sub_12._blockwise_token_k_bloc", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1353, "end_line": 1436, "span_ids": ["rewrite_blockwise"], "tokens": 827}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rewrite_blockwise(inputs):\n # ... other code\n while changed:\n changed = False\n for i, (dep, ind) in enumerate(indices):\n if ind is None:\n continue\n if dep not in inputs:\n continue\n\n changed = True\n\n # Change dep name to avoid fusing the same dep\n # (in different iteration orders) into a single\n # subgraph key/dependency\n # (see: https://github.com/dask/dask/issues/8535)\n local_dep = dep if dep == root else _unique_dep(dep, ind)\n\n # Replace _n with dep name in existing tasks\n # (inc, _0) -> (inc, 'b')\n dsk = {k: subs(v, {blockwise_token(i): local_dep}) for k, v in dsk.items()}\n\n # Remove current input from input indices\n # [('a', 'i'), ('b', 'i')] -> [('a', 'i')]\n _, current_dep_indices = indices.pop(i)\n sub = {\n blockwise_token(i): blockwise_token(i - 1)\n for i in range(i + 1, len(indices) + 1)\n }\n dsk = subs(dsk, sub)\n\n # Change new input_indices to match give index from current computation\n # [('c', j')] -> [('c', 'i')]\n new_indices = inputs[dep].indices\n sub = dict(zip(inputs[dep].output_indices, current_dep_indices))\n contracted = {\n x\n for _, j in new_indices\n if j is not None\n for x in j\n if x not in inputs[dep].output_indices\n }\n extra = dict(zip(contracted, new_index_iter))\n sub.update(extra)\n new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]\n\n # Update new_axes\n for k, v in inputs[dep].new_axes.items():\n new_axes[sub[k]] = v\n\n # Bump new inputs up in list\n sub = {}\n # Map from (id(key), inds or None) -> index in indices. 
Used to deduplicate indices.\n index_map = {(id(k), inds): n for n, (k, inds) in enumerate(indices)}\n for ii, index in enumerate(new_indices):\n id_key = (id(index[0]), index[1])\n if id_key in index_map: # use old inputs if available\n sub[blockwise_token(ii)] = blockwise_token(index_map[id_key])\n else:\n index_map[id_key] = len(indices)\n sub[blockwise_token(ii)] = blockwise_token(len(indices))\n indices.append(index)\n new_dsk = subs(inputs[dep].dsk, sub)\n\n # Change new_dsk key to match local_dep\n if dep != local_dep and dep in new_dsk:\n new_dsk[local_dep] = new_dsk.pop(dep)\n\n # indices.extend(new_indices)\n dsk.update(new_dsk)\n\n # De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]\n # Make sure that we map everything else appropriately as we remove inputs\n new_indices = []\n seen = {}\n sub = {} # like {_0: _0, _1: _0, _2: _1}\n for i, x in enumerate(indices):\n if x[1] is not None and x in seen:\n sub[i] = seen[x]\n else:\n if x[1] is not None:\n seen[x] = len(new_indices)\n sub[i] = len(new_indices)\n new_indices.append(x)\n\n sub = {blockwise_token(k): blockwise_token(v) for k, v in sub.items()}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.dsk_13_rewrite_blockwise.return.Blockwise_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_rewrite_blockwise.dsk_13_rewrite_blockwise.return.Blockwise_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1437, "end_line": 1458, "span_ids": ["rewrite_blockwise"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rewrite_blockwise(inputs):\n # ... 
other code\n dsk = {k: subs(v, sub) for k, v in dsk.items() if k not in sub.keys()}\n\n indices_check = {k for k, v in indices if v is not None}\n numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])\n numblocks = {k: v for k, v in numblocks.items() if v is None or k in indices_check}\n\n # Update IO-dependency information\n io_deps = {}\n for v in inputs.values():\n io_deps.update(v.io_deps)\n\n return Blockwise(\n root,\n inputs[root].output_indices,\n dsk,\n new_indices,\n numblocks=numblocks,\n new_axes=new_axes,\n concatenate=concatenate,\n annotations=inputs[root].annotations,\n io_deps=io_deps,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_simple_test_loc.with_open_os_path_join_di.with_f_as_f_.assert_f_loc_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_simple_test_loc.with_open_os_path_join_di.with_f_as_f_.assert_f_loc_4", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 45, "end_line": 69, "span_ids": ["test_loc", "test_simple"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_simple(dir_server):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn)[0]\n with f as f:\n data = f.read()\n with open(os.path.join(dir_server, fn), \"rb\") as expected:\n assert data == expected.read()\n\n\ndef test_loc(dir_server):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn)[0]\n with open(os.path.join(dir_server, fn), \"rb\") as expected:\n expected = expected.read()\n with f as f:\n data = f.read(2)\n assert data == expected[:2]\n assert f.loc == 2\n f.seek(0)\n data = f.read(3)\n assert data == expected[:3]\n f.seek(1, 1)\n assert f.loc == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_fetch_range_with_headers_test_fetch_range_with_headers.with_open_os_path_join_di.assert_data_expected_r": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_fetch_range_with_headers_test_fetch_range_with_headers.with_open_os_path_join_di.assert_data_expected_r", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 72, "end_line": 81, "span_ids": ["test_fetch_range_with_headers"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fetch_range_with_headers(dir_server):\n # https://github.com/dask/dask/issues/4479\n root = \"http://localhost:8999/\"\n fn = files[0]\n headers = {\"Date\": \"Wed, 21 Oct 2015 07:28:00 GMT\"}\n f = open_files(root + fn, headers=headers)[0]\n with f as f:\n data = f.read(length=1) + f.read(length=-1)\n with open(os.path.join(dir_server, fn), \"rb\") as expected:\n assert data == expected.read()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_test_ops.with_open_os_path_join_di.with_f_as_f_.assert_f_read_expect": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_test_ops.with_open_os_path_join_di.with_f_as_f_.assert_f_read_expect", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 98, "span_ids": ["test_ops"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"block_size\", [None, 99999])\ndef test_ops(dir_server, block_size):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn)[0]\n with open(os.path.join(dir_server, fn), \"rb\") as expected:\n expected = expected.read()\n with f as f:\n # these pass because the default\n assert f.read(10) == expected[:10]\n f.seek(0)\n assert f.read(10) == expected[:10]\n assert f.read(10) == expected[10:20]\n f.seek(-10, 2)\n assert f.read() == expected[-10:]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_blocksize_test_ops_blocksize.with_open_os_path_join_di.None_1.if_parse_version_fsspec__.else_.assert_f_read_10_expe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_test_ops_blocksize_test_ops_blocksize.with_open_os_path_join_di.None_1.if_parse_version_fsspec__.else_.assert_f_read_10_expe", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 124, "span_ids": ["test_ops_blocksize"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_ops_blocksize(dir_server):\n root = \"http://localhost:8999/\"\n fn = files[0]\n f = open_files(root + fn, block_size=2)[0]\n with open(os.path.join(dir_server, fn), \"rb\") as expected:\n expected = expected.read()\n with f as f:\n # it's OK to read the whole file\n assert f.read() == expected\n # and now the file magically has a size\n assert f.size == len(expected)\n\n # note that if we reuse f from above, because it is tokenized, we get\n # the same open file - where is this cached?\n fn = files[1]\n f = open_files(root + fn, block_size=2)[0]\n with f as f:\n if parse_version(fsspec.__version__) < parse_version(\"2021.11.1\"):\n # fails because we want only 12 bytes\n with pytest.raises(ValueError):\n assert f.read(10) == expected[:10]\n else:\n # fixed in https://github.com/fsspec/filesystem_spec/pull/830\n assert f.read(10) == expected[:10]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_pickability_of_lazy_files_test_pickability_of_lazy_files.with_filetexts_files_mod.for_f_f2_in_zip_myfiles_.with_f_as_f_open_f2_as_f.assert_f_open_read_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_pickability_of_lazy_files_test_pickability_of_lazy_files.with_filetexts_files_mod.for_f_f2_in_zip_myfiles_.with_f_as_f_open_f2_as_f.assert_f_open_read_f", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 313, "end_line": 324, "span_ids": ["test_pickability_of_lazy_files"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pickability_of_lazy_files(tmpdir):\n tmpdir = str(tmpdir)\n\n with filetexts(files, mode=\"b\"):\n myfiles = open_files(\".test.accounts.*\")\n myfiles2 = cloudpickle.loads(cloudpickle.dumps(myfiles))\n\n for f, f2 in zip(myfiles, myfiles2):\n assert f.path == f2.path\n assert type(f.fs) == type(f2.fs)\n with f as f_open, f2 as f2_open:\n assert f_open.read() == f2_open.read()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_py2_local_bytes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_local.py_test_py2_local_bytes_", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 327, "end_line": 354, "span_ids": ["test_abs_paths", "test_py2_local_bytes"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_py2_local_bytes(tmpdir):\n fn = str(tmpdir / \"myfile.txt.gz\")\n with gzip.open(fn, mode=\"wb\") as f:\n f.write(b\"hello\\nworld\")\n\n files = open_files(fn, compression=\"gzip\", mode=\"rt\")\n\n with files[0] as f:\n assert all(isinstance(line, str) for line in f)\n\n\ndef test_abs_paths(tmpdir):\n tmpdir = str(tmpdir)\n here = os.getcwd()\n os.chdir(tmpdir)\n with open(\"tmp\", \"w\") as f:\n f.write(\"hi\")\n out = LocalFileSystem().glob(\"*\")\n assert len(out) == 1\n assert \"/\" in out[0]\n assert \"tmp\" in out[0]\n\n fs = LocalFileSystem()\n os.chdir(here)\n with fs.open(out[0], \"r\") as f:\n res = f.read()\n assert res == \"hi\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_base_s3_base.with_ensure_safe_environm.try_.except_subprocess_Timeout.if_sys_platform_win32.subprocess_call_f_TASKKIL": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_s3_base_s3_base.with_ensure_safe_environm.try_.except_subprocess_Timeout.if_sys_platform_win32.subprocess_call_f_TASKKIL", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 71, "end_line": 104, "span_ids": ["s3_base"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(scope=\"module\")\ndef s3_base():\n with ensure_safe_environment_variables():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"foobar_key\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"foobar_secret\"\n\n # pipe to null to avoid logging in terminal\n proc = subprocess.Popen(\n shlex.split(\"moto_server s3 -p 5555\"), stdout=subprocess.DEVNULL\n )\n\n timeout = 8\n while True:\n try:\n # OK to go once server is accepting connections\n r = requests.get(endpoint_uri)\n if r.ok:\n break\n except Exception:\n pass\n timeout -= 0.1\n time.sleep(0.1)\n assert timeout > 0, \"Timed out waiting for moto server\"\n yield\n\n # shut down external process\n proc.terminate()\n try:\n proc.wait(timeout=3)\n except subprocess.TimeoutExpired:\n proc.kill()\n if sys.platform == \"win32\":\n # belt & braces\n subprocess.call(f\"TASKKILL /F /PID {proc.pid} /T\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_append_test_parquet_append.dd_utils_assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_append_test_parquet_append.dd_utils_assert_eq_", "embedding": null, "metadata": {"file_path": 
"dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 484, "end_line": 535, "span_ids": ["test_parquet_append"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"engine\", [\"pyarrow\", \"fastparquet\"])\ndef test_parquet_append(s3, engine, s3so):\n pytest.importorskip(engine)\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n np = pytest.importorskip(\"numpy\")\n\n url = \"s3://%s/test.parquet.append\" % test_bucket_name\n\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"you\", \"people\"], size=1000).astype(\n \"O\"\n ),\n },\n )\n df = dd.from_pandas(data, chunksize=500)\n df.to_parquet(\n url,\n engine=engine,\n storage_options=s3so,\n write_index=False,\n )\n df.to_parquet(\n url,\n engine=engine,\n storage_options=s3so,\n write_index=False,\n append=True,\n ignore_divisions=True,\n )\n\n files = [f.split(\"/\")[-1] for f in s3.ls(url)]\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(\n url,\n index=False,\n engine=engine,\n storage_options=s3so,\n )\n\n dd.utils.assert_eq(\n pd.concat([data, data]),\n df2,\n check_index=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ensure_file_ensure_file.try_.except_OSError_.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_ensure_file_ensure_file.try_.except_OSError_.pass", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 227, "end_line": 285, "span_ids": ["ensure_file"], "tokens": 429}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ensure_file(source: str, destination: str = None, comment: bool = True) -> None:\n \"\"\"\n Copy file to default location if it does not already exist\n\n This tries to move a default configuration file to a default location if\n if does not already exist. It also comments out that file by default.\n\n This is to be used by downstream modules (like dask.distributed) that may\n have default configuration files that they wish to include in the default\n configuration path.\n\n Parameters\n ----------\n source : string, filename\n Source configuration file, typically within a source directory.\n destination : string, directory\n Destination directory. 
Configurable by ``DASK_CONFIG`` environment\n variable, falling back to ~/.config/dask.\n comment : bool, True by default\n Whether or not to comment out the config file when copying.\n \"\"\"\n if destination is None:\n destination = PATH\n\n # destination is a file and already exists, never overwrite\n if os.path.isfile(destination):\n return\n\n # If destination is not an existing file, interpret as a directory,\n # use the source basename as the filename\n directory = destination\n destination = os.path.join(directory, os.path.basename(source))\n\n try:\n if not os.path.exists(destination):\n os.makedirs(directory, exist_ok=True)\n\n # Atomically create destination. Parallel testing discovered\n # a race condition where a process can be busy creating the\n # destination while another process reads an empty config file.\n tmp = \"%s.tmp.%d\" % (destination, os.getpid())\n with open(source) as f:\n lines = list(f)\n\n if comment:\n lines = [\n \"# \" + line if line.strip() and not line.startswith(\"#\") else line\n for line in lines\n ]\n\n with open(tmp, \"w\") as f:\n f.write(\"\".join(lines))\n\n try:\n os.rename(tmp, destination)\n except OSError:\n os.remove(tmp)\n except OSError:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_expand_environment_variables_expand_environment_variables.if_isinstance_config_Map.else_.return.config": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_expand_environment_variables_expand_environment_variables.if_isinstance_config_Map.else_.return.config", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 561, "end_line": 588, "span_ids": ["expand_environment_variables"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def expand_environment_variables(config: Any) -> Any:\n \"\"\"Expand environment variables in a nested config dictionary\n\n This function will recursively search through any nested dictionaries\n and/or lists.\n\n Parameters\n ----------\n config : dict, iterable, or str\n Input object to search for environment variables\n\n Returns\n -------\n config : same type as input\n\n Examples\n --------\n >>> expand_environment_variables({'x': [1, 2, '$USER']}) # doctest: +SKIP\n {'x': [1, 2, 'my-username']}\n \"\"\"\n if isinstance(config, Mapping):\n return {k: expand_environment_variables(v) for k, v in config.items()}\n elif isinstance(config, str):\n return os.path.expandvars(config)\n elif isinstance(config, (list, tuple, builtins.set)):\n return type(config)(expand_environment_variables(v) for v in config)\n else:\n return config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_deprecations_deprecations._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_deprecations_deprecations._", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 591, "end_line": 607, "span_ids": ["impl:20"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "deprecations = {\n \"fuse_ave_width\": \"optimization.fuse.ave-width\",\n \"fuse_max_height\": \"optimization.fuse.max-height\",\n \"fuse_max_width\": \"optimization.fuse.max-width\",\n \"fuse_subgraphs\": \"optimization.fuse.subgraphs\",\n \"fuse_rename_keys\": \"optimization.fuse.rename-keys\",\n \"fuse_max_depth_new_edges\": \"optimization.fuse.max-depth-new-edges\",\n # See https://github.com/dask/distributed/pull/4916\n \"ucx.cuda_copy\": \"distributed.ucx.cuda_copy\",\n \"ucx.tcp\": \"distributed.ucx.tcp\",\n \"ucx.nvlink\": \"distributed.ucx.nvlink\",\n \"ucx.infiniband\": \"distributed.ucx.infiniband\",\n \"ucx.rdmacm\": \"distributed.ucx.rdmacm\",\n \"ucx.net-devices\": \"distributed.ucx.net-devices\",\n \"ucx.reuse-endpoints\": \"distributed.ucx.reuse-endpoints\",\n \"rmm.pool-size\": \"distributed.rmm.pool-size\",\n}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_make_meta_object_make_meta_object.raise_TypeError_f_Don_t_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_make_meta_object_make_meta_object.raise_TypeError_f_Don_t_k", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 90, "end_line": 153, "span_ids": ["make_meta_object"], "tokens": 583}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@make_meta_obj.register(meta_object_types)\ndef make_meta_object(x, index=None):\n \"\"\"Create an empty pandas object containing the desired metadata.\n\n Parameters\n ----------\n x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar\n To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or\n an iterable of `(name, dtype)` tuples. To create a `Series`, provide a\n tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index\n should match the desired output. If a dtype or scalar, a scalar of the\n same dtype is returned.\n index : pd.Index, optional\n Any pandas index to use in the metadata. 
If none provided, a\n `RangeIndex` will be used.\n\n Examples\n --------\n\n >>> make_meta_object([('a', 'i8'), ('b', 'O')])\n Empty DataFrame\n Columns: [a, b]\n Index: []\n >>> make_meta_object(('a', 'f8'))\n Series([], Name: a, dtype: float64)\n >>> make_meta_object('i8')\n 1\n \"\"\"\n\n if is_arraylike(x) and x.shape:\n return x[:0]\n\n if index is not None:\n index = make_meta_dispatch(index)\n\n if isinstance(x, dict):\n return pd.DataFrame(\n {c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index\n )\n if isinstance(x, tuple) and len(x) == 2:\n return _empty_series(x[0], x[1], index=index)\n elif isinstance(x, Iterable) and not isinstance(x, str):\n if not all(isinstance(i, tuple) and len(i) == 2 for i in x):\n raise ValueError(f\"Expected iterable of tuples of (name, dtype), got {x}\")\n return pd.DataFrame(\n {c: _empty_series(c, d, index=index) for (c, d) in x},\n columns=[c for c, d in x],\n index=index,\n )\n elif not hasattr(x, \"dtype\") and x is not None:\n # could be a string, a dtype object, or a python type. Skip `None`,\n # because it is implicitly converted to `dtype('f8')`, which we don't\n # want here.\n try:\n dtype = np.dtype(x)\n return _scalar_from_dtype(dtype)\n except Exception:\n # Continue on to next check\n pass\n\n if is_scalar(x):\n return _nonempty_scalar(x)\n\n raise TypeError(f\"Don't know how to create metadata from {x}\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_meta_nonempty_object_meta_nonempty_dataframe.return.res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_meta_nonempty_object_meta_nonempty_dataframe.return.res", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 154, "end_line": 184, "span_ids": ["meta_nonempty_object", "meta_nonempty_dataframe"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@meta_nonempty.register(object)\ndef meta_nonempty_object(x):\n \"\"\"Create a nonempty pandas object from the given metadata.\n\n Returns a pandas DataFrame, Series, or Index that contains two rows\n of fake data.\n \"\"\"\n if is_scalar(x):\n return _nonempty_scalar(x)\n else:\n raise TypeError(\n \"Expected Pandas-like Index, Series, DataFrame, or scalar, \"\n f\"got {typename(type(x))}\"\n )\n\n\n@meta_nonempty.register(pd.DataFrame)\ndef meta_nonempty_dataframe(x):\n idx = meta_nonempty(x.index)\n dt_s_dict = dict()\n data = dict()\n for i, c in enumerate(x.columns):\n series = x.iloc[:, i]\n dt = series.dtype\n if dt not in dt_s_dict:\n dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)\n data[i] = dt_s_dict[dt]\n res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))\n res.columns = x.columns\n res.attrs = x.attrs\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py__nonempty_index__nonempty_index.raise_TypeError_f_Don_t_k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py__nonempty_index__nonempty_index.raise_TypeError_f_Don_t_k", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 189, "end_line": 252, "span_ids": ["_nonempty_index"], "tokens": 720}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@meta_nonempty.register(pd.Index)\ndef _nonempty_index(idx):\n typ = type(idx)\n if typ is pd.RangeIndex:\n return pd.RangeIndex(2, name=idx.name)\n elif idx.is_numeric():\n return typ([1, 2], name=idx.name)\n elif typ is pd.Index:\n if idx.dtype == bool:\n # pd 1.5 introduce bool dtypes and respect non-uniqueness\n return pd.Index([True, False], name=idx.name)\n else:\n # for pd 1.5 in the case of bool index this would be cast as [True, True]\n # breaking uniqueness\n return pd.Index([\"a\", \"b\"], name=idx.name, dtype=idx.dtype)\n elif typ is pd.DatetimeIndex:\n start = \"1970-01-01\"\n # Need a non-monotonic decreasing index to avoid issues with\n # partial string indexing see https://github.com/dask/dask/issues/2389\n # and https://github.com/pandas-dev/pandas/issues/16515\n # This doesn't mean `_meta_nonempty` should ever rely on\n # `self.monotonic_increasing` or `self.monotonic_decreasing`\n try:\n return pd.date_range(\n start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name\n )\n except ValueError: # older pandas versions\n data = [start, \"1970-01-02\"] if idx.freq is None else None\n return pd.DatetimeIndex(\n data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name\n )\n elif typ is pd.PeriodIndex:\n return pd.period_range(\n start=\"1970-01-01\", periods=2, freq=idx.freq, name=idx.name\n )\n elif typ is pd.TimedeltaIndex:\n start = np.timedelta64(1, \"D\")\n try:\n return pd.timedelta_range(\n start=start, periods=2, freq=idx.freq, name=idx.name\n )\n except ValueError: # older pandas versions\n start = np.timedelta64(1, \"D\")\n data = [start, start + 1] if idx.freq is None else None\n return pd.TimedeltaIndex(\n data, start=start, periods=2, freq=idx.freq, name=idx.name\n )\n elif typ is pd.CategoricalIndex:\n if len(idx.categories) == 0:\n data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)\n else:\n data = pd.Categorical.from_codes(\n [-1, 0], categories=idx.categories, ordered=idx.ordered\n )\n return pd.CategoricalIndex(data, name=idx.name)\n elif typ is pd.MultiIndex:\n levels = [_nonempty_index(l) for l in idx.levels]\n codes = [[0, 0] for i in idx.levels]\n try:\n return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)\n except TypeError: # older pandas versions\n return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)\n\n raise TypeError(f\"Don't know how to handle index of type {typename(type(idx))}\")", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_categorical_dtype_pandas_percentile.return._percentile_a_q_interpo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_categorical_dtype_pandas_percentile.return._percentile_a_q_interpo", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 514, "end_line": 533, "span_ids": ["is_categorical_dtype_pandas", "tolist_pandas", "categorical_dtype_pandas", "percentile"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@categorical_dtype_dispatch.register((pd.DataFrame, pd.Series, pd.Index))\ndef categorical_dtype_pandas(categories=None, ordered=False):\n return pd.api.types.CategoricalDtype(categories=categories, ordered=ordered)\n\n\n@tolist_dispatch.register((pd.Series, pd.Index, pd.Categorical))\ndef tolist_pandas(obj):\n return obj.tolist()\n\n\n@is_categorical_dtype_dispatch.register(\n (pd.Series, pd.Index, pd.api.extensions.ExtensionDtype, np.dtype)\n)\ndef is_categorical_dtype_pandas(obj):\n return pd.api.types.is_categorical_dtype(obj)\n\n\n@percentile_lookup.register((pd.Series, pd.Index))\ndef percentile(a, q, interpolation=\"linear\"):\n return _percentile(a, q, interpolation)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_None_3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_None_3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 536, "end_line": 551, "span_ids": ["_register_cudf", "percentile"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "######################################\n# cuDF: Pandas Dataframes on the GPU #\n######################################\n\n\n@concat_dispatch.register_lazy(\"cudf\")\n@hash_object_dispatch.register_lazy(\"cudf\")\n@group_split_dispatch.register_lazy(\"cudf\")\n@get_parallel_type.register_lazy(\"cudf\")\n@meta_nonempty.register_lazy(\"cudf\")\n@make_meta_dispatch.register_lazy(\"cudf\")\n@make_meta_obj.register_lazy(\"cudf\")\n@percentile_lookup.register_lazy(\"cudf\")\ndef _register_cudf():\n import dask_cudf # noqa: F401", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_operator_pd_set_option_compute_us": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_operator_pd_set_option_compute_us", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 80, "span_ids": ["imports"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import operator\nimport warnings\nfrom collections.abc import Iterator, Sequence\nfrom functools import partial, wraps\nfrom numbers import Integral, Number\nfrom operator import getitem\nfrom pprint import pformat\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import (\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_numeric_dtype,\n is_timedelta64_dtype,\n)\nfrom tlz import first, merge, partition_all, remove, unique\n\nfrom .. import array as da\nfrom .. import core, threaded\nfrom ..array.core import Array, normalize_arg\nfrom ..base import DaskMethodsMixin, dont_optimize, is_dask_collection, tokenize\nfrom ..blockwise import Blockwise, BlockwiseDep, BlockwiseDepDict, blockwise\nfrom ..context import globalmethod\nfrom ..delayed import Delayed, delayed, unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..layers import DataFrameTreeReduction\nfrom ..utils import (\n IndexCallable,\n M,\n OperatorMethodMixin,\n _deprecated,\n apply,\n derived_from,\n funcname,\n has_keyword,\n is_arraylike,\n iter_chunks,\n key_split,\n memory_repr,\n parse_bytes,\n partial_by_order,\n pseudorandom,\n put_lines,\n random_state_data,\n typename,\n)\nfrom ..widgets import get_template\nfrom . 
import methods\nfrom ._compat import PANDAS_GT_140, PANDAS_GT_150\nfrom .accessor import CachedAccessor, DatetimeAccessor, StringAccessor\nfrom .categorical import CategoricalAccessor, categorize\nfrom .dispatch import (\n get_parallel_type,\n group_split_dispatch,\n hash_object_dispatch,\n meta_nonempty,\n)\nfrom .optimize import optimize\nfrom .utils import (\n PANDAS_GT_110,\n PANDAS_GT_120,\n check_matching_columns,\n clear_known_categories,\n drop_by_shallow_copy,\n has_known_categories,\n index_summary,\n insert_meta_param_description,\n is_categorical_dtype,\n is_dataframe_like,\n is_index_like,\n is_series_like,\n make_meta,\n raise_on_meta_error,\n valid_divisions,\n)\n\nno_default = \"__no_default__\"\n\npd.set_option(\"compute.use_numexpr\", False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__numeric_only__numeric_only.return.wrapper": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__numeric_only__numeric_only.return.wrapper", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 96, "span_ids": ["_numeric_only"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _numeric_only(func):\n \"\"\"Decorator for methods that accept a numeric_only kwarg\"\"\"\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n # numeric_only is None by default - in that case self = self.\n if kwargs.get(\"numeric_only\") is False:\n raise NotImplementedError(\n \"'numeric_only=False' is not implemented in Dask.\"\n )\n elif kwargs.get(\"numeric_only\") is True:\n self = self._get_numeric_data()\n return func(self, *args, **kwargs)\n\n return wrapper", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar.to_delayed_Scalar.to_delayed.return.Delayed_self_key_dsk_la": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Scalar.to_delayed_Scalar.to_delayed.return.Delayed_self_key_dsk_la", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 244, "end_line": 259, "span_ids": ["Scalar.to_delayed"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Scalar(DaskMethodsMixin, OperatorMethodMixin):\n\n def 
to_delayed(self, optimize_graph=True):\n \"\"\"Convert into a ``dask.delayed`` object.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n \"\"\"\n dsk = self.__dask_graph__()\n layer = self.__dask_layers__()[0]\n if optimize_graph:\n dsk = self.__dask_optimize__(dsk, self.__dask_keys__())\n layer = \"delayed-\" + self._name\n dsk = HighLevelGraph.from_collections(layer, dsk, dependencies=())\n return Delayed(self.key, dsk, layer=layer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame__Frame._constructor.return.new_dd_object": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame__Frame._constructor.return.new_dd_object", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 295, "end_line": 357, "span_ids": ["_Frame.__dask_layers__", "_Frame.__dask_keys__", "_Frame.__dask_tokenize__", "_Frame._rebuild", "_Frame:3", "_Frame._constructor", "_Frame", "_Frame.__dask_postcompute__", "_Frame.__init__", "_Frame.__dask_graph__", "_Frame.__dask_postpersist__"], "tokens": 465}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\"Superclass for DataFrame and Series\n\n Parameters\n ----------\n dsk: dict\n The dask graph to compute this DataFrame\n name: str\n The key prefix that specifies which keys in the dask comprise this\n particular DataFrame / Series\n meta: pandas.DataFrame, pandas.Series, or pandas.Index\n An empty pandas object with names, dtypes, and indices matching the\n expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n def __init__(self, dsk, name, meta, divisions):\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n self.dask = dsk\n self._name = name\n meta = make_meta(meta)\n if not self._is_partition_type(meta):\n raise TypeError(\n f\"Expected meta to specify type {type(self).__name__}, got type \"\n f\"{typename(type(meta))}\"\n )\n self._meta = meta\n self._divisions = tuple(divisions)\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [(self._name, i) for i in range(self.npartitions)]\n\n def __dask_layers__(self):\n return (self._name,)\n\n def __dask_tokenize__(self):\n return self._name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"dataframe_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def __dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()\n\n def _rebuild(self, dsk, *, rename=None):\n name = self._name\n if rename:\n name = rename.get(name, name)\n return type(self)(dsk, name, self._meta, 
self.divisions)\n\n @property\n def _constructor(self):\n return new_dd_object", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.divisions__Frame.divisions.return.self__divisions": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.divisions__Frame.divisions.return.self__divisions", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 359, "end_line": 381, "span_ids": ["_Frame.divisions"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def divisions(self):\n \"\"\"\n Tuple of ``npartitions + 1`` values, in ascending order, marking the\n lower/upper bounds of each partition's index. Divisions allow Dask\n to know which partition will contain a given value, significantly\n speeding up operations like `loc`, `merge`, and `groupby` by not\n having to search the full dataset.\n\n Example: for ``divisions = (0, 10, 50, 100)``, there are three partitions,\n where the index in each partition contains values [0, 10), [10, 50),\n and [50, 100], respectively. Dask therefore knows ``df.loc[45]``\n will be in the second partition.\n\n When every item in ``divisions`` is ``None``, the divisions are unknown.\n Most operations can still be performed, but some will be much slower,\n and a few may fail.\n\n It is uncommon to set ``divisions`` directly. 
Instead, use ``set_index``,\n which sorts and splits the data as needed.\n See https://docs.dask.org/en/latest/dataframe-design.html#partitions.\n \"\"\"\n return self._divisions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.divisions_9__Frame.divisions_9.self._divisions.value": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.divisions_9__Frame.divisions_9.self._divisions.value", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 383, "end_line": 398, "span_ids": ["_Frame.divisions_9"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @divisions.setter\n def divisions(self, value):\n if None in value:\n if any(v is not None for v in value):\n warnings.warn(\n \"received `divisions` with mix of nulls and non-nulls, future versions will only accept \"\n \"`divisions` that are all null or all non-null\",\n PendingDeprecationWarning,\n )\n if not isinstance(value, tuple):\n warnings.warn(\n f\"received `divisions` of type {type(value)}, future versions will only accept `divisions` of type \"\n \"tuple\",\n PendingDeprecationWarning,\n )\n self._divisions = value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.npartitions__Frame.__setstate__.self_dask_self__name_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.npartitions__Frame.__setstate__.self_dask_self__name_se", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 402, "end_line": 442, "span_ids": ["_Frame._args", "_Frame.attrs_12", "_Frame._meta_nonempty", "_Frame.__getstate__", "_Frame.attrs", "_Frame.__setstate__", "_Frame.npartitions", "_Frame.size"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def 
size(self):\n \"\"\"Size of the Series or DataFrame as a Delayed object.\n\n Examples\n --------\n >>> series.size # doctest: +SKIP\n dd.Scalar\n \"\"\"\n return self.reduction(\n methods.size, np.sum, token=\"size\", meta=int, split_every=False\n )\n\n @property\n def _meta_nonempty(self):\n \"\"\"A non-empty version of `_meta` with fake data.\"\"\"\n return meta_nonempty(self._meta)\n\n @property\n def _args(self):\n return (self.dask, self._name, self._meta, self.divisions)\n\n def __getstate__(self):\n return self._args\n\n def __setstate__(self, state):\n self.dask, self._name, self._meta, self._divisions = state", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.copy__Frame.__array_wrap__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.copy__Frame.__array_wrap__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 442, "end_line": 467, "span_ids": ["_Frame.__array_wrap__", "_Frame.copy", "_Frame.__array__"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def copy(self, deep=False):\n \"\"\"Make a copy of the dataframe\n\n This is strictly a shallow copy of the underlying computational graph.\n It does not affect the underlying data\n\n Parameters\n ----------\n deep : boolean, default False\n The deep value must be `False` and it is declared as a parameter just for\n compatibility with third-party libraries like cuDF\n \"\"\"\n if deep is not False:\n raise ValueError(\n \"The `deep` value must be False. 
This is strictly a shallow copy \"\n \"of the underlying computational graph.\"\n )\n return new_dd_object(self.dask, self._name, self._meta, self.divisions)\n\n def __array__(self, dtype=None, **kwargs):\n self._computed = self.compute()\n x = np.array(self._computed)\n return x\n\n def __array_wrap__(self, array, context=None):\n raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__array_ufunc____Frame.__array_ufunc__.if_method___call___.else_.return.NotImplemented": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.__array_ufunc____Frame.__array_ufunc__.if_method___call___.else_.return.NotImplemented", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 469, "end_line": 492, "span_ids": ["_Frame.__array_ufunc__"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n # ufuncs work with 0-dimensional NumPy ndarrays\n # so we don't want to raise NotImplemented\n if isinstance(x, np.ndarray) and x.shape == ():\n continue\n elif not isinstance(\n x, (Number, Scalar, _Frame, Array, pd.DataFrame, pd.Series, pd.Index)\n ):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc.signature is not None:\n return NotImplemented\n if numpy_ufunc.nout > 1:\n # ufuncs with multiple output values\n # are not yet supported for frames\n return NotImplemented\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n else:\n # ufunc methods are not yet supported for frames\n return NotImplemented", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.index__Frame.index_27.self._meta.result__meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.index__Frame.index_27.self._meta.result__meta", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 526, "end_line": 545, "span_ids": ["_Frame.index", "_Frame.index_27"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
_Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @property\n def index(self):\n \"\"\"Return dask Index instance\"\"\"\n return self.map_partitions(\n getattr,\n \"index\",\n token=self._name + \"-index\",\n meta=self._meta.index,\n enforce_metadata=False,\n )\n\n @index.setter\n def index(self, value):\n self.divisions = value.divisions\n result = map_partitions(\n methods.assign_index, self, value, enforce_metadata=False\n )\n self.dask = result.dask\n self._name = result._name\n self._meta = result._meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.autocorr_Series.view.return.self_map_partitions_M_vie": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.autocorr_Series.view.return.self_map_partitions_M_vie", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3949, "end_line": 4008, "span_ids": ["Series.is_monotonic_decreasing", "Series.view", "Series.__divmod__", "Series.is_monotonic_increasing", "Series.__rdivmod__", "Series.is_monotonic", "Series.autocorr", "Series.memory_usage"], "tokens": 439}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def autocorr(self, lag=1, split_every=False):\n if not isinstance(lag, Integral):\n raise TypeError(\"lag must be an integer\")\n return self.corr(self if lag == 0 else self.shift(lag), split_every=split_every)\n\n @derived_from(pd.Series)\n def memory_usage(self, index=True, deep=False):\n result = self.map_partitions(\n M.memory_usage, index=index, deep=deep, enforce_metadata=False\n )\n return delayed(sum)(result.to_delayed())\n\n def __divmod__(self, other):\n res1 = self // other\n res2 = self % other\n return res1, res2\n\n def __rdivmod__(self, other):\n res1 = other // self\n res2 = other % self\n return res1, res2\n\n @property\n @derived_from(pd.Series)\n def is_monotonic(self):\n if PANDAS_GT_150:\n warnings.warn(\n \"is_monotonic is deprecated and will be removed in a future version. 
\"\n \"Use is_monotonic_increasing instead.\",\n FutureWarning,\n )\n return self.is_monotonic_increasing\n\n @property\n @derived_from(pd.Series)\n def is_monotonic_increasing(self):\n return aca(\n self,\n chunk=methods.monotonic_increasing_chunk,\n aggregate=methods.monotonic_increasing_aggregate,\n meta=bool,\n token=\"monotonic_increasing\",\n )\n\n @property\n @derived_from(pd.Series)\n def is_monotonic_decreasing(self):\n return aca(\n self,\n chunk=methods.monotonic_decreasing_chunk,\n aggregate=methods.monotonic_decreasing_aggregate,\n meta=bool,\n token=\"monotonic_decreasing\",\n )\n\n @derived_from(pd.Series)\n def view(self, dtype):\n meta = self._meta.view(dtype)\n return self.map_partitions(M.view, dtype, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.to_series_Index.to_frame.return.self_map_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.to_series_Index.to_frame.return.self_map_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3987, "end_line": 4006, "span_ids": ["Index.to_series", "Index.to_frame"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @derived_from(pd.Index)\n def to_series(self):\n return self.map_partitions(\n M.to_series,\n meta=self._meta.to_series(),\n transform_divisions=False,\n )\n\n @derived_from(pd.Index, ua_args=[\"index\"])\n def to_frame(self, index=True, name=None):\n if not index:\n raise NotImplementedError()\n args = [index] if name is None else [index, name]\n\n return self.map_partitions(\n M.to_frame,\n *args,\n meta=self._meta.to_frame(*args),\n transform_divisions=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame_DataFrame.columns_3.self.dask.renamed_dask": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame_DataFrame.columns_3.self.dask.renamed_dask", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4044, "end_line": 4123, "span_ids": ["DataFrame", "DataFrame.columns", "DataFrame.__array_wrap__", "DataFrame.columns_3", "DataFrame.axes", "DataFrame.__init__"], "tokens": 594}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n \"\"\"\n Parallel Pandas DataFrame\n\n Do not use this class directly. Instead use functions like\n ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.\n\n Parameters\n ----------\n dsk: dict\n The dask graph to compute this DataFrame\n name: str\n The key prefix that specifies which keys in the dask comprise this\n particular DataFrame\n meta: pandas.DataFrame\n An empty ``pandas.DataFrame`` with names, dtypes, and index matching\n the expected output.\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n _partition_type = pd.DataFrame\n _is_partition_type = staticmethod(is_dataframe_like)\n _token_prefix = \"dataframe-\"\n _accessors = set()\n\n def __init__(self, dsk, name, meta, divisions):\n super().__init__(dsk, name, meta, divisions)\n if self.dask.layers[name].collection_annotations is None:\n self.dask.layers[name].collection_annotations = {\n \"npartitions\": self.npartitions,\n \"columns\": [col for col in self.columns],\n \"type\": typename(type(self)),\n \"dataframe_type\": typename(type(self._meta)),\n \"series_dtypes\": {\n col: self._meta[col].dtype\n if hasattr(self._meta[col], \"dtype\")\n else None\n for col in self._meta.columns\n },\n }\n else:\n self.dask.layers[name].collection_annotations.update(\n {\n \"npartitions\": self.npartitions,\n \"columns\": [col for col in self.columns],\n \"type\": typename(type(self)),\n \"dataframe_type\": typename(type(self._meta)),\n \"series_dtypes\": {\n col: self._meta[col].dtype\n if hasattr(self._meta[col], \"dtype\")\n else None\n for col in self._meta.columns\n },\n }\n )\n\n def __array_wrap__(self, array, context=None):\n if isinstance(context, tuple) and len(context) > 0:\n if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():\n index = None\n else:\n index = context[1][0].index\n\n return pd.DataFrame(array, index=index, columns=self.columns)\n\n @property\n def axes(self):\n return [self.index, self.columns]\n\n @property\n def columns(self):\n return self._meta.columns\n\n @columns.setter\n def columns(self, columns):\n renamed = _rename_dask(self, columns)\n self._meta = renamed._meta\n self._name = renamed._name\n self.dask = renamed.dask", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.assign_DataFrame.assign.return.data": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.assign_DataFrame.assign.return.data", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4581, "end_line": 4618, "span_ids": ["DataFrame.assign"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def assign(self, **kwargs):\n data = self.copy()\n for k, v 
in kwargs.items():\n if not (\n isinstance(v, Scalar)\n or is_series_like(v)\n or callable(v)\n or pd.api.types.is_scalar(v)\n or is_index_like(v)\n or isinstance(v, Array)\n ):\n raise TypeError(\n f\"Column assignment doesn't support type {typename(type(v))}\"\n )\n if callable(v):\n kwargs[k] = v(data)\n if isinstance(v, Array):\n from .io import from_dask_array\n\n if len(v.shape) > 1:\n raise ValueError(\"Array assignment only supports 1-D arrays\")\n if v.npartitions != data.npartitions:\n raise ValueError(\n \"Number of partitions do not match \"\n f\"({v.npartitions} != {data.npartitions})\"\n )\n kwargs[k] = from_dask_array(v, index=data.index, meta=data._meta)\n\n pairs = [k, kwargs[k]]\n\n # Figure out columns of the output\n df2 = data._meta_nonempty.assign(\n **_extract_meta({k: kwargs[k]}, nonempty=True)\n )\n data = elemwise(methods.assign, data, *pairs, meta=df2)\n\n return data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.eval_DataFrame.to_string.return.self__repr_data_to_stri": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.eval_DataFrame.to_string.return.self__repr_data_to_stri", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4649, "end_line": 4743, "span_ids": ["DataFrame.explode", "DataFrame.dropna", "DataFrame.clip", "DataFrame.to_parquet", "DataFrame.eval", "DataFrame.clip_upper", "DataFrame.to_orc", "DataFrame.squeeze", "DataFrame.to_bag", "DataFrame.to_timestamp", "DataFrame.to_string", "DataFrame.clip_lower"], "tokens": 752}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def eval(self, expr, inplace=None, **kwargs):\n if inplace is None:\n inplace = False\n if \"=\" in expr and inplace in (True, None):\n raise NotImplementedError(\n \"Inplace eval not supported. 
Please use inplace=False\"\n )\n meta = self._meta.eval(expr, inplace=inplace, **kwargs)\n return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)\n\n @derived_from(pd.DataFrame)\n def dropna(self, how=\"any\", subset=None, thresh=None):\n return self.map_partitions(\n M.dropna, how=how, subset=subset, thresh=thresh, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip(self, lower=None, upper=None, out=None):\n if out is not None:\n raise ValueError(\"'out' must be None\")\n return self.map_partitions(\n M.clip, lower=lower, upper=upper, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip_lower(self, threshold):\n return self.map_partitions(\n M.clip_lower, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def clip_upper(self, threshold):\n return self.map_partitions(\n M.clip_upper, threshold=threshold, enforce_metadata=False\n )\n\n @derived_from(pd.DataFrame)\n def squeeze(self, axis=None):\n if axis in [None, 1]:\n if len(self.columns) == 1:\n return self[self.columns[0]]\n else:\n return self\n\n elif axis == 0:\n raise NotImplementedError(\n f\"{type(self)} does not support squeeze along axis 0\"\n )\n\n elif axis not in [0, 1, None]:\n raise ValueError(f\"No axis {axis} for object type {type(self)}\")\n\n @derived_from(pd.DataFrame)\n def to_timestamp(self, freq=None, how=\"start\", axis=0):\n df = elemwise(M.to_timestamp, self, freq, how, axis)\n df.divisions = tuple(pd.Index(self.divisions).to_timestamp())\n return df\n\n @derived_from(pd.DataFrame)\n def explode(self, column):\n meta = self._meta.explode(column)\n return self.map_partitions(M.explode, column, meta=meta, enforce_metadata=False)\n\n def to_bag(self, index=False, format=\"tuple\"):\n \"\"\"Convert to a dask Bag of tuples of each row.\n\n Parameters\n ----------\n index : bool, optional\n If True, the index is included as the first element of each tuple.\n Default is False.\n format : {\"tuple\", \"dict\"}, optional\n Whether to return a bag of tuples or dictionaries.\n \"\"\"\n from .io import to_bag\n\n return to_bag(self, index, format)\n\n def to_parquet(self, path, *args, **kwargs):\n \"\"\"See dd.to_parquet docstring for more information\"\"\"\n from .io import to_parquet\n\n return to_parquet(self, path, *args, **kwargs)\n\n def to_orc(self, path, *args, **kwargs):\n \"\"\"See dd.to_orc docstring for more information\"\"\"\n from .io import to_orc\n\n return to_orc(self, path, *args, **kwargs)\n\n @derived_from(pd.DataFrame)\n def to_string(self, max_rows=5):\n # option_context doesn't affect\n return self._repr_data().to_string(max_rows=max_rows, show_dimensions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._get_numeric_data_DataFrame._validate_axis.return._None_0_index_0_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame._get_numeric_data_DataFrame._validate_axis.return._None_0_index_0_co", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4745, "end_line": 4761, "span_ids": ["DataFrame._get_numeric_data", "DataFrame._validate_axis"], 
"tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def _get_numeric_data(self, how=\"any\", subset=None):\n # calculate columns to avoid unnecessary calculation\n numerics = self._meta._get_numeric_data()\n\n if len(numerics.columns) < len(self.columns):\n name = self._token_prefix + \"-get_numeric_data\"\n return self.map_partitions(M._get_numeric_data, meta=numerics, token=name)\n else:\n # use myself if all numerics\n return self\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, 1, \"index\", \"columns\", None):\n raise ValueError(f\"No axis named {axis}\")\n # convert to numeric axis\n return {None: 0, \"index\": 0, \"columns\": 1}.get(axis, axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.applymap_DataFrame.nunique.if_axis_1_.else_.return.Series_graph_name_self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.applymap_DataFrame.nunique.if_axis_1_.else_.return.Series_graph_name_self_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5135, "end_line": 5173, "span_ids": ["DataFrame.round", "DataFrame.applymap", "DataFrame.nunique"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def applymap(self, func, meta=\"__no_default__\"):\n return elemwise(M.applymap, self, func, meta=meta)\n\n @derived_from(pd.DataFrame)\n def round(self, decimals=0):\n return elemwise(M.round, self, decimals)\n\n @derived_from(pd.DataFrame)\n def nunique(self, split_every=False, dropna=True, axis=0):\n if axis == 1:\n # split_every not used for axis=1\n meta = self._meta_nonempty.nunique(axis=axis)\n return self.map_partitions(\n M.nunique,\n meta=meta,\n token=\"series-nunique\",\n axis=axis,\n dropna=dropna,\n enforce_metadata=False,\n )\n else:\n nunique_list = [\n self[col].nunique(split_every=split_every, dropna=dropna)\n for col in self.columns\n ]\n name = \"series-\" + tokenize(*nunique_list)\n dsk = {\n (name, 0): (\n apply,\n pd.Series,\n [[(s._name, 0) for s in nunique_list]],\n {\"index\": self.columns},\n )\n }\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=nunique_list\n )\n return Series(graph, name, self._meta.nunique(), (None, None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.mode_DataFrame.corr.return.cov_corr_self_min_period": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.mode_DataFrame.corr.return.cov_corr_self_min_period", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5346, "end_line": 5383, "span_ids": ["DataFrame.mode", "DataFrame.corr", "DataFrame.cov"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n @derived_from(pd.DataFrame)\n def mode(self, dropna=True, split_every=False):\n mode_series_list = []\n for col_index in range(len(self.columns)):\n col_series = self.iloc[:, col_index]\n mode_series = Series.mode(\n col_series, dropna=dropna, split_every=split_every\n )\n mode_series_list.append(mode_series)\n\n name = \"concat-\" + tokenize(*mode_series_list)\n\n dsk = {\n (name, 0): (\n apply,\n methods.concat,\n [[(df._name, 0) for df in mode_series_list]],\n {\"axis\": 1},\n )\n }\n\n meta = methods.concat([df._meta for df in mode_series_list], axis=1)\n graph = HighLevelGraph.from_collections(\n name, dsk, dependencies=mode_series_list\n )\n ddf = new_dd_object(graph, name, meta, divisions=(None, None))\n\n return ddf\n\n @derived_from(pd.DataFrame)\n def cov(self, min_periods=None, split_every=False):\n return cov_corr(self, min_periods, split_every=split_every)\n\n @derived_from(pd.DataFrame)\n def corr(self, method=\"pearson\", min_periods=None, split_every=False):\n if method != \"pearson\":\n raise NotImplementedError(\"Only Pearson correlation has been implemented\")\n return cov_corr(self, min_periods, True, split_every=split_every)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_records_DataFrame._repr_html_.return.get_template_dataframe_h": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_DataFrame.to_records_DataFrame._repr_html_.return.get_template_dataframe_h", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 5560, "end_line": 5599, "span_ids": ["DataFrame.to_html", "DataFrame._repr_data", "DataFrame._repr_html_", "DataFrame.to_records"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrame(_Frame):\n\n def to_records(self, index=False, lengths=None):\n from .io import to_records\n\n if lengths is True:\n lengths = 
tuple(self.map_partitions(len).compute())\n\n records = to_records(self)\n\n chunks = self._validate_chunks(records, lengths)\n records._chunks = (chunks[0],)\n\n return records\n\n @derived_from(pd.DataFrame)\n def to_html(self, max_rows=5):\n # pd.Series doesn't have html repr\n data = self._repr_data().to_html(max_rows=max_rows, show_dimensions=False)\n return get_template(\"dataframe.html.j2\").render(\n data=data, name=self._name, task=self.dask\n )\n\n def _repr_data(self):\n meta = self._meta\n index = self._repr_divisions\n cols = meta.columns\n if len(cols) == 0:\n series_df = pd.DataFrame([[]] * len(index), columns=cols, index=index)\n else:\n series_df = pd.concat(\n [_repr_data_series(s, index=index) for _, s in meta.items()], axis=1\n )\n return series_df\n\n def _repr_html_(self):\n data = self._repr_data().to_html(\n max_rows=5, show_dimensions=False, notebook=True\n )\n return get_template(\"dataframe.html.j2\").render(\n data=data, name=self._name, task=self.dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_datetime_to_datetime.return.map_partitions_pd_to_date": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_datetime_to_datetime.return.map_partitions_pd_to_date", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 7135, "end_line": 7151, "span_ids": ["to_datetime"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.to_datetime)\ndef to_datetime(arg, meta=None, **kwargs):\n tz_kwarg = {\"tz\": \"utc\"} if kwargs.get(\"utc\") else {}\n if meta is None:\n if isinstance(arg, Index):\n meta = pd.DatetimeIndex([], **tz_kwarg)\n meta.name = arg.name\n elif not (is_dataframe_like(arg) or is_series_like(arg)):\n raise NotImplementedError(\n \"dask.dataframe.to_datetime does not support \"\n \"non-index-able arguments (like scalars)\"\n )\n else:\n meta = pd.Series([pd.Timestamp(\"2000\", **tz_kwarg)])\n meta.index = meta.index.astype(arg.index.dtype)\n meta.index.name = arg.index.name\n return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_timedelta_has_parallel_type.return.get_parallel_type_x_is_n": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_to_timedelta_has_parallel_type.return.get_parallel_type_x_is_n", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 7154, "end_line": 7182, "span_ids": 
["impl:24", "to_timedelta", "_repr_data_series", "has_parallel_type"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@wraps(pd.to_timedelta)\ndef to_timedelta(arg, unit=\"ns\", errors=\"raise\"):\n meta = pd.Series([pd.Timedelta(1, unit=unit)])\n return map_partitions(pd.to_timedelta, arg, unit=unit, errors=errors, meta=meta)\n\n\nif hasattr(pd, \"isna\"):\n\n @wraps(pd.isna)\n def isna(arg):\n return map_partitions(pd.isna, arg)\n\n\ndef _repr_data_series(s, index):\n \"\"\"A helper for creating the ``_repr_data`` property\"\"\"\n npartitions = len(index) - 1\n if is_categorical_dtype(s):\n if has_known_categories(s):\n dtype = \"category[known]\"\n else:\n dtype = \"category[unknown]\"\n else:\n dtype = str(s.dtype)\n return pd.Series([dtype] + [\"...\"] * npartitions, index=index, name=s.name)\n\n\ndef has_parallel_type(x):\n \"\"\"Does this object have a dask dataframe equivalent?\"\"\"\n return get_parallel_type(x) is not Scalar", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_collections__determine_levels.if_isinstance_by_tuple_.else_.return.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_collections__determine_levels.if_isinstance_by_tuple_.else_.return.0", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 72, "span_ids": ["imports", "_determine_levels"], "tokens": 553}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import collections\nimport itertools as it\nimport operator\nimport warnings\nfrom numbers import Integral\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import M, _deprecated, derived_from, funcname, itemgetter\nfrom .core import (\n DataFrame,\n Series,\n _extract_meta,\n aca,\n map_partitions,\n new_dd_object,\n no_default,\n split_out_on_index,\n)\nfrom .methods import concat, drop_columns\nfrom .shuffle import shuffle\nfrom .utils import (\n PANDAS_GT_110,\n insert_meta_param_description,\n is_dataframe_like,\n is_series_like,\n make_meta,\n raise_on_meta_error,\n)\n\n# #############################################\n#\n# GroupBy implementation notes\n#\n# Dask groupby supports reductions, i.e., mean, sum and alike, and apply. The\n# former do not shuffle the data and are efficiently implemented as tree\n# reductions. 
The latter is implemented by shuffling the underlying partitions\n# such that all items of a group can be found in the same partition.\n#\n# The argument to ``.groupby`` (``by``) can be a ``str``, ``dd.DataFrame``,\n# ``dd.Series``, or a list thereof. In operations on the grouped object, the\n# divisions of the grouped object and the items of ``by`` have to align.\n# Currently, there is no support to shuffle the ``by`` values as part of the\n# groupby operation. Therefore, the alignment has to be guaranteed by the\n# caller.\n#\n# To operate on matching partitions, most groupby operations exploit the\n# corresponding support in ``apply_concat_apply``. Specifically, this function\n# operates on matching partitions of frame-like objects passed as varargs.\n#\n# After the initial chunk step, ``by`` is implicitly passed along to\n# subsequent operations as the index of the partitions. Groupby operations on\n# the individual partitions can then access ``by`` via the ``levels``\n# parameter of the ``groupby`` function. The correct argument is determined by\n# the ``_determine_levels`` function.\n#\n# To minimize overhead, any ``by`` that is a series contained within the\n# dataframe is passed as a column key. This transformation is implemented as\n# ``_normalize_by``.\n#\n# #############################################\n\n\ndef _determine_levels(by):\n \"\"\"Determine the correct levels argument to groupby.\"\"\"\n if isinstance(by, (tuple, list)) and len(by) > 1:\n return list(range(len(by)))\n else:\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_by__normalize_by.if_not_isinstance_df_Dat.else_.return.by": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__normalize_by__normalize_by.if_not_isinstance_df_Dat.else_.return.by", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 75, "end_line": 94, "span_ids": ["_normalize_by"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_by(df, by):\n \"\"\"Replace series with column names wherever possible.\"\"\"\n if not isinstance(df, DataFrame):\n return by\n\n elif isinstance(by, list):\n return [_normalize_by(df, col) for col in by]\n\n elif is_series_like(by) and by.name in df.columns and by._name == df[by.name]._name:\n return by.name\n\n elif (\n isinstance(by, DataFrame)\n and set(by.columns).issubset(df.columns)\n and by._name == df[by.columns]._name\n ):\n return list(by.columns)\n\n else:\n return by", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_shift__groupby_slice_shift.return.g_shift_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_slice_shift__groupby_slice_shift.return.g_shift_kwargs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 191, "end_line": 203, "span_ids": ["_groupby_slice_shift"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_slice_shift(\n df, grouper, key, shuffled, group_keys=True, dropna=None, observed=None, **kwargs\n):\n # No need to use raise if unaligned here - this is only called after\n # shuffling, which makes everything aligned already\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n if shuffled:\n df = df.sort_index()\n g = df.groupby(grouper, group_keys=group_keys, **observed, **dropna)\n if key:\n g = g[key]\n return g.shift(**kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_aggregate__apply_chunk.if_is_series_like_df_or_.else_.return.func_g_columns_kwargs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_aggregate__apply_chunk.if_is_series_like_df_or_.else_.return.func_g_columns_kwargs", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 287, "end_line": 309, "span_ids": ["_apply_chunk", "_groupby_aggregate"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_aggregate(\n df, aggfunc=None, levels=None, dropna=None, sort=False, observed=None, **kwargs\n):\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n\n grouped = df.groupby(level=levels, sort=sort, **observed, **dropna)\n return aggfunc(grouped, **kwargs)\n\n\ndef _apply_chunk(df, *by, dropna=None, observed=None, **kwargs):\n func = kwargs.pop(\"chunk\")\n columns = kwargs.pop(\"columns\")\n dropna = {\"dropna\": dropna} if dropna is not None else {}\n observed = {\"observed\": observed} if observed is not None else {}\n\n g = _groupby_raise_unaligned(df, by=by, **observed, **dropna)\n if is_series_like(df) or columns is None:\n return func(g, **kwargs)\n else:\n if isinstance(columns, (tuple, list, set, pd.Index)):\n columns = 
list(columns)\n return func(g[columns], **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__mul_cols__mul_cols.return._df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__mul_cols__mul_cols.return._df", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 394, "end_line": 409, "span_ids": ["_mul_cols"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _mul_cols(df, cols):\n \"\"\"Internal function to be used with apply to multiply\n each column in a dataframe by itself and every other column\n\n a b c -> a*a, a*b, a*c, b*b, b*c, c*c\n \"\"\"\n _df = df.__class__()\n for i, j in it.combinations_with_replacement(cols, 2):\n col = f\"{i}{j}\"\n _df[col] = df[i] * df[j]\n\n # Fix index in a groupby().apply() context\n # https://github.com/dask/dask/issues/8137\n # https://github.com/pandas-dev/pandas/issues/43568\n _df.index = [0] * len(_df)\n return _df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_chunk__cov_chunk.return._x_mul_n_col_mapping_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__cov_chunk__cov_chunk.return._x_mul_n_col_mapping_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 412, "end_line": 450, "span_ids": ["_cov_chunk"], "tokens": 296}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cov_chunk(df, *by):\n \"\"\"Covariance Chunk Logic\n\n Parameters\n ----------\n df : Pandas.DataFrame\n std : bool, optional\n When std=True we are calculating with Correlation\n\n Returns\n -------\n tuple\n Processed X, Multiplied Cols,\n \"\"\"\n if is_series_like(df):\n df = df.to_frame()\n df = df.copy()\n\n # mapping columns to str(numerical) values allows us to easily handle\n # arbitrary column names (numbers, string, empty strings)\n col_mapping = collections.OrderedDict()\n for i, c in enumerate(df.columns):\n col_mapping[c] = str(i)\n df = df.rename(columns=col_mapping)\n cols = df._get_numeric_data().columns\n\n # when grouping by external series don't exclude columns\n is_mask = any(is_series_like(s) for s in by)\n if not is_mask:\n by = [col_mapping[k] for k in by]\n cols = 
cols.drop(np.array(by))\n\n g = _groupby_raise_unaligned(df, by=by)\n x = g.sum()\n\n mul = g.apply(_mul_cols, cols=cols).reset_index(level=-1, drop=True)\n\n n = g[x.columns].count().rename(columns=lambda c: f\"{c}-count\")\n return (x, mul, n, col_mapping)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__nunique_df_combine__make_agg_id.return.f_func_s_column_s_to": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__nunique_df_combine__make_agg_id.return.f_func_s_column_s_to", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 539, "end_line": 575, "span_ids": ["_nunique_series_chunk", "_nunique_df_combine", "_make_agg_id", "_nunique_df_aggregate"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _nunique_df_combine(df, levels, sort=False):\n result = (\n df.groupby(level=levels, sort=sort)\n .apply(_drop_duplicates_reindex)\n .reset_index(level=-1, drop=True)\n )\n return result\n\n\ndef _nunique_df_aggregate(df, levels, name, sort=False):\n return df.groupby(level=levels, sort=sort)[name].nunique()\n\n\ndef _nunique_series_chunk(df, *by, **_ignored_):\n # convert series to data frame, then hand over to dataframe code path\n assert is_series_like(df)\n\n df = df.to_frame()\n kwargs = dict(name=df.columns[0], levels=_determine_levels(by))\n return _nunique_df_chunk(df, *by, **kwargs)\n\n\n###############################################################\n# Aggregate support\n#\n# Aggregate is implemented as:\n#\n# 1. group-by-aggregate all partitions into intermediate values\n# 2. collect all partitions into a single partition\n# 3. group-by-aggregate the result into intermediate values\n# 4. 
transform all intermediate values into the result\n#\n# In Step 1 and 3 the dataframe is grouped on the same columns.\n#\n###############################################################\ndef _make_agg_id(func, column):\n return f\"{func!s}-{column!s}-{tokenize(func, column)}\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_single__build_agg_args_single.if_func_in_simple_impl_ke.else_.raise_ValueError_f_unknow": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__build_agg_args_single__build_agg_args_single.if_func_in_simple_impl_ke.else_.raise_ValueError_f_unknow", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 723, "end_line": 756, "span_ids": ["_build_agg_args_single"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _build_agg_args_single(result_column, func, input_column):\n simple_impl = {\n \"sum\": (M.sum, M.sum),\n \"min\": (M.min, M.min),\n \"max\": (M.max, M.max),\n \"count\": (M.count, M.sum),\n \"size\": (M.size, M.sum),\n \"first\": (M.first, M.first),\n \"last\": (M.last, M.last),\n \"prod\": (M.prod, M.prod),\n }\n\n if func in simple_impl.keys():\n return _build_agg_args_simple(\n result_column, func, input_column, simple_impl[func]\n )\n\n elif func == \"var\":\n return _build_agg_args_var(result_column, func, input_column)\n\n elif func == \"std\":\n return _build_agg_args_std(result_column, func, input_column)\n\n elif func == \"mean\":\n return _build_agg_args_mean(result_column, func, input_column)\n\n elif func == \"list\":\n return _build_agg_args_list(result_column, func, input_column)\n\n elif isinstance(func, Aggregation):\n return _build_agg_args_custom(result_column, func, input_column)\n\n else:\n raise ValueError(f\"unknown aggregate {func}\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_apply_funcs__groupby_apply_funcs.if_is_dataframe_like_df_.else_.return.df_head_0_to_frame___c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__groupby_apply_funcs__groupby_apply_funcs.if_is_dataframe_like_df_.else_.return.df_head_0_to_frame___c", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 886, "end_line": 933, "span_ids": ["_groupby_apply_funcs"], "tokens": 341}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _groupby_apply_funcs(df, *by, **kwargs):\n \"\"\"\n Group a dataframe and apply multiple aggregation functions.\n\n Parameters\n ----------\n df: pandas.DataFrame\n The dataframe to work on.\n by: list of groupers\n If given, they are added to the keyword arguments as the ``by``\n argument.\n funcs: list of result-colum, function, keywordargument triples\n The list of functions that are applied on the grouped data frame.\n Has to be passed as a keyword argument.\n kwargs:\n All keyword arguments, but ``funcs``, are passed verbatim to the groupby\n operation of the dataframe\n\n Returns\n -------\n aggregated:\n the aggregated dataframe.\n \"\"\"\n if len(by):\n # since we're coming through apply, `by` will be a tuple.\n # Pandas treats tuples as a single key, and lists as multiple keys\n # We want multiple keys\n kwargs.update(by=list(by))\n\n funcs = kwargs.pop(\"funcs\")\n grouped = _groupby_raise_unaligned(df, **kwargs)\n\n result = collections.OrderedDict()\n for result_column, func, func_kwargs in funcs:\n r = func(grouped, **func_kwargs)\n\n if isinstance(r, tuple):\n for idx, s in enumerate(r):\n result[f\"{result_column}-{idx}\"] = s\n\n else:\n result[result_column] = r\n\n if is_dataframe_like(df):\n return df.__class__(result)\n else:\n # Get the DataFrame type of this Series object\n return df.head(0).to_frame().__class__(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.shift__GroupBy.shift.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.shift__GroupBy.shift.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1836, "end_line": 1924, "span_ids": ["_GroupBy.shift"], "tokens": 667}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n @insert_meta_param_description(pad=12)\n def shift(self, periods=1, freq=None, axis=0, fill_value=None, meta=no_default):\n \"\"\"Parallel version of pandas GroupBy.shift\n\n This mimics the pandas version except for the following:\n\n If the grouper does not align with the index then this causes a full\n shuffle. 
The order of rows within each group may not be preserved.\n\n Parameters\n ----------\n periods : Delayed, Scalar or int, default 1\n Number of periods to shift.\n freq : Delayed, Scalar or str, optional\n Frequency string.\n axis : int, default 0\n Shift direction.\n fill_value : Scalar, Delayed or object, optional\n The scalar value to use for newly introduced missing values.\n $META\n\n Returns\n -------\n shifted : Series or DataFrame shifted within each group.\n\n Examples\n --------\n >>> import dask\n >>> ddf = dask.datasets.timeseries(freq=\"1H\")\n >>> result = ddf.groupby(\"name\").shift(1, meta={\"id\": int, \"x\": float, \"y\": float})\n \"\"\"\n if meta is no_default:\n with raise_on_meta_error(\"groupby.shift()\", udf=False):\n meta_kwargs = _extract_meta(\n {\n \"periods\": periods,\n \"freq\": freq,\n \"axis\": axis,\n \"fill_value\": fill_value,\n },\n nonempty=True,\n )\n meta = self._meta_nonempty.shift(**meta_kwargs)\n\n msg = (\n \"`meta` is not specified, inferred from partial data. \"\n \"Please provide `meta` if the result is unexpected.\\n\"\n \" Before: .shift(1)\\n\"\n \" After: .shift(1, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\\n\"\n \" or: .shift(1, meta=('x', 'f8')) for series result\"\n )\n warnings.warn(msg, stacklevel=2)\n\n meta = make_meta(meta, parent_meta=self._meta.obj)\n\n # Validate self.by\n if isinstance(self.by, list) and any(\n isinstance(item, Series) for item in self.by\n ):\n raise NotImplementedError(\n \"groupby-shift with multiple Series is currently not supported\"\n )\n df = self.obj\n should_shuffle = not (df.known_divisions and df._contains_index_name(self.by))\n\n if should_shuffle:\n df2, by = self._shuffle(meta)\n else:\n df2 = df\n by = self.by\n\n # Perform embarrassingly parallel groupby-shift\n result = map_partitions(\n _groupby_slice_shift,\n df2,\n by,\n self._slice,\n should_shuffle,\n periods=periods,\n freq=freq,\n axis=axis,\n fill_value=fill_value,\n token=\"groupby-shift\",\n group_keys=self.group_keys,\n meta=meta,\n **self.observed,\n **self.dropna,\n )\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.rolling__GroupBy.rolling.return.RollingGroupby_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.rolling__GroupBy.rolling.return.RollingGroupby_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1905, "end_line": 1964, "span_ids": ["_GroupBy.rolling"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n def rolling(self, window, min_periods=None, center=False, win_type=None, axis=0):\n \"\"\"Provides rolling transformations.\n\n .. note::\n\n Since MultiIndexes are not well supported in Dask, this method returns a\n dataframe with the same index as the original data. 
The groupby column is\n not added as the first level of the index like pandas does.\n\n This method works differently from other groupby methods. It does a groupby\n on each partition (plus some overlap). This means that the output has the\n same shape and number of partitions as the original.\n\n Parameters\n ----------\n window : str, offset\n Size of the moving window. This is the number of observations used\n for calculating the statistic. Data must have a ``DatetimeIndex``\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n center : boolean, default False\n Set the labels at the center of the window.\n win_type : string, default None\n Provide a window type. The recognized window types are identical\n to pandas.\n axis : int, default 0\n\n Returns\n -------\n a Rolling object on which to call a method to compute a statistic\n\n Examples\n --------\n >>> import dask\n >>> ddf = dask.datasets.timeseries(freq=\"1H\")\n >>> result = ddf.groupby(\"name\").x.rolling('1D').max()\n \"\"\"\n from dask.dataframe.rolling import RollingGroupby\n\n if isinstance(window, Integral):\n raise ValueError(\n \"Only time indexes are supported for rolling groupbys in dask dataframe. \"\n \"``window`` must be a ``freq`` (e.g. '1H').\"\n )\n\n if min_periods is not None:\n if not isinstance(min_periods, Integral):\n raise ValueError(\"min_periods must be an integer\")\n if min_periods < 0:\n raise ValueError(\"min_periods must be >= 0\")\n\n return RollingGroupby(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n axis=axis,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.tail_SeriesGroupBy.tail.return.self__aca_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.tail_SeriesGroupBy.tail.return.self__aca_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2111, "end_line": 2123, "span_ids": ["SeriesGroupBy.tail"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def tail(self, n=5, split_every=None, split_out=1):\n index_levels = len(self.by) if isinstance(self.by, list) else 1\n return self._aca_agg(\n token=\"tail\",\n func=_tail_chunk,\n aggfunc=_tail_aggregate,\n meta=M.tail(self._meta_nonempty),\n chunk_kwargs={\"n\": n},\n aggregate_kwargs={\"n\": n, \"index_levels\": index_levels},\n split_every=split_every,\n split_out=split_out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.head_SeriesGroupBy.head.return.self__aca_agg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py_SeriesGroupBy.head_SeriesGroupBy.head.return.self__aca_agg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2125, "end_line": 2137, "span_ids": ["SeriesGroupBy.head"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SeriesGroupBy(_GroupBy):\n\n @derived_from(pd.core.groupby.SeriesGroupBy)\n def head(self, n=5, split_every=None, split_out=1):\n index_levels = len(self.by) if isinstance(self.by, list) else 1\n return self._aca_agg(\n token=\"head\",\n func=_head_chunk,\n aggfunc=_head_aggregate,\n meta=M.head(self._meta_nonempty),\n chunk_kwargs={\"n\": n},\n aggregate_kwargs={\"n\": n, \"index_levels\": index_levels},\n split_every=split_every,\n split_out=split_out,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_os_clear_known_categories": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_os_clear_known_categories", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 36, "span_ids": ["imports"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nfrom collections.abc import Mapping\nfrom io import BytesIO\nfrom warnings import catch_warnings, simplefilter, warn\n\nfrom ...highlevelgraph import HighLevelGraph\nfrom ...layers import DataFrameIOLayer\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nimport fsspec.implementations.local\nimport numpy as np\nimport pandas as pd\nfrom fsspec.compression import compr\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.core import open as open_file\nfrom fsspec.core import open_files\nfrom fsspec.utils import infer_compression\nfrom pandas.api.types import (\n CategoricalDtype,\n is_datetime64_any_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_object_dtype,\n)\n\nfrom ...base import tokenize\nfrom ...bytes import read_bytes\nfrom ...core import flatten\nfrom ...delayed import delayed\nfrom ...utils import asciitable, parse_bytes\nfrom ..core import new_dd_object\nfrom ..utils import clear_known_categories", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.if_blocksize_and_sample_a_read_pandas.try_.except_pd_errors_ParserEr.raise": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.if_blocksize_and_sample_a_read_pandas.try_.except_pd_errors_ParserEr.raise", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 538, "end_line": 619, "span_ids": ["read_pandas"], "tokens": 816}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_pandas(\n reader,\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=\"infer\",\n sample=256000,\n sample_rows=10,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n):\n # ... other code\n if blocksize and sample and blocksize < sample and lastskiprow != 0:\n warn(\n \"Unexpected behavior can result from passing skiprows when\\n\"\n \"blocksize is smaller than sample size.\\n\"\n \"Setting ``sample=blocksize``\"\n )\n sample = blocksize\n b_lineterminator = lineterminator.encode()\n b_out = read_bytes(\n urlpath,\n delimiter=b_lineterminator,\n blocksize=blocksize,\n sample=sample,\n compression=compression,\n include_path=include_path_column,\n **(storage_options or {}),\n )\n\n if include_path_column:\n b_sample, values, paths = b_out\n path = (include_path_column, path_converter)\n else:\n b_sample, values = b_out\n path = None\n\n if not isinstance(values[0], (tuple, list)):\n values = [values]\n # If we have not sampled, then use the first row of the first values\n # as a representative sample.\n if b_sample is False and len(values[0]):\n b_sample = values[0][0].compute()\n\n # Get header row, and check that sample is long enough. If the file\n # contains a header row, we need at least 2 nonempty rows + the number of\n # rows to skip.\n names = kwargs.get(\"names\", None)\n header = kwargs.get(\"header\", \"infer\" if names is None else None)\n need = 1 if header is None else 2\n\n if kwargs.get(\"comment\"):\n # if comment is provided, step through lines of b_sample and strip out comments\n parts = []\n for part in b_sample.split(b_lineterminator):\n split_comment = part.decode().split(kwargs.get(\"comment\"))\n if len(split_comment) > 1:\n # if line starts with comment, don't include that line in parts.\n if len(split_comment[0]) > 0:\n parts.append(split_comment[0].strip().encode())\n else:\n parts.append(part)\n if len(parts) > need:\n break\n else:\n parts = b_sample.split(b_lineterminator, lastskiprow + need)\n\n # If the last partition is empty, don't count it\n nparts = 0 if not parts else len(parts) - int(not parts[-1])\n\n if sample is not False and nparts < lastskiprow + need and len(b_sample) >= sample:\n raise ValueError(\n \"Sample is not large enough to include at least one \"\n \"row of data. 
Please increase the number of bytes \"\n \"in `sample` in the call to `read_csv`/`read_table`\"\n )\n\n if isinstance(header, int):\n firstrow += header\n header = b\"\" if header is None else parts[firstrow] + b_lineterminator\n\n # Use sample to infer dtypes and check for presence of include_path_column\n head_kwargs = kwargs.copy()\n head_kwargs.pop(\"skipfooter\", None)\n try:\n head = reader(BytesIO(b_sample), nrows=sample_rows, **head_kwargs)\n except pd.errors.ParserError as e:\n if \"EOF\" in str(e):\n raise ValueError(\n \"EOF encountered while reading header. \\n\"\n \"Pass argument `sample_rows` and make sure the value of `sample` \"\n \"is large enough to accommodate that many rows of data\"\n ) from e\n raise\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.None_20_read_pandas.return.text_blocks_to_pandas_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_read_pandas.None_20_read_pandas.return.text_blocks_to_pandas_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 620, "end_line": 650, "span_ids": ["read_pandas"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_pandas(\n reader,\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=\"infer\",\n sample=256000,\n sample_rows=10,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n):\n # ... other code\n if include_path_column and (include_path_column in head.columns):\n raise ValueError(\n \"Files already contain the column name: %s, so the \"\n \"path column cannot use this name. 
Please set \"\n \"`include_path_column` to a unique name.\" % include_path_column\n )\n\n specified_dtypes = kwargs.get(\"dtype\", {})\n if specified_dtypes is None:\n specified_dtypes = {}\n # If specified_dtypes is a single type, then all columns were specified\n if assume_missing and isinstance(specified_dtypes, dict):\n # Convert all non-specified integer columns to floats\n for c in head.columns:\n if is_integer_dtype(head[c].dtype) and c not in specified_dtypes:\n head[c] = head[c].astype(float)\n\n values = [[list(dsk.dask.values()) for dsk in block] for block in values]\n\n return text_blocks_to_pandas(\n reader,\n values,\n header,\n head,\n kwargs,\n enforce=enforce,\n specified_dtypes=specified_dtypes,\n path=path,\n blocksize=blocksize,\n urlpath=urlpath,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_make_reader__write_csv.return.os_path_normpath_fil_path": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/csv.py_make_reader__write_csv.return.os_path_normpath_fil_path", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/csv.py", "file_name": "csv.py", "file_type": "text/x-python", "category": "implementation", "start_line": 728, "end_line": 770, "span_ids": ["impl:9", "make_reader", "_write_csv"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_reader(reader, reader_name, file_type):\n def read(\n urlpath,\n blocksize=\"default\",\n lineterminator=None,\n compression=\"infer\",\n sample=256000,\n sample_rows=10,\n enforce=False,\n assume_missing=False,\n storage_options=None,\n include_path_column=False,\n **kwargs,\n ):\n return read_pandas(\n reader,\n urlpath,\n blocksize=blocksize,\n lineterminator=lineterminator,\n compression=compression,\n sample=sample,\n sample_rows=sample_rows,\n enforce=enforce,\n assume_missing=assume_missing,\n storage_options=storage_options,\n include_path_column=include_path_column,\n **kwargs,\n )\n\n read.__doc__ = READ_DOC_TEMPLATE.format(reader=reader_name, file_type=file_type)\n read.__name__ = reader_name\n return read\n\n\nread_csv = make_reader(pd.read_csv, \"read_csv\", \"CSV\")\nread_table = make_reader(pd.read_table, \"read_table\", \"delimited\")\nread_fwf = make_reader(pd.read_fwf, \"read_fwf\", \"fixed-width\")\n\n\ndef _write_csv(df, fil, *, depend_on=None, **kwargs):\n with fil as f:\n df.to_csv(f, **kwargs)\n return os.path.normpath(fil.path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/demo.py_make_timeseries_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/io/demo.py", "file_name": "demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 118, "end_line": 189, "span_ids": ["make_timeseries"], "tokens": 730}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_timeseries(\n start=\"2000-01-01\",\n end=\"2000-12-31\",\n dtypes={\"name\": str, \"id\": int, \"x\": float, \"y\": float},\n freq=\"10s\",\n partition_freq=\"1M\",\n seed=None,\n **kwargs,\n):\n \"\"\"Create timeseries dataframe with random data\n\n Parameters\n ----------\n start: datetime (or datetime-like string)\n Start of time series\n end: datetime (or datetime-like string)\n End of time series\n dtypes: dict\n Mapping of column names to types.\n Valid types include {float, int, str, 'category'}\n freq: string\n String like '2s' or '1H' or '12W' for the time series frequency\n partition_freq: string\n String like '1M' or '2Y' to divide the dataframe into partitions\n seed: int (optional)\n Randomstate seed\n kwargs:\n Keywords to pass down to individual column creation functions.\n Keywords should be prefixed by the column name and then an underscore.\n\n Examples\n --------\n >>> import dask.dataframe as dd\n >>> df = dd.demo.make_timeseries('2000', '2010',\n ... {'value': float, 'name': str, 'id': int},\n ... freq='2H', partition_freq='1D', seed=1)\n >>> df.head() # doctest: +SKIP\n id name value\n 2000-01-01 00:00:00 969 Jerry -0.309014\n 2000-01-01 02:00:00 1010 Ray -0.760675\n 2000-01-01 04:00:00 1016 Patricia -0.063261\n 2000-01-01 06:00:00 960 Charlie 0.788245\n 2000-01-01 08:00:00 1031 Kevin 0.466002\n \"\"\"\n divisions = list(pd.date_range(start=start, end=end, freq=partition_freq))\n npartitions = len(divisions) - 1\n if seed is None:\n # Get random integer seed for each partition. 
We can\n # call `random_state_data` in `MakeTimeseriesPart`\n state_data = np.random.randint(2e9, size=npartitions)\n else:\n state_data = random_state_data(npartitions, seed)\n label = \"make-timeseries-\"\n name = label + tokenize(start, end, dtypes, freq, partition_freq, state_data)\n\n # Build parts\n parts = []\n for i in range(len(divisions) - 1):\n parts.append((divisions[i : i + 2], state_data[i]))\n\n # Construct Layer and Collection\n layer = DataFrameIOLayer(\n name=name,\n columns=None,\n inputs=parts,\n io_func=MakeTimeseriesPart(dtypes, freq, kwargs),\n label=label,\n )\n graph = HighLevelGraph({name: layer}, {name: set()})\n head = make_timeseries_part(\"2000\", \"2000\", dtypes, \"1H\", state_data[0], kwargs)\n return DataFrame(graph, name, head, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array.for_i_chunk_ind_in_en__link.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py_from_dask_array.for_i_chunk_ind_in_en__link.return.None", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 478, "end_line": 496, "span_ids": ["_link", "from_dask_array"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def from_dask_array(x, columns=None, index=None, meta=None):\n # ... 
other code\n for i, (chunk, ind) in enumerate(zip(x.__dask_keys__(), index)):\n if x.ndim == 2:\n chunk = chunk[0]\n if is_series_like(meta):\n dsk[name, i] = (type(meta), chunk, ind, x.dtype, meta.name)\n else:\n dsk[name, i] = (type(meta), chunk, ind, meta.columns)\n\n to_merge.extend([ensure_dict(x.dask), dsk])\n return new_dd_object(merge(*to_merge), name, meta, divisions)\n\n\ndef _link(token, result):\n \"\"\"A dummy function to link results together in a graph\n\n We use this to enforce an artificial sequential ordering on tasks that\n don't explicitly pass around a shared resource\n \"\"\"\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__df_to_bag__df_to_bag.if_isinstance_df_pd_Data.elif_isinstance_df_pd_Se.if_format_tuple_.elif_format_dict_.return.df_to_frame_to_dict_ori": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/io.py__df_to_bag__df_to_bag.if_isinstance_df_pd_Data.elif_isinstance_df_pd_Se.if_format_tuple_.elif_format_dict_.return.df_to_frame_to_dict_ori", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/io.py", "file_name": "io.py", "file_type": "text/x-python", "category": "implementation", "start_line": 500, "end_line": 516, "span_ids": ["_df_to_bag"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _df_to_bag(df, index=False, format=\"tuple\"):\n if isinstance(df, pd.DataFrame):\n if format == \"tuple\":\n return list(map(tuple, df.itertuples(index)))\n elif format == \"dict\":\n if index:\n return [\n {**{\"index\": idx}, **values}\n for values, idx in zip(df.to_dict(\"records\"), df.index)\n ]\n else:\n return df.to_dict(orient=\"records\")\n elif isinstance(df, pd.Series):\n if format == \"tuple\":\n return list(df.items()) if index else list(df)\n elif format == \"dict\":\n return df.to_frame().to_dict(orient=\"records\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_io_write_json_partition.return.os_path_normpath_openfile": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_io_write_json_partition.return.os_path_normpath_openfile", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/json.py", "file_name": "json.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 103, "span_ids": ["to_json", "imports", "write_json_partition"], "tokens": 751}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "import io\nimport os\nfrom itertools import zip_longest\n\nimport pandas as pd\nfrom fsspec.core import open_files\n\nfrom ...base import compute as dask_compute\nfrom ...bytes import read_bytes\nfrom ...core import flatten\nfrom ...delayed import delayed\nfrom ..utils import insert_meta_param_description, make_meta\nfrom .io import from_delayed\n\n\ndef to_json(\n df,\n url_path,\n orient=\"records\",\n lines=None,\n storage_options=None,\n compute=True,\n encoding=\"utf-8\",\n errors=\"strict\",\n compression=None,\n compute_kwargs=None,\n name_function=None,\n **kwargs,\n):\n \"\"\"Write dataframe into JSON text files\n\n This utilises ``pandas.DataFrame.to_json()``, and most parameters are\n passed through - see its docstring.\n\n Differences: orient is 'records' by default, with lines=True; this\n produces the kind of JSON output that is most common in big-data\n applications, and which can be chunked when reading (see ``read_json()``).\n\n Parameters\n ----------\n df: dask.DataFrame\n Data to save\n url_path: str, list of str\n Location to write to. If a string, and there are more than one\n partitions in df, should include a glob character to expand into a\n set of file names, or provide a ``name_function=`` parameter.\n Supports protocol specifications such as ``\"s3://\"``.\n encoding, errors:\n The text encoding to implement, e.g., \"utf-8\" and how to respond\n to errors in the conversion (see ``str.encode()``).\n orient, lines, kwargs\n passed to pandas; if not specified, lines=True when orient='records',\n False otherwise.\n storage_options: dict\n Passed to backend file-system implementation\n compute: bool\n If true, immediately executes. If False, returns a set of delayed\n objects, which can be computed at a later time.\n compute_kwargs : dict, optional\n Options to be passed in to the compute method\n encoding, errors:\n Text conversion, ``see str.encode()``\n compression : string or None\n String like 'gzip' or 'xz'.\n name_function : callable, default None\n Function accepting an integer (partition index) and producing a\n string to replace the asterisk in the given filename globstring.\n Should preserve the lexicographic order of partitions.\n \"\"\"\n if lines is None:\n lines = orient == \"records\"\n if orient != \"records\" and lines:\n raise ValueError(\n \"Line-delimited JSON is only available with\" 'orient=\"records\".'\n )\n kwargs[\"orient\"] = orient\n kwargs[\"lines\"] = lines and orient == \"records\"\n outfiles = open_files(\n url_path,\n \"wt\",\n encoding=encoding,\n errors=errors,\n name_function=name_function,\n num=df.npartitions,\n compression=compression,\n **(storage_options or {}),\n )\n parts = [\n delayed(write_json_partition)(d, outfile, kwargs)\n for outfile, d in zip(outfiles, df.to_delayed())\n ]\n if compute:\n if compute_kwargs is None:\n compute_kwargs = dict()\n return list(dask_compute(*parts, **compute_kwargs))\n else:\n return parts\n\n\ndef write_json_partition(df, openfile, kwargs):\n with openfile as f:\n df.to_json(f, **kwargs)\n return os.path.normpath(openfile.path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/__init__.py__": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["imports"], "tokens": 11}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .core import read_orc, to_orc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_pa_ArrowORCEngine.read_metadata.return.parts_schema_meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_pa_ArrowORCEngine.read_metadata.return.parts_schema_meta", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 78, "span_ids": ["imports", "ArrowORCEngine.read_metadata", "ArrowORCEngine"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pyarrow as pa\nimport pyarrow.orc as orc\n\nfrom ..utils import _get_pyarrow_dtypes, _meta_from_dtypes\n\n\nclass ArrowORCEngine:\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n columns,\n index,\n split_stripes,\n aggregate_files,\n **kwargs,\n ):\n\n # Convert root directory to file list.\n # TODO: Handle hive-partitioned data\n if len(paths) == 1 and not fs.isfile(paths[0]):\n paths = fs.find(paths[0])\n\n schema = None\n parts = []\n\n def _get_schema(_o, schema):\n if schema is None:\n schema = _o.schema\n elif schema != _o.schema:\n raise ValueError(\"Incompatible schemas while parsing ORC files\")\n return schema\n\n if split_stripes:\n offset = 0\n for path in paths:\n with fs.open(path, \"rb\") as f:\n o = orc.ORCFile(f)\n if schema is None:\n schema = o.schema\n elif schema != o.schema:\n raise ValueError(\"Incompatible schemas while parsing ORC files\")\n _stripes = list(range(o.nstripes))\n if offset:\n parts.append([(path, _stripes[0:offset])])\n while offset < o.nstripes:\n parts.append(\n [(path, _stripes[offset : offset + int(split_stripes)])]\n )\n offset += int(split_stripes)\n if aggregate_files and int(split_stripes) > 1:\n offset -= o.nstripes\n else:\n offset = 0\n else:\n for path in paths:\n if schema is None:\n with fs.open(paths[0], \"rb\") as f:\n o = orc.ORCFile(f)\n schema = o.schema\n parts.append([(path, None)])\n\n schema = _get_pyarrow_dtypes(schema, categories=None)\n if columns is not None:\n ex = set(columns) - set(schema)\n if ex:\n raise ValueError(\n f\"Requested columns ({ex}) not in schema ({set(schema)})\"\n )\n\n # Check if we can aggregate 
adjacent parts together\n parts = cls._aggregate_files(aggregate_files, split_stripes, parts)\n\n columns = list(schema) if columns is None else columns\n index = [index] if isinstance(index, str) else index\n meta = _meta_from_dtypes(columns, schema, index, [])\n return parts, schema, meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_ArrowORCEngine._aggregate_files_ArrowORCEngine._aggregate_files.if_aggregate_files_is_Tru.else_.return.parts": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_ArrowORCEngine._aggregate_files_ArrowORCEngine._aggregate_files.if_aggregate_files_is_Tru.else_.return.parts", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 80, "end_line": 98, "span_ids": ["ArrowORCEngine._aggregate_files"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowORCEngine:\n\n @classmethod\n def _aggregate_files(cls, aggregate_files, split_stripes, parts):\n if aggregate_files is True and int(split_stripes) > 1 and len(parts) > 1:\n new_parts = []\n new_part = parts[0]\n nstripes = len(new_part[0][1])\n for part in parts[1:]:\n next_nstripes = len(part[0][1])\n if next_nstripes + nstripes <= split_stripes:\n new_part.append(part[0])\n nstripes += next_nstripes\n else:\n new_parts.append(new_part)\n new_part = part\n nstripes = next_nstripes\n new_parts.append(new_part)\n return new_parts\n else:\n return parts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_ArrowORCEngine.read_partition_ArrowORCEngine.write_partition.with_fs_open_fs_sep_join_.orc_write_table_table_f_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py_ArrowORCEngine.read_partition_ArrowORCEngine.write_partition.with_fs_open_fs_sep_join_.orc_write_table_table_f_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 100, "end_line": 111, "span_ids": ["ArrowORCEngine.read_partition", "ArrowORCEngine.write_partition"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowORCEngine:\n\n @classmethod\n def read_partition(cls, fs, parts, schema, columns, **kwargs):\n 
batches = []\n for path, stripes in parts:\n batches += _read_orc_stripes(fs, path, stripes, schema, columns)\n return pa.Table.from_batches(batches).to_pandas(date_as_object=False)\n\n @classmethod\n def write_partition(cls, df, path, fs, filename, **kwargs):\n table = pa.Table.from_pandas(df)\n with fs.open(fs.sep.join([path, filename]), \"wb\") as f:\n orc.write_table(table, f)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py__read_orc_stripes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/arrow.py__read_orc_stripes_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 114, "end_line": 127, "span_ids": ["_read_orc_stripes"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _read_orc_stripes(fs, path, stripes, schema, columns):\n # Construct a list of RecordBatch objects.\n # Each ORC stripe will correspond to a single RecordBatch.\n if columns is None:\n columns = list(schema)\n\n batches = []\n with fs.open(path, \"rb\") as f:\n o = orc.ORCFile(f)\n _stripes = range(o.nstripes) if stripes is None else stripes\n for stripe in _stripes:\n batches.append(o.read_stripe(stripe, columns))\n return batches", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_copy_ORCFunctionWrapper.__call__.return._df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_copy_ORCFunctionWrapper.__call__.return._df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 47, "span_ids": ["ORCFunctionWrapper", "imports", "ORCFunctionWrapper.__init__", "ORCFunctionWrapper.project_columns", "ORCFunctionWrapper.__call__"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import copy\n\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import stringify_path\nfrom packaging.version import parse as parse_version\n\nfrom ....base import compute_as_if_collection, tokenize\nfrom ....highlevelgraph import HighLevelGraph\nfrom ....layers import DataFrameIOLayer\nfrom ....utils import apply\nfrom ...core import DataFrame, Scalar, new_dd_object\nfrom .utils import 
ORCEngine\n\n\nclass ORCFunctionWrapper:\n \"\"\"\n ORC Function-Wrapper Class\n Reads ORC data from disk to produce a partition.\n \"\"\"\n\n def __init__(self, fs, columns, schema, engine, index):\n self.fs = fs\n self.columns = columns\n self.schema = schema\n self.engine = engine\n self.index = index\n\n def project_columns(self, columns):\n \"\"\"Return a new ORCFunctionWrapper object with\n a sub-column projection.\n \"\"\"\n if columns == self.columns:\n return self\n func = copy.deepcopy(self)\n func.columns = columns\n return func\n\n def __call__(self, parts):\n _df = self.engine.read_partition(\n self.fs,\n parts,\n self.schema,\n self.columns,\n )\n if self.index:\n _df.set_index(self.index, inplace=True)\n return _df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py__get_engine__get_engine.return.engine": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py__get_engine__get_engine.return.engine", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 50, "end_line": 63, "span_ids": ["_get_engine"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_engine(engine, write=False):\n # Get engine\n if engine == \"pyarrow\":\n import pyarrow as pa\n\n from .arrow import ArrowORCEngine\n\n if write and parse_version(pa.__version__) < parse_version(\"4.0.0\"):\n raise ValueError(\"to_orc is not supported for pyarrow<4.0.0\")\n\n return ArrowORCEngine\n elif not isinstance(engine, ORCEngine):\n raise TypeError(\"engine must be 'pyarrow', or an ORCEngine object\")\n return engine", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_read_orc_read_orc.return.new_dd_object_graph_outp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_read_orc_read_orc.return.new_dd_object_graph_outp", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 142, "span_ids": ["read_orc"], "tokens": 581}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_orc(\n path,\n engine=\"pyarrow\",\n columns=None,\n index=None,\n split_stripes=1,\n aggregate_files=None,\n 
storage_options=None,\n):\n \"\"\"Read dataframe from ORC file(s)\n\n Parameters\n ----------\n path: str or list(str)\n Location of file(s), which can be a full URL with protocol\n specifier, and may include glob character if a single string.\n engine: 'pyarrow' or ORCEngine\n Backend ORC engine to use for IO. Default is \"pyarrow\".\n columns: None or list(str)\n Columns to load. If None, loads all.\n index: str\n Column name to set as index.\n split_stripes: int or False\n Maximum number of ORC stripes to include in each output-DataFrame\n partition. Use False to specify a 1-to-1 mapping between files\n and partitions. Default is 1.\n aggregate_files : bool, default False\n Whether distinct file paths may be aggregated into the same output\n partition. A setting of True means that any two file paths may be\n aggregated into the same output partition, while False means that\n inter-file aggregation is prohibited.\n storage_options: None or dict\n Further parameters to pass to the bytes backend.\n\n Returns\n -------\n Dask.DataFrame (even if there is only one column)\n\n Examples\n --------\n >>> df = dd.read_orc('https://github.com/apache/orc/raw/'\n ... 'master/examples/demo-11-zlib.orc') # doctest: +SKIP\n \"\"\"\n\n # Get engine\n engine = _get_engine(engine)\n\n # Process file path(s)\n storage_options = storage_options or {}\n fs, fs_token, paths = get_fs_token_paths(\n path, mode=\"rb\", storage_options=storage_options\n )\n\n # Let backend engine generate a list of parts\n # from the ORC metadata. The backend should also\n # return the schema and DataFrame-collection metadata\n parts, schema, meta = engine.read_metadata(\n fs,\n paths,\n columns,\n index,\n split_stripes,\n aggregate_files,\n )\n\n # Construct and return a Blockwise layer\n label = \"read-orc-\"\n output_name = label + tokenize(fs_token, path, columns)\n layer = DataFrameIOLayer(\n output_name,\n columns,\n parts,\n ORCFunctionWrapper(fs, columns, schema, engine, index),\n label=label,\n )\n graph = HighLevelGraph({output_name: layer}, {output_name: set()})\n return new_dd_object(graph, output_name, meta, [None] * (len(parts) + 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_to_orc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/core.py_to_orc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 145, "end_line": 238, "span_ids": ["to_orc"], "tokens": 682}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_orc(\n df,\n path,\n engine=\"pyarrow\",\n write_index=True,\n storage_options=None,\n compute=True,\n compute_kwargs=None,\n):\n \"\"\"Store Dask.dataframe to ORC files\n\n Notes\n -----\n Each partition will be written to a separate file.\n\n Parameters\n ----------\n df : dask.dataframe.DataFrame\n path : string or pathlib.Path\n Destination directory for 
data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n engine : 'pyarrow' or ORCEngine\n Backend ORC engine to use for IO. Default is \"pyarrow\".\n write_index : boolean, default True\n Whether or not to write the index. Defaults to True.\n storage_options : dict, default None\n Key/value pairs to be passed on to the file-system backend, if any.\n compute : bool, default True\n If True (default) then the result is computed immediately. If False\n then a ``dask.delayed`` object is returned for future computation.\n compute_kwargs : dict, default None\n Options to be passed in to the compute method.\n\n Examples\n --------\n >>> df = dd.read_csv(...) # doctest: +SKIP\n >>> df.to_orc('/path/to/output/', ...) # doctest: +SKIP\n\n See Also\n --------\n read_orc: Read ORC data to dask.dataframe\n \"\"\"\n\n # Get engine\n engine = _get_engine(engine, write=True)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, _ = get_fs_token_paths(path, mode=\"wb\", storage_options=storage_options)\n # Trim any protocol information from the path before forwarding\n path = fs._strip_protocol(path)\n\n if not write_index:\n # Not writing index - might as well drop it\n df = df.reset_index(drop=True)\n\n # Use df.npartitions to define file-name list\n fs.mkdirs(path, exist_ok=True)\n filenames = [f\"part.{i}.orc\" for i in range(df.npartitions)]\n\n # Construct IO graph\n dsk = {}\n name = \"to-orc-\" + tokenize(\n df,\n fs,\n path,\n engine,\n write_index,\n storage_options,\n )\n final_name = name + \"-final\"\n for d, filename in enumerate(filenames):\n dsk[(name, d)] = (\n apply,\n engine.write_partition,\n [\n (df._name, d),\n path,\n fs,\n filename,\n ],\n )\n part_tasks = list(dsk.keys())\n dsk[(final_name, 0)] = (lambda x: None, part_tasks)\n graph = HighLevelGraph.from_collections((final_name, 0), dsk, dependencies=[df])\n\n # Compute or return future\n if compute:\n if compute_kwargs is None:\n compute_kwargs = dict()\n return compute_as_if_collection(DataFrame, graph, part_tasks, **compute_kwargs)\n return Scalar(graph, final_name, \"\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/utils.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/orc/utils.py__", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/orc/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 17, "span_ids": ["ORCEngine.write_partition", "ORCEngine.read_partition", "ORCEngine.read_metadata", "ORCEngine"], "tokens": 103}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ORCEngine:\n \"\"\"The API necessary to provide a new ORC reader/writer\"\"\"\n\n @classmethod\n def read_metadata(\n cls, fs, paths, columns, index, split_stripes, aggregate_files, **kwargs\n ):\n raise NotImplementedError()\n\n @classmethod\n def 
read_partition(cls, fs, part, columns, **kwargs):\n raise NotImplementedError()\n\n @classmethod\n def write_partition(cls, df, path, fs, filename, **kwargs):\n raise NotImplementedError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__frag_subset__get_pandas_metadata.if_has_pandas_metadata_.else_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__frag_subset__get_pandas_metadata.if_has_pandas_metadata_.else_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 166, "end_line": 189, "span_ids": ["_frag_subset", "_get_pandas_metadata"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _frag_subset(old_frag, row_groups):\n \"\"\"Create new fragment with row-group subset.\n\n Used by `ArrowDatasetEngine` only.\n \"\"\"\n return old_frag.format.make_fragment(\n old_frag.path,\n old_frag.filesystem,\n old_frag.partition_expression,\n row_groups=row_groups,\n )\n\n\ndef _get_pandas_metadata(schema):\n \"\"\"Get pandas-specific metadata from schema.\n\n Used by `ArrowDatasetEngine` and `ArrowLegacyEngine`.\n \"\"\"\n\n has_pandas_metadata = schema.metadata is not None and b\"pandas\" in schema.metadata\n if has_pandas_metadata:\n return json.loads(schema.metadata[b\"pandas\"].decode(\"utf8\"))\n else:\n return {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_None_4_ArrowDatasetEngine.multi_support.return.cls_ArrowDatasetEngine": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_None_4_ArrowDatasetEngine.multi_support.return.cls_ArrowDatasetEngine", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 339, "end_line": 400, "span_ids": ["ArrowDatasetEngine.multi_support", "_need_fragments", "ArrowDatasetEngine", "ArrowDatasetEngine.read_metadata"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n# ArrowDatasetEngine\n#\n\n\nclass ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n #\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n 
filters=None,\n split_row_groups=None,\n chunksize=None,\n aggregate_files=None,\n ignore_metadata_file=False,\n metadata_task_size=0,\n **kwargs,\n ):\n\n # Stage 1: Collect general dataset information\n dataset_info = cls._collect_dataset_info(\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n )\n\n # Stage 2: Generate output `meta`\n meta = cls._create_dd_meta(dataset_info)\n\n # Stage 3: Generate parts and stats\n parts, stats, common_kwargs = cls._construct_collection_plan(dataset_info)\n\n # Add `common_kwargs` and `aggregation_depth` to the first\n # element of `parts`. We can return as a separate element\n # in the future, but should avoid breaking the API for now.\n if len(parts):\n parts[0][\"common_kwargs\"] = common_kwargs\n parts[0][\"aggregation_depth\"] = dataset_info[\"aggregation_depth\"]\n\n return (meta, stats, parts, dataset_info[\"index\"])\n\n @classmethod\n def multi_support(cls):\n return cls == ArrowDatasetEngine", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info_ArrowDatasetEngine._collect_dataset_info.has_metadata_file.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info_ArrowDatasetEngine._collect_dataset_info.has_metadata_file.False", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 793, "end_line": 844, "span_ids": ["ArrowDatasetEngine._collect_dataset_info"], "tokens": 374}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n ):\n \"\"\"pyarrow.dataset version of _collect_dataset_info\n Use pyarrow.dataset API to construct a dictionary of all\n general information needed to read the dataset.\n\n This method is overridden by `ArrowLegacyEngine`.\n \"\"\"\n\n # Use pyarrow.dataset API\n ds = None\n valid_paths = None # Only used if `paths` is a list containing _metadata\n\n # Extract \"supported\" key-word arguments from `kwargs`\n (\n _dataset_kwargs,\n read_kwargs,\n user_kwargs,\n ) = _split_user_options(**kwargs)\n\n # Discover Partitioning - Note that we need to avoid creating\n # this factory until it is actually used. The `partitioning`\n # object can be overridden if a \"partitioning\" kwarg is passed\n # in, containing a `dict` with a required \"obj\" argument and\n # optional \"args\" and \"kwargs\" elements. 
Note that the \"obj\"\n # value must support the \"discover\" attribute.\n partitioning = _dataset_kwargs.pop(\n \"partitioning\",\n {\"obj\": pa_ds.HivePartitioning},\n )\n\n # Set require_extension option\n require_extension = _dataset_kwargs.pop(\n \"require_extension\", (\".parq\", \".parquet\")\n )\n\n # Case-dependent pyarrow.dataset creation\n has_metadata_file = False\n #\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.return_ArrowDatasetEngine._collect_dataset_info.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.return_ArrowDatasetEngine._collect_dataset_info.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1009, "end_line": 1033, "span_ids": ["ArrowDatasetEngine._collect_dataset_info"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n ):\n # ... 
other code\n return {\n \"ds\": ds,\n \"physical_schema\": physical_schema,\n \"has_metadata_file\": has_metadata_file,\n \"schema\": ds.schema,\n \"fs\": fs,\n \"valid_paths\": valid_paths,\n \"gather_statistics\": gather_statistics,\n \"categories\": categories,\n \"index\": index,\n \"filters\": filters,\n \"split_row_groups\": split_row_groups,\n \"chunksize\": chunksize,\n \"aggregate_files\": aggregate_files,\n \"aggregation_depth\": aggregation_depth,\n \"partitions\": partition_obj,\n \"partition_names\": partition_names,\n \"partitioning\": partitioning,\n \"metadata_task_size\": metadata_task_size,\n \"kwargs\": {\n \"dataset\": _dataset_kwargs,\n \"read\": read_kwargs,\n **user_kwargs,\n },\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_file_parts_ArrowDatasetEngine._collect_file_parts.cmax_last._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_file_parts_ArrowDatasetEngine._collect_file_parts.cmax_last._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1275, "end_line": 1336, "span_ids": ["ArrowDatasetEngine._collect_file_parts"], "tokens": 495}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _collect_file_parts(\n cls,\n files_or_frags,\n dataset_info_kwargs,\n ):\n\n # Collect necessary information from dataset_info\n fs = dataset_info_kwargs[\"fs\"]\n split_row_groups = dataset_info_kwargs[\"split_row_groups\"]\n gather_statistics = dataset_info_kwargs[\"gather_statistics\"]\n partitions = dataset_info_kwargs[\"partitions\"]\n\n # Make sure we are processing a non-empty list\n if not isinstance(files_or_frags, list):\n files_or_frags = [files_or_frags]\n elif not files_or_frags:\n return [], []\n\n # Make sure we are starting with file fragments\n if isinstance(files_or_frags[0], str):\n\n # Check if we are using a simple file-partition map\n # without requiring any file or row-group statistics\n if not (split_row_groups or partitions) and gather_statistics is False:\n # Cool - We can return immediately\n return [\n {\"piece\": (file_or_frag, None, None)}\n for file_or_frag in files_or_frags\n ], None\n\n # Need more information - convert the path to a fragment\n partitioning = dataset_info_kwargs[\"partitioning\"]\n file_frags = list(\n pa_ds.dataset(\n files_or_frags,\n filesystem=fs,\n format=\"parquet\",\n partitioning=partitioning[\"obj\"].discover(\n *partitioning.get(\"args\", []),\n **partitioning.get(\"kwargs\", {}),\n ),\n ).get_fragments()\n )\n else:\n file_frags = files_or_frags\n\n # Collect settings from dataset_info\n filters = dataset_info_kwargs[\"filters\"]\n ds_filters = dataset_info_kwargs[\"ds_filters\"]\n schema = 
dataset_info_kwargs[\"schema\"]\n stat_col_indices = dataset_info_kwargs[\"stat_col_indices\"]\n aggregation_depth = dataset_info_kwargs[\"aggregation_depth\"]\n chunksize = dataset_info_kwargs[\"chunksize\"]\n\n # Intialize row-group and statistiscs data structures\n file_row_groups = defaultdict(list)\n file_row_group_stats = defaultdict(list)\n file_row_group_column_stats = defaultdict(list)\n single_rg_parts = int(split_row_groups) == 1\n hive_partition_keys = {}\n cmax_last = {}\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_file_parts.for_file_frag_in_file_fra_ArrowDatasetEngine._collect_file_parts.return._row_groups_to_parts_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_file_parts.for_file_frag_in_file_fra_ArrowDatasetEngine._collect_file_parts.return._row_groups_to_parts_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1337, "end_line": 1443, "span_ids": ["ArrowDatasetEngine._collect_file_parts"], "tokens": 830}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _collect_file_parts(\n cls,\n files_or_frags,\n dataset_info_kwargs,\n ):\n # ... 
other code\n for file_frag in file_frags:\n fpath = file_frag.path\n\n # Extract hive-partition keys, and make sure they\n # are ordered the same as they are in `partitions`\n raw_keys = pa_ds._get_partition_keys(file_frag.partition_expression)\n hive_partition_keys[fpath] = [\n (hive_part.name, raw_keys[hive_part.name]) for hive_part in partitions\n ]\n\n for frag in file_frag.split_by_row_group(ds_filters, schema=schema):\n row_group_info = frag.row_groups\n if gather_statistics or split_row_groups:\n # If we are gathering statistics or splitting by\n # row-group, we may need to ensure our fragment\n # metadata is complete.\n if row_group_info is None:\n frag.ensure_complete_metadata()\n row_group_info = frag.row_groups\n if not len(row_group_info):\n continue\n else:\n file_row_groups[fpath] = [None]\n continue\n for row_group in row_group_info:\n file_row_groups[fpath].append(row_group.id)\n if gather_statistics:\n statistics = _get_rg_statistics(row_group, stat_col_indices)\n if single_rg_parts:\n s = {\n \"file_path_0\": fpath,\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n \"columns\": [],\n }\n else:\n s = {\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n }\n cstats = []\n for name, i in stat_col_indices.items():\n if name in statistics:\n cmin = statistics[name][\"min\"]\n cmax = statistics[name][\"max\"]\n last = cmax_last.get(name, None)\n if not (filters or chunksize or aggregation_depth):\n # Only think about bailing if we don't need\n # stats for filtering\n if cmin is None or (last and cmin < last):\n # We are collecting statistics for divisions\n # only (no filters) - Column isn't sorted, or\n # we have an all-null partition, so let's bail.\n #\n # Note: This assumes ascending order.\n #\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": pd.Timestamp(cmin)\n if isinstance(cmin, datetime)\n else cmin,\n \"max\": pd.Timestamp(cmax)\n if isinstance(cmax, datetime)\n else cmax,\n }\n )\n else:\n cstats += [cmin, cmax]\n cmax_last[name] = cmax\n else:\n if single_rg_parts:\n s[\"columns\"].append({\"name\": name})\n else:\n cstats += [None, None, None]\n if gather_statistics:\n file_row_group_stats[fpath].append(s)\n if not single_rg_parts:\n file_row_group_column_stats[fpath].append(tuple(cstats))\n\n # Check if we have empty parts to return\n if not file_row_groups:\n return [], []\n\n # Convert organized row-groups to parts\n return _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n aggregation_depth,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n cls._make_part,\n make_part_kwargs={\n \"fs\": fs,\n \"partition_keys\": hive_partition_keys,\n \"partition_obj\": partitions,\n \"data_path\": \"\",\n },\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._make_part_ArrowDatasetEngine._make_part.return._piece_full_path_rg_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._make_part_ArrowDatasetEngine._make_part.return._piece_full_path_rg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1445, "end_line": 1467, "span_ids": ["ArrowDatasetEngine._make_part"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _make_part(\n cls,\n filename,\n rg_list,\n fs=None,\n partition_keys=None,\n partition_obj=None,\n data_path=None,\n ):\n \"\"\"Generate a partition-specific element of `parts`.\n\n This method is used by both `ArrowDatasetEngine`\n and `ArrowLegacyEngine`.\n \"\"\"\n\n # Get full path (empty strings should be ignored)\n full_path = fs.sep.join([p for p in [data_path, filename] if p != \"\"])\n\n pkeys = partition_keys.get(full_path, None)\n if partition_obj and pkeys is None:\n return None # This partition was filtered\n return {\"piece\": (full_path, rg_list, pkeys)}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._read_table_ArrowDatasetEngine._read_table.return.arrow_table": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._read_table_ArrowDatasetEngine._read_table.return.arrow_table", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1524, "end_line": 1634, "span_ids": ["ArrowDatasetEngine._read_table"], "tokens": 765}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _read_table(\n cls,\n path_or_frag,\n fs,\n row_groups,\n columns,\n schema,\n filters,\n partitions,\n partition_keys,\n **kwargs,\n ):\n \"\"\"Read in a pyarrow table.\n\n This method is overridden in `ArrowLegacyEngine`.\n \"\"\"\n\n if isinstance(path_or_frag, pa_ds.ParquetFileFragment):\n frag = path_or_frag\n\n else:\n frag = None\n\n # Check if we have partitioning information.\n # Will only have this if the engine=\"pyarrow-dataset\"\n partitioning = kwargs.pop(\"partitioning\", None)\n\n # Check if we need to generate a fragment for filtering.\n # We only need to do this if we are applying filters to\n # columns that were not already filtered by \"partition\".\n if (partitions and partition_keys is None) or (\n partitioning and 
_need_fragments(filters, partition_keys)\n ):\n\n # We are filtering with \"pyarrow-dataset\".\n # Need to convert the path and row-group IDs\n # to a single \"fragment\" to read\n ds = pa_ds.dataset(\n path_or_frag,\n filesystem=fs,\n format=\"parquet\",\n partitioning=partitioning[\"obj\"].discover(\n *partitioning.get(\"args\", []),\n **partitioning.get(\"kwargs\", {}),\n ),\n **kwargs.get(\"dataset\", {}),\n )\n frags = list(ds.get_fragments())\n assert len(frags) == 1\n frag = (\n _frag_subset(frags[0], row_groups)\n if row_groups != [None]\n else frags[0]\n )\n\n # Extract hive-partition keys, and make sure they\n # are ordered the same as they are in `partitions`\n raw_keys = pa_ds._get_partition_keys(frag.partition_expression)\n partition_keys = [\n (hive_part.name, raw_keys[hive_part.name])\n for hive_part in partitions\n ]\n\n if frag:\n cols = []\n for name in columns:\n if name is None:\n if \"__index_level_0__\" in schema.names:\n columns.append(\"__index_level_0__\")\n else:\n cols.append(name)\n\n arrow_table = frag.to_table(\n use_threads=False,\n schema=schema,\n columns=cols,\n filter=pq._filters_to_expression(filters) if filters else None,\n )\n else:\n arrow_table = _read_table_from_path(\n path_or_frag,\n fs,\n row_groups,\n columns,\n schema,\n filters,\n None, # partitions,\n [], # partition_keys,\n cls._parquet_piece_as_arrow,\n **kwargs,\n )\n\n # For pyarrow.dataset api, if we did not read directly from\n # fragments, we need to add the partitioned columns here.\n if partitions and isinstance(partitions, list):\n keys_dict = {k: v for (k, v) in partition_keys}\n for partition in partitions:\n if partition.name not in arrow_table.schema.names:\n # We read from file paths, so the partition\n # columns are NOT in our table yet.\n cat = keys_dict.get(partition.name, None)\n cat_ind = np.full(\n len(arrow_table), partition.keys.index(cat), dtype=\"i4\"\n )\n arr = pa.DictionaryArray.from_arrays(\n cat_ind, pa.array(partition.keys)\n )\n arrow_table = arrow_table.append_column(partition.name, arr)\n\n return arrow_table", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._arrow_table_to_pandas_ArrowDatasetEngine.aggregate_metadata.if_out_path_.else_.return.meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._arrow_table_to_pandas_ArrowDatasetEngine.aggregate_metadata.if_out_path_.else_.return.meta", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1580, "end_line": 1626, "span_ids": ["ArrowDatasetEngine.collect_file_metadata", "ArrowDatasetEngine._arrow_table_to_pandas", "ArrowDatasetEngine.aggregate_metadata", "ArrowDatasetEngine._parquet_piece_as_arrow"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # 
Public Class Methods\n\n @classmethod\n def _arrow_table_to_pandas(\n cls, arrow_table: pa.Table, categories, **kwargs\n ) -> pd.DataFrame:\n _kwargs = kwargs.get(\"arrow_to_pandas\", {})\n _kwargs.update({\"use_threads\": False, \"ignore_metadata\": False})\n\n return arrow_table.to_pandas(categories=categories, **_kwargs)\n\n @classmethod\n def _parquet_piece_as_arrow(\n cls, piece: pq.ParquetDatasetPiece, columns, partitions, **kwargs\n ) -> pa.Table:\n arrow_table = piece.read(\n columns=columns,\n partitions=partitions,\n use_pandas_metadata=True,\n use_threads=False,\n **kwargs.get(\"read\", {}),\n )\n return arrow_table\n\n @classmethod\n def collect_file_metadata(cls, path, fs, file_path):\n with fs.open(path, \"rb\") as f:\n meta = pq.ParquetFile(f).metadata\n if file_path:\n meta.set_file_path(file_path)\n return meta\n\n @classmethod\n def aggregate_metadata(cls, meta_list, fs, out_path):\n meta = None\n for _meta in meta_list:\n if meta:\n _append_row_groups(meta, _meta)\n else:\n meta = _meta\n if out_path:\n metadata_path = fs.sep.join([out_path, \"_metadata\"])\n with fs.open(metadata_path, \"wb\") as fil:\n if not meta:\n raise ValueError(\"Cannot write empty metadata!\")\n meta.write_metadata_file(fil)\n return None\n else:\n return meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine_ArrowLegacyEngine._collect_dataset_info.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine_ArrowLegacyEngine._collect_dataset_info.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1738, "end_line": 1824, "span_ids": ["ArrowLegacyEngine._collect_dataset_info", "ArrowLegacyEngine"], "tokens": 451}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n #\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n ):\n \"\"\"pyarrow-legacy version of _collect_dataset_info\n Use the ParquetDataset API to construct a dictionary of all\n general information needed to read the dataset.\n\n This method overrides `ArrowDatasetEngine._collect_dataset_info`.\n \"\"\"\n\n if ignore_metadata_file:\n raise ValueError(\"ignore_metadata_file not supported in ArrowLegacyEngine\")\n\n if metadata_task_size:\n raise ValueError(\"metadata_task_size not supported in ArrowLegacyEngine\")\n\n # Extract \"supported\" key-word arguments from `kwargs`\n (\n dataset_kwargs,\n read_kwargs,\n user_kwargs,\n ) = _split_user_options(**kwargs)\n\n (\n schema,\n metadata,\n base,\n partition_info,\n split_row_groups,\n gather_statistics,\n ) = 
cls._gather_metadata(\n paths,\n fs,\n split_row_groups,\n gather_statistics,\n filters,\n index,\n dataset_kwargs,\n )\n\n # Check the `aggregate_files` setting\n aggregation_depth = _get_aggregation_depth(\n aggregate_files,\n partition_info[\"partition_names\"],\n )\n\n return {\n \"schema\": schema,\n \"metadata\": metadata,\n \"fs\": fs,\n \"base_path\": base,\n \"gather_statistics\": gather_statistics,\n \"categories\": categories,\n \"index\": index,\n \"filters\": filters,\n \"split_row_groups\": split_row_groups,\n \"chunksize\": chunksize,\n \"aggregate_files\": aggregate_files,\n \"aggregation_depth\": aggregation_depth,\n \"partition_keys\": partition_info[\"partition_keys\"],\n \"partition_names\": partition_info[\"partition_names\"],\n \"partitions\": partition_info[\"partitions\"],\n \"kwargs\": {\n \"dataset\": dataset_kwargs,\n \"read\": read_kwargs,\n **user_kwargs,\n },\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._construct_collection_plan_ArrowLegacyEngine._construct_collection_plan.return.cls__construct_parts_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._construct_collection_plan_ArrowLegacyEngine._construct_collection_plan.return.cls__construct_parts_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1826, "end_line": 1852, "span_ids": ["ArrowLegacyEngine._construct_collection_plan"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _construct_collection_plan(cls, dataset_info):\n \"\"\"pyarrow-legacy version of _construct_collection_plan\n\n This method overrides the `ArrowDatasetEngine` implementation.\n \"\"\"\n\n # Wrap legacy `_construct_parts` implementation\n return cls._construct_parts(\n dataset_info[\"fs\"],\n dataset_info[\"metadata\"],\n dataset_info[\"schema\"],\n dataset_info[\"filters\"],\n dataset_info[\"index_cols\"],\n dataset_info[\"base_path\"],\n {\n \"partition_keys\": dataset_info[\"partition_keys\"],\n \"partition_names\": dataset_info[\"partition_names\"],\n \"partitions\": dataset_info[\"partitions\"],\n },\n dataset_info[\"categories\"],\n dataset_info[\"split_row_groups\"],\n dataset_info[\"gather_statistics\"],\n dataset_info[\"chunksize\"],\n dataset_info[\"aggregation_depth\"],\n dataset_info[\"kwargs\"],\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._gather_metadata_ArrowLegacyEngine._gather_metadata.metadata.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._gather_metadata_ArrowLegacyEngine._gather_metadata.metadata.None", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1785, "end_line": 1861, "span_ids": ["ArrowLegacyEngine._gather_metadata"], "tokens": 697}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _gather_metadata(\n cls,\n paths,\n fs,\n split_row_groups,\n gather_statistics,\n filters,\n index,\n dataset_kwargs,\n ):\n \"\"\"Gather parquet metadata into a single data structure.\n\n Use _metadata or aggregate footer metadata into a single\n object. Also, collect other information necessary for\n parquet-to-ddf mapping (e.g. schema, partition_info).\n\n This method overrides `ArrowDatasetEngine._gather_metadata`.\n \"\"\"\n\n # Step 1: Create a ParquetDataset object\n dataset, base, fns = _get_dataset_object(paths, fs, filters, dataset_kwargs)\n if fns == [None]:\n # This is a single file. No danger in gathering statistics\n # and/or splitting row-groups without a \"_metadata\" file\n if gather_statistics is None:\n gather_statistics = True\n if split_row_groups is None:\n split_row_groups = True\n\n # Step 2: Construct necessary (parquet) partitioning information\n partition_info = {\n \"partitions\": None,\n \"partition_keys\": {},\n \"partition_names\": [],\n }\n # The `partition_info` dict summarizes information needed to handle\n # nested-directory (hive) partitioning.\n #\n # - \"partitions\" : (ParquetPartitions) PyArrow-specific object\n # needed to read in each partition correctly\n # - \"partition_keys\" : (dict) The keys and values correspond to\n # file paths and partition values, respectively. The partition\n # values (or partition \"keys\") will be represented as a list\n # of tuples. E.g. `[(\"year\", 2020), (\"state\", \"CA\")]`\n # - \"partition_names\" : (list) This is a list containing the names\n # of partitioned columns. This list must be ordered correctly\n # by partition level.\n fn_partitioned = False\n if dataset.partitions is not None:\n fn_partitioned = True\n partition_info[\"partition_names\"] = [\n n.name for n in list(dataset.partitions) if n.name is not None\n ]\n partition_info[\"partitions\"] = dataset.partitions\n for piece in dataset.pieces:\n partition_info[\"partition_keys\"][piece.path] = piece.partition_keys\n\n # Make sure gather_statistics allows filtering\n # (if filters are desired)\n if filters:\n # Filters may require us to gather statistics\n if gather_statistics is False and partition_info[\"partition_names\"]:\n warnings.warn(\n \"Filtering with gather_statistics=False. 
\"\n \"Only partition columns will be filtered correctly.\"\n )\n elif gather_statistics is False:\n raise ValueError(\"Cannot apply filters with gather_statistics=False\")\n elif not gather_statistics:\n gather_statistics = True\n\n # Step 3: Construct a single `metadata` object. We can\n # directly use dataset.metadata if it is available.\n # Otherwise, if `gather_statistics` or `split_row_groups`,\n # we need to gether the footer metadata manually\n metadata = None\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._construct_parts_ArrowLegacyEngine._construct_parts.return.cls__process_metadata_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._construct_parts_ArrowLegacyEngine._construct_parts.return.cls__process_metadata_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2012, "end_line": 2093, "span_ids": ["ArrowLegacyEngine._construct_parts"], "tokens": 443}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _construct_parts(\n cls,\n fs,\n metadata,\n schema,\n filters,\n index_cols,\n data_path,\n partition_info,\n categories,\n split_row_groups,\n gather_statistics,\n chunksize,\n aggregation_depth,\n kwargs,\n ):\n \"\"\"Construct ``parts`` for ddf construction\n\n Use metadata (along with other data) to define a tuple\n for each ddf partition. 
Also gather statistics if\n ``gather_statistics=True``, and other criteria are met.\n\n This method is only used by `ArrowLegacyEngine`.\n \"\"\"\n\n partition_keys = partition_info[\"partition_keys\"]\n partition_obj = partition_info[\"partitions\"]\n\n # Check if `metadata` is just a list of paths\n # (not splitting by row-group or collecting statistics)\n if (\n isinstance(metadata, list)\n and len(metadata)\n and isinstance(metadata[0], str)\n ):\n parts = []\n stats = []\n for full_path in metadata:\n part = {\"piece\": (full_path, None, partition_keys.get(full_path, None))}\n parts.append(part)\n common_kwargs = {\n \"partitions\": partition_obj,\n \"categories\": categories,\n **kwargs,\n }\n return parts, stats, common_kwargs\n\n # Use final metadata info to update our options for\n # `parts`/`stats` construction\n (\n gather_statistics,\n split_row_groups,\n stat_col_indices,\n ) = cls._update_metadata_options(\n gather_statistics,\n split_row_groups,\n metadata,\n schema,\n index_cols,\n filters,\n partition_info,\n chunksize,\n aggregation_depth,\n )\n\n # Convert metadata into `parts` and `stats`\n return cls._process_metadata(\n metadata,\n schema,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n categories,\n partition_info,\n data_path,\n fs,\n chunksize,\n aggregation_depth,\n kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._update_metadata_options_ArrowLegacyEngine._update_metadata_options.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._update_metadata_options_ArrowLegacyEngine._update_metadata_options.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2020, "end_line": 2085, "span_ids": ["ArrowLegacyEngine._update_metadata_options"], "tokens": 514}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _update_metadata_options(\n cls,\n gather_statistics,\n split_row_groups,\n metadata,\n schema,\n index_cols,\n filters,\n partition_info,\n chunksize,\n aggregation_depth,\n ):\n \"\"\"Update read_parquet options given up-to-date metadata.\n\n The primary focus here is `gather_statistics`. We want to\n avoid setting this option to `True` if it is unnecessary.\n\n This method is only used by `ArrowLegacyEngine`.\n \"\"\"\n\n # Cannot gather_statistics if our `metadata` is a list\n # of paths, or if we are building a multiindex (for now).\n # We also don't \"need\" to gather statistics if we don't\n # want to apply any filters or calculate divisions. 
Note\n # that the `ArrowDatasetEngine` doesn't even require\n # `gather_statistics=True` for filtering.\n if split_row_groups is None:\n split_row_groups = False\n _need_aggregation_stats = chunksize or (\n int(split_row_groups) > 1 and aggregation_depth\n )\n if (\n isinstance(metadata, list)\n and len(metadata)\n and isinstance(metadata[0], str)\n ) or len(index_cols) > 1:\n gather_statistics = False\n elif not _need_aggregation_stats and filters is None and len(index_cols) == 0:\n gather_statistics = False\n\n # Determine which columns need statistics.\n flat_filters = _flatten_filters(filters)\n stat_col_indices = {}\n for i, name in enumerate(schema.names):\n if name in index_cols or name in flat_filters:\n if name in partition_info[\"partition_names\"]:\n # Partition columns won't have statistics\n continue\n stat_col_indices[name] = i\n\n # If the user has not specified `gather_statistics`,\n # we will only do so if there are specific columns in\n # need of statistics.\n # NOTE: We cannot change `gather_statistics` from True\n # to False (even if `stat_col_indices` is empty), in\n # case a `chunksize` was specified, and the row-group\n # statistics are needed for part aggregation.\n if gather_statistics is None:\n gather_statistics = bool(stat_col_indices)\n\n return (\n gather_statistics,\n split_row_groups,\n stat_col_indices,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._organize_row_groups_ArrowLegacyEngine._organize_row_groups.cmax_last._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._organize_row_groups_ArrowLegacyEngine._organize_row_groups.cmax_last._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2087, "end_line": 2115, "span_ids": ["ArrowLegacyEngine._organize_row_groups"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _organize_row_groups(\n cls,\n metadata,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n chunksize,\n aggregation_depth,\n ):\n \"\"\"Organize row-groups by file.\n\n This method is used by ArrowLegacyEngine._process_metadata\n \"\"\"\n\n sorted_row_group_indices = range(metadata.num_row_groups)\n if aggregation_depth:\n sorted_row_group_indices = sorted(\n range(metadata.num_row_groups),\n key=lambda x: metadata.row_group(x).column(0).file_path,\n )\n\n # Get the number of row groups per file\n single_rg_parts = int(split_row_groups) == 1\n file_row_groups = defaultdict(list)\n file_row_group_stats = defaultdict(list)\n file_row_group_column_stats = defaultdict(list)\n cmax_last = {}\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._organize_row_groups.for_rg_in_sorted_row_grou_ArrowLegacyEngine._organize_row_groups.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._organize_row_groups.for_rg_in_sorted_row_grou_ArrowLegacyEngine._organize_row_groups.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2116, "end_line": 2207, "span_ids": ["ArrowLegacyEngine._organize_row_groups"], "tokens": 755}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n def _organize_row_groups(\n cls,\n metadata,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n chunksize,\n aggregation_depth,\n ):\n # ... other code\n for rg in sorted_row_group_indices:\n row_group = metadata.row_group(rg)\n\n # NOTE: Here we assume that all column chunks are stored\n # in the same file. This is not strictly required by the\n # parquet spec.\n fpath = row_group.column(0).file_path\n if fpath is None:\n raise ValueError(\n \"Global metadata structure is missing a file_path string. 
\"\n \"If the dataset includes a _metadata file, that file may \"\n \"have one or more missing file_path fields.\"\n )\n if file_row_groups[fpath]:\n file_row_groups[fpath].append(file_row_groups[fpath][-1] + 1)\n else:\n file_row_groups[fpath].append(0)\n if gather_statistics:\n if single_rg_parts:\n s = {\n \"file_path_0\": fpath,\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n \"columns\": [],\n }\n else:\n s = {\n \"num-rows\": row_group.num_rows,\n \"total_byte_size\": row_group.total_byte_size,\n }\n cstats = []\n for name, i in stat_col_indices.items():\n column = row_group.column(i)\n if column.statistics:\n cmin = column.statistics.min\n cmax = column.statistics.max\n last = cmax_last.get(name, None)\n if not (filters or chunksize or aggregation_depth):\n # Only think about bailing if we don't need\n # stats for filtering\n if cmin is None or (last and cmin < last):\n # We are collecting statistics for divisions\n # only (no filters) - Column isn't sorted, or\n # we have an all-null partition, so lets bail.\n #\n # Note: This assumes ascending order.\n #\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n to_ts = column.statistics.logical_type.type == \"TIMESTAMP\"\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": cmin if not to_ts else pd.Timestamp(cmin),\n \"max\": cmax if not to_ts else pd.Timestamp(cmax),\n }\n )\n else:\n cstats += [cmin, cmax]\n cmax_last[name] = cmax\n else:\n\n if (\n not (filters or chunksize or aggregation_depth)\n and column.num_values > 0\n ):\n # We are collecting statistics for divisions\n # only (no filters) - Lets bail.\n gather_statistics = False\n file_row_group_stats = {}\n file_row_group_column_stats = {}\n break\n\n if single_rg_parts:\n s[\"columns\"].append({\"name\": name})\n else:\n cstats += [None, None, None]\n if gather_statistics:\n file_row_group_stats[fpath].append(s)\n if not single_rg_parts:\n file_row_group_column_stats[fpath].append(tuple(cstats))\n\n return (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._read_table_ArrowLegacyEngine.multi_support.return.cls_ArrowLegacyEngine": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowLegacyEngine._read_table_ArrowLegacyEngine.multi_support.return.cls_ArrowLegacyEngine", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2350, "end_line": 2383, "span_ids": ["ArrowLegacyEngine.multi_support", "ArrowLegacyEngine._read_table"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowLegacyEngine(ArrowDatasetEngine):\n\n #\n # Private Class Methods\n\n @classmethod\n 
def _read_table(\n cls,\n path,\n fs,\n row_groups,\n columns,\n schema,\n filters,\n partitions,\n partition_keys,\n **kwargs,\n ):\n \"\"\"Read in a pyarrow table.\n\n This method overrides the `ArrowDatasetEngine` implementation.\n \"\"\"\n\n return _read_table_from_path(\n path,\n fs,\n row_groups,\n columns,\n schema,\n filters,\n partitions,\n partition_keys,\n cls._parquet_piece_as_arrow,\n **kwargs,\n )\n\n @classmethod\n def multi_support(cls):\n return cls == ArrowLegacyEngine", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.__read_parquet._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.__read_parquet._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 333, "span_ids": ["read_parquet"], "tokens": 2035}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n ignore_metadata_file=False,\n metadata_task_size=None,\n split_row_groups=None,\n chunksize=None,\n aggregate_files=None,\n **kwargs,\n):\n \"\"\"\n Read a Parquet file into a Dask DataFrame\n\n This reads a directory of Parquet data into a Dask.dataframe, one file per\n partition. It selects the index among the sorted columns if any exist.\n\n Parameters\n ----------\n path : str or list\n Source directory for data, or path(s) to individual parquet files.\n Prefix with a protocol like ``s3://`` to read from alternative\n filesystems. To read from multiple files you can pass a globstring or a\n list of paths, with the caveat that they must all have the same\n protocol.\n columns : str or list, default None\n Field name(s) to read in as columns in the output. By default all\n non-index fields will be read (as determined by the pandas parquet\n metadata, if present). Provide a single field name instead of a list to\n read in the data as a Series.\n filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]], default None\n List of filters to apply, like ``[[('col1', '==', 0), ...], ...]``.\n Using this argument will NOT result in row-wise filtering of the final\n partitions unless ``engine=\"pyarrow-dataset\"`` is also specified. For\n other engines, filtering is only performed at the partition level, i.e.,\n to prevent the loading of some row-groups and/or files.\n\n For the \"pyarrow\" engines, predicates can be expressed in disjunctive\n normal form (DNF). This means that the innermost tuple describes a single\n column predicate. These inner predicates are combined with an AND\n conjunction into a larger predicate. 
The outer-most list then combines all\n of the combined filters with an OR disjunction.\n\n Predicates can also be expressed as a List[Tuple]. These are evaluated\n as an AND conjunction. To express OR in predicates, one must use the\n (preferred for \"pyarrow\") List[List[Tuple]] notation.\n\n Note that the \"fastparquet\" engine does not currently support DNF for\n the filtering of partitioned columns (List[Tuple] is required).\n index : str, list or False, default None\n Field name(s) to use as the output frame index. By default will be\n inferred from the pandas parquet file metadata (if present). Use False\n to read all fields as columns.\n categories : list or dict, default None\n For any fields listed here, if the parquet encoding is Dictionary,\n the column will be created with dtype category. Use only if it is\n guaranteed that the column is encoded as dictionary in all row-groups.\n If a list, assumes up to 2**16-1 labels; if a dict, specify the number\n of labels expected; if None, will load categories automatically for\n data written by dask/fastparquet, not otherwise.\n storage_options : dict, default None\n Key/value pairs to be passed on to the file-system backend, if any.\n open_file_options : dict, default None\n Key/value arguments to be passed along to ``AbstractFileSystem.open``\n when each parquet data file is open for reading. Experimental\n (optimized) \"precaching\" for remote file systems (e.g. S3, GCS) can\n be enabled by adding ``{\"method\": \"parquet\"}`` under the\n ``\"precache_options\"`` key. Also, a custom file-open function can be\n used (instead of ``AbstractFileSystem.open``), by specifying the\n desired function under the ``\"open_file_func\"`` key.\n engine : str, default 'auto'\n Parquet reader library to use. Options include: 'auto', 'fastparquet',\n and 'pyarrow'. Defaults to 'auto', which selects FastParquetEngine\n if fastparquet is installed (and ArrowDatasetEngine otherwise). If\n 'pyarrow' is specified, ArrowDatasetEngine (which leverages the\n pyarrow.dataset API) will be used.\n NOTE: The 'pyarrow-legacy' option (ArrowLegacyEngine) is deprecated.\n gather_statistics : bool, default None\n Gather the statistics for each dataset partition. By default,\n this will only be done if the _metadata file is available. Otherwise,\n statistics will only be gathered if True, because the footer of\n every file will be parsed (which is very slow on some systems).\n ignore_metadata_file : bool, default False\n Whether to ignore the global ``_metadata`` file (when one is present).\n If ``True``, or if the global ``_metadata`` file is missing, the parquet\n metadata may be gathered and processed in parallel. Parallel metadata\n processing is currently supported for ``ArrowDatasetEngine`` only.\n metadata_task_size : int, default configurable\n If parquet metadata is processed in parallel (see ``ignore_metadata_file``\n description above), this argument can be used to specify the number of\n dataset files to be processed by each task in the Dask graph. 
If this\n argument is set to ``0``, parallel metadata processing will be disabled.\n The default values for local and remote filesystems can be specified\n with the \"metadata-task-size-local\" and \"metadata-task-size-remote\"\n config fields, respectively (see \"dataframe.parquet\").\n split_row_groups : bool or int, default None\n Default is True if a _metadata file is available or if\n the dataset is composed of a single file (otherwise default is False).\n If True, then each output dataframe partition will correspond to a single\n parquet-file row-group. If False, each partition will correspond to a\n complete file. If a positive integer value is given, each dataframe\n partition will correspond to that number of parquet row-groups (or fewer).\n chunksize : int or str, default None\n The desired size of each output ``DataFrame`` partition in terms of total\n (uncompressed) parquet storage space. If specified, adjacent row-groups\n and/or files will be aggregated into the same output partition until the\n cumulative ``total_byte_size`` parquet-metadata statistic reaches this\n value. Use `aggregate_files` to enable/disable inter-file aggregation.\n aggregate_files : bool or str, default None\n Whether distinct file paths may be aggregated into the same output\n partition. This parameter requires `gather_statistics=True`, and is\n only used when `chunksize` is specified or when `split_row_groups` is\n an integer >1. A setting of True means that any two file paths may be\n aggregated into the same output partition, while False means that\n inter-file aggregation is prohibited.\n\n For \"hive-partitioned\" datasets, a \"partition\"-column name can also be\n specified. In this case, we allow the aggregation of any two files\n sharing a file path up to, and including, the corresponding directory name.\n For example, if ``aggregate_files`` is set to ``\"section\"`` for the\n directory structure below, ``03.parquet`` and ``04.parquet`` may be\n aggregated together, but ``01.parquet`` and ``02.parquet`` cannot be.\n If, however, ``aggregate_files`` is set to ``\"region\"``, ``01.parquet``\n may be aggregated with ``02.parquet``, and ``03.parquet`` may be aggregated\n with ``04.parquet``::\n\n dataset-path/\n \u251c\u2500\u2500 region=1/\n \u2502 \u251c\u2500\u2500 section=a/\n \u2502 \u2502 \u2514\u2500\u2500 01.parquet\n \u2502 \u251c\u2500\u2500 section=b/\n \u2502 \u2514\u2500\u2500 \u2514\u2500\u2500 02.parquet\n \u2514\u2500\u2500 region=2/\n \u251c\u2500\u2500 section=a/\n \u2502 \u251c\u2500\u2500 03.parquet\n \u2514\u2500\u2500 \u2514\u2500\u2500 04.parquet\n\n Note that the default behavior of ``aggregate_files`` is False.\n **kwargs: dict (of dicts)\n Passthrough keyword arguments for read backend.\n The top-level keys correspond to the appropriate operation type, and\n the second level corresponds to the kwargs that will be passed on to\n the underlying ``pyarrow`` or ``fastparquet`` function.\n Supported top-level keys: 'dataset' (for opening a ``pyarrow`` dataset),\n 'file' or 'dataset' (for opening a ``fastparquet.ParquetFile``), 'read'\n (for the backend read function), 'arrow_to_pandas' (for controlling the\n arguments passed to convert from a ``pyarrow.Table.to_pandas()``).\n Any element of kwargs that is not defined under these top-level keys\n will be passed through to the `engine.read_partitions` classmethod as a\n stand-alone argument (and will be ignored by the engine implementations\n defined in ``dask.dataframe``).\n\n Examples\n --------\n >>> df = 
dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP\n\n See Also\n --------\n to_parquet\n pyarrow.parquet.ParquetDataset\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.if_read_from_paths_in_k_read_parquet._Parse_dataset_statistic": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.if_read_from_paths_in_k_read_parquet._Parse_dataset_statistic", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 335, "end_line": 427, "span_ids": ["read_parquet"], "tokens": 789}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n ignore_metadata_file=False,\n metadata_task_size=None,\n split_row_groups=None,\n chunksize=None,\n aggregate_files=None,\n **kwargs,\n):\n\n if \"read_from_paths\" in kwargs:\n kwargs.pop(\"read_from_paths\")\n warnings.warn(\n \"`read_from_paths` is no longer supported and will be ignored.\",\n FutureWarning,\n )\n\n # Store initial function arguments\n input_kwargs = {\n \"columns\": columns,\n \"filters\": filters,\n \"categories\": categories,\n \"index\": index,\n \"storage_options\": storage_options,\n \"engine\": engine,\n \"gather_statistics\": gather_statistics,\n \"ignore_metadata_file\": ignore_metadata_file,\n \"metadata_task_size\": metadata_task_size,\n \"split_row_groups\": split_row_groups,\n \"chunksize\": chunksize,\n \"aggregate_files\": aggregate_files,\n **kwargs,\n }\n\n if isinstance(columns, str):\n input_kwargs[\"columns\"] = [columns]\n df = read_parquet(path, **input_kwargs)\n return df[columns]\n\n if columns is not None:\n columns = list(columns)\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n\n # Update input_kwargs and tokenize inputs\n label = \"read-parquet-\"\n input_kwargs.update({\"columns\": columns, \"engine\": engine})\n output_name = label + tokenize(path, **input_kwargs)\n\n fs, _, paths = get_fs_token_paths(path, mode=\"rb\", storage_options=storage_options)\n paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering\n\n auto_index_allowed = False\n if index is None:\n # User is allowing auto-detected index\n auto_index_allowed = True\n if index and isinstance(index, str):\n index = [index]\n\n if chunksize or (\n split_row_groups and int(split_row_groups) > 1 and aggregate_files\n ):\n # Require `gather_statistics=True` if `chunksize` is used,\n # or if `split_row_groups>1` and we are aggregating files.\n if gather_statistics is False:\n raise ValueError(\"read_parquet options require gather_statistics=True\")\n gather_statistics = True\n\n 
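# Collect dataset metadata through the engine. The first four\n # elements of the result are (meta, statistics, parts, index); any\n # engine-specific extras are recovered from parts[0] below.\n 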
read_metadata_result = engine.read_metadata(\n fs,\n paths,\n categories=categories,\n index=index,\n gather_statistics=gather_statistics,\n filters=filters,\n split_row_groups=split_row_groups,\n chunksize=chunksize,\n aggregate_files=aggregate_files,\n ignore_metadata_file=ignore_metadata_file,\n metadata_task_size=metadata_task_size,\n **kwargs,\n )\n\n # In the future, we may want to give the engine the\n # option to return a dedicated element for `common_kwargs`.\n # However, to avoid breaking the API, we just embed this\n # data in the first element of `parts` for now.\n # The logic below is intended to handle backward and forward\n # compatibility with a user-defined engine.\n meta, statistics, parts, index = read_metadata_result[:4]\n common_kwargs = {}\n aggregation_depth = False\n if len(parts):\n # For now, `common_kwargs` and `aggregation_depth`\n # may be stored in the first element of `parts`\n common_kwargs = parts[0].pop(\"common_kwargs\", {})\n aggregation_depth = parts[0].pop(\"aggregation_depth\", aggregation_depth)\n\n # Parse dataset statistics from metadata (if available)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.compute_kwargs_to_parquet._below_if_index_cols_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.compute_kwargs_to_parquet._below_if_index_cols_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 651, "end_line": 727, "span_ids": ["to_parquet"], "tokens": 813}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n overwrite=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n custom_metadata=None,\n write_metadata_file=True,\n compute=True,\n compute_kwargs=None,\n schema=None,\n name_function=None,\n **kwargs,\n):\n compute_kwargs = compute_kwargs or {}\n\n if compression == \"default\":\n if snappy is not None:\n compression = \"snappy\"\n else:\n compression = None\n\n partition_on = partition_on or []\n if isinstance(partition_on, str):\n partition_on = [partition_on]\n\n if set(partition_on) - set(df.columns):\n raise ValueError(\n \"Partitioning on non-existent column. \"\n \"partition_on=%s .\"\n \"columns=%s\" % (str(partition_on), str(list(df.columns)))\n )\n\n if isinstance(engine, str):\n engine = get_engine(engine)\n\n if hasattr(path, \"name\"):\n path = stringify_path(path)\n fs, _, _ = get_fs_token_paths(path, mode=\"wb\", storage_options=storage_options)\n # Trim any protocol information from the path before forwarding\n path = fs._strip_protocol(path)\n\n if overwrite:\n if _is_local_fs(fs):\n working_dir = fs.expand_path(\".\")[0]\n if path.rstrip(\"/\") == working_dir.rstrip(\"/\"):\n raise ValueError(\n \"Cannot clear the contents of the current working directory!\"\n )\n if append:\n raise ValueError(\"Cannot use both `overwrite=True` and `append=True`!\")\n if fs.exists(path) and fs.isdir(path):\n # Only remove path contents if\n # (1) The path exists\n # (2) The path is a directory\n # (3) The path is not the current working directory\n fs.rm(path, recursive=True)\n\n # Save divisions and corresponding index name. This is necessary,\n # because we may be resetting the index to write the file\n division_info = {\"divisions\": df.divisions, \"name\": df.index.name}\n if division_info[\"name\"] is None:\n # As of 0.24.2, pandas will rename an index with name=None\n # when df.reset_index() is called. The default name is \"index\",\n # but dask will always change the name to the NONE_LABEL constant\n if NONE_LABEL not in df.columns:\n division_info[\"name\"] = NONE_LABEL\n elif write_index:\n raise ValueError(\n \"Index must have a name if __null_dask_index__ is a column.\"\n )\n else:\n warnings.warn(\n \"If read back by Dask, column named __null_dask_index__ \"\n \"will be set to the index (and renamed to None).\"\n )\n\n # There are some \"reserved\" names that may be used as the default column\n # name after resetting the index. However, we don't want to treat it as\n # a \"special\" name if the string is already used as a \"real\" column name.\n reserved_names = []\n for name in [\"index\", \"level_0\"]:\n if name not in df.columns:\n reserved_names.append(name)\n\n # If write_index==True (default), reset the index and record the\n # name of the original index in `index_cols` (we will set the name\n # to the NONE_LABEL constant if it is originally `None`).\n # `fastparquet` will use `index_cols` to specify the index column(s)\n # in the metadata. `pyarrow` will revert the `reset_index` call\n # below if `index_cols` is populated (because pyarrow will want to handle\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_copy__FP_FILE_LOCK.threading_RLock_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_copy__FP_FILE_LOCK.threading_RLock_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 44, "span_ids": ["imports"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import copy\nimport pickle\nimport threading\nimport warnings\nfrom collections import OrderedDict, defaultdict\nfrom contextlib import ExitStack\n\nimport numpy as np\nimport pandas as pd\nimport tlz as toolz\nfrom packaging.version import parse as parse_version\n\ntry:\n import fastparquet\n from fastparquet import ParquetFile\n from fastparquet.util import ex_from_sep, get_file_scheme, groupby_types, val_to_num\n from fastparquet.writer import make_part_file, partition_on_columns\nexcept ImportError:\n pass\n\nfrom ....base import tokenize\nfrom ....delayed import Delayed\nfrom ....utils import natural_sort_key\nfrom ...utils import UNKNOWN_CATEGORIES\nfrom ..utils import _is_local_fs, _meta_from_dtypes, _open_input_files\n\n#########################\n# Fastparquet interface #\n#########################\nfrom .utils import (\n Engine,\n _flatten_filters,\n _get_aggregation_depth,\n _normalize_index_columns,\n _parse_pandas_metadata,\n _process_open_file_options,\n _row_groups_to_parts,\n _set_metadata_task_size,\n _sort_and_analyze_paths,\n _split_user_options,\n)\n\n# Thread lock required to reset row-groups\n_FP_FILE_LOCK = threading.RLock()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__paths_to_cats__paths_to_cats.return.cats": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py__paths_to_cats__paths_to_cats.return.cats", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 104, "span_ids": ["_paths_to_cats"], "tokens": 538}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _paths_to_cats(paths, file_scheme):\n \"\"\"\n Extract categorical fields and 
labels from hive- or drill-style paths.\n FixMe: This has been pasted from https://github.com/dask/fastparquet/pull/471\n Use fastparquet.api.paths_to_cats from fastparquet>0.3.2 instead.\n\n Parameters\n ----------\n paths (Iterable[str]): file paths relative to root\n file_scheme (str):\n\n Returns\n -------\n cats (OrderedDict[str, List[Any]]): a dict of field names and their values\n \"\"\"\n if file_scheme in [\"simple\", \"flat\", \"other\"]:\n cats = {}\n return cats\n\n cats = OrderedDict()\n raw_cats = OrderedDict()\n s = ex_from_sep(\"/\")\n paths = toolz.unique(paths)\n if file_scheme == \"hive\":\n partitions = toolz.unique((k, v) for path in paths for k, v in s.findall(path))\n for key, val in partitions:\n cats.setdefault(key, set()).add(val_to_num(val))\n raw_cats.setdefault(key, set()).add(val)\n else:\n i_val = toolz.unique(\n (i, val) for path in paths for i, val in enumerate(path.split(\"/\")[:-1])\n )\n for i, val in i_val:\n key = \"dir%i\" % i\n cats.setdefault(key, set()).add(val_to_num(val))\n raw_cats.setdefault(key, set()).add(val)\n\n for key, v in cats.items():\n # Check that no partition names map to the same value after transformation by val_to_num\n raw = raw_cats[key]\n if len(v) != len(raw):\n conflicts_by_value = OrderedDict()\n for raw_val in raw_cats[key]:\n conflicts_by_value.setdefault(val_to_num(raw_val), set()).add(raw_val)\n conflicts = [\n c for k in conflicts_by_value.values() if len(k) > 1 for c in k\n ]\n raise ValueError(\"Partition names map to the same value: %s\" % conflicts)\n vals_by_type = groupby_types(v)\n\n # Check that all partition names map to the same type after transformation by val_to_num\n if len(vals_by_type) > 1:\n examples = [x[0] for x in vals_by_type.values()]\n warnings.warn(\n \"Partition names coerce to values of different types, e.g. 
%s\"\n % examples\n )\n\n cats = OrderedDict([(key, list(v)) for key, v in cats.items()])\n return cats", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_paths_to_cats_FastParquetEngine._organize_row_groups.cmax_last._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_paths_to_cats_FastParquetEngine._organize_row_groups.cmax_last._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 157, "span_ids": ["FastParquetEngine._organize_row_groups", "FastParquetEngine", "impl:6"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "paths_to_cats = (\n _paths_to_cats # FixMe: use fastparquet.api.paths_to_cats for fastparquet>0.3.2\n)\n\n\nclass FastParquetEngine(Engine):\n @classmethod\n def _organize_row_groups(\n cls,\n pf,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n dtypes,\n base_path,\n has_metadata_file,\n chunksize,\n aggregation_depth,\n ):\n \"\"\"Organize row-groups by file.\"\"\"\n\n # Get partitioning metadata\n pqpartitions = list(pf.cats)\n\n # Fastparquet does not use a natural sorting\n # order for partitioned data. Re-sort by path\n if (\n pqpartitions\n and aggregation_depth\n and pf.row_groups\n and pf.row_groups[0].columns[0].file_path\n ):\n pf.row_groups = sorted(\n pf.row_groups,\n key=lambda x: natural_sort_key(x.columns[0].file_path),\n )\n\n # Store types specified in pandas metadata\n pandas_type = {}\n if pf.row_groups and pf.pandas_metadata:\n for c in pf.pandas_metadata.get(\"columns\", []):\n if \"field_name\" in c:\n pandas_type[c[\"field_name\"]] = c.get(\"pandas_type\", None)\n\n # Get the number of row groups per file\n single_rg_parts = int(split_row_groups) == 1\n file_row_groups = defaultdict(list)\n file_row_group_stats = defaultdict(list)\n file_row_group_column_stats = defaultdict(list)\n cmax_last = {}\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._get_thrift_row_groups_FastParquetEngine._get_thrift_row_groups.return.real_row_groups": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._get_thrift_row_groups_FastParquetEngine._get_thrift_row_groups.return.real_row_groups", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 295, "end_line": 330, "span_ids": ["FastParquetEngine._get_thrift_row_groups"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _get_thrift_row_groups(\n cls,\n pf,\n filename,\n row_groups,\n ):\n \"\"\"Turn a set of row-groups into bytes-serialized form\n using thrift via pickle.\n \"\"\"\n\n real_row_groups = []\n for rg, rg_global in row_groups:\n row_group = pf.row_groups[rg_global]\n columns = row_group.columns\n for c, col in enumerate(columns):\n if c:\n col.file_path = None\n md = col.meta_data\n md.key_value_metadata = None\n # NOTE: Fastparquet may need the null count in the\n # statistics, so we cannot just set statistics\n # to none. 
Set attributes separately:\n st = md.statistics\n if st:\n st.distinct_count = None\n st.max = None\n st.min = None\n st.max_value = None\n st.min_value = None\n md.encodings = None\n md.total_uncompressed_size = None\n md.encoding_stats = None\n row_group.columns = columns\n real_row_groups.append(row_group)\n return real_row_groups", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info.if_len_paths_1_and_fs_FastParquetEngine._collect_dataset_info._Ensure_that_there_is_no": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info.if_len_paths_1_and_fs_FastParquetEngine._collect_dataset_info._Ensure_that_there_is_no", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 397, "end_line": 487, "span_ids": ["FastParquetEngine._collect_dataset_info"], "tokens": 838}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n ):\n # ... 
other code\n if len(paths) == 1 and fs.isdir(paths[0]):\n\n # This is a directory.\n # Check if _metadata and/or _common_metadata files exists\n base = paths[0]\n _metadata_exists = True\n if not ignore_metadata_file:\n _metadata_exists = fs.isfile(fs.sep.join([base, \"_metadata\"]))\n\n # Find all files if we are not using a _metadata file\n if ignore_metadata_file or not _metadata_exists:\n # For now, we need to discover every file under paths[0]\n paths, base, fns = _sort_and_analyze_paths(fs.find(base), fs)\n _update_paths = False\n for fn in [\"_metadata\", \"_common_metadata\"]:\n try:\n fns.remove(fn)\n _update_paths = True\n except ValueError:\n pass\n if _update_paths:\n paths = [fs.sep.join([base, fn]) for fn in fns]\n _metadata_exists = False\n if _metadata_exists:\n # Using _metadata file (best-case scenario)\n pf = ParquetFile(\n fs.sep.join([base, \"_metadata\"]),\n open_with=fs.open,\n **dataset_kwargs,\n )\n if gather_statistics is None:\n gather_statistics = True\n else:\n # Use 0th file\n # Note that \"_common_metadata\" can cause issues for\n # partitioned datasets.\n if require_extension:\n # Raise error if all files have been filtered by extension\n len0 = len(paths)\n paths = [path for path in paths if path.endswith(require_extension)]\n if len0 and paths == []:\n raise ValueError(\n \"No files satisfy the `require_extension` criteria \"\n f\"(files must end with {require_extension}).\"\n )\n pf = ParquetFile(\n paths[:1], open_with=fs.open, root=base, **dataset_kwargs\n )\n scheme = get_file_scheme(fns)\n pf.file_scheme = scheme\n pf.cats = paths_to_cats(fns, scheme)\n if not gather_statistics:\n parts = [fs.sep.join([base, fn]) for fn in fns]\n else:\n # This is a list of files\n paths, base, fns = _sort_and_analyze_paths(paths, fs)\n\n # Check if _metadata is in paths, and\n # remove it if ignore_metadata_file=True\n _metadata_exists = \"_metadata\" in fns\n if _metadata_exists and ignore_metadata_file:\n fns.remove(\"_metadata\")\n _metadata_exists = False\n paths = [fs.sep.join([base, fn]) for fn in fns]\n\n if _metadata_exists:\n # We have a _metadata file, lets use it\n pf = ParquetFile(\n fs.sep.join([base, \"_metadata\"]),\n open_with=fs.open,\n **dataset_kwargs,\n )\n else:\n # Rely on metadata for 0th file.\n # Will need to pass a list of paths to read_partition\n scheme = get_file_scheme(fns)\n pf = ParquetFile(\n paths[:1], open_with=fs.open, root=base, **dataset_kwargs\n )\n pf.file_scheme = scheme\n pf.cats = paths_to_cats(fns, scheme)\n if not gather_statistics:\n parts = paths.copy()\n\n # Check the `aggregate_files` setting\n aggregation_depth = _get_aggregation_depth(\n aggregate_files,\n list(pf.cats),\n )\n\n # Ensure that there is no overlap between partition columns\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info._and_explicit_columns_in_FastParquetEngine._collect_dataset_info.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info._and_explicit_columns_in_FastParquetEngine._collect_dataset_info.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 488, "end_line": 521, "span_ids": ["FastParquetEngine._collect_dataset_info"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n ):\n # and explicit columns in `pf`\n if pf.cats:\n _partitions = [p for p in pf.cats if p not in pf.columns]\n if not _partitions:\n pf.cats = {}\n elif len(_partitions) != len(pf.cats):\n raise ValueError(\n \"No partition-columns should be written in the \\n\"\n \"file unless they are ALL written in the file.\\n\"\n \"columns: {} | partitions: {}\".format(pf.columns, pf.cats.keys())\n )\n\n return {\n \"pf\": pf,\n \"paths\": paths,\n \"has_metadata_file\": _metadata_exists,\n \"parts\": parts,\n \"base\": base,\n \"fs\": fs,\n \"gather_statistics\": gather_statistics,\n \"categories\": categories,\n \"index\": index,\n \"filters\": filters,\n \"split_row_groups\": split_row_groups,\n \"chunksize\": chunksize,\n \"aggregate_files\": aggregate_files,\n \"aggregation_depth\": aggregation_depth,\n \"metadata_task_size\": metadata_task_size,\n \"kwargs\": {\n \"dataset\": dataset_kwargs,\n \"read\": read_kwargs,\n **user_kwargs,\n },\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._create_dd_meta_FastParquetEngine._create_dd_meta.return.meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._create_dd_meta_FastParquetEngine._create_dd_meta.return.meta", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 507, "end_line": 592, "span_ids": ["FastParquetEngine._create_dd_meta"], "tokens": 628}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _create_dd_meta(cls, dataset_info):\n\n # Collect necessary information from dataset_info\n pf = dataset_info[\"pf\"]\n index = dataset_info[\"index\"]\n categories = dataset_info[\"categories\"]\n\n columns = None\n pandas_md = pf.pandas_metadata\n\n if pandas_md:\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(pandas_md)\n # auto-ranges should not be created by fastparquet\n column_names.extend(pf.cats)\n\n else:\n index_names = []\n column_names = pf.columns + list(pf.cats)\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n\n if index is None and len(index_names) > 0:\n if len(index_names) == 1 and index_names[0] is not None:\n index = index_names[0]\n else:\n index = index_names\n\n # Normalize user inputs\n column_names, index_names = _normalize_index_columns(\n columns, column_names, index, index_names\n )\n\n all_columns = index_names + column_names\n\n categories_dict = None\n if isinstance(categories, dict):\n categories_dict = categories\n\n if categories is None:\n categories = pf.categories\n elif isinstance(categories, str):\n categories = [categories]\n else:\n categories = list(categories)\n\n # Check that categories are included in columns\n if categories and not set(categories).intersection(all_columns):\n raise ValueError(\n \"categories not in available columns.\\n\"\n \"categories: {} | columns: {}\".format(categories, list(all_columns))\n )\n\n dtypes = pf._dtypes(categories)\n dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}\n\n index_cols = index or ()\n if isinstance(index_cols, str):\n index_cols = [index_cols]\n for ind in index_cols:\n if getattr(dtypes.get(ind), \"numpy_dtype\", None):\n # index does not support masked types\n dtypes[ind] = dtypes[ind].numpy_dtype\n for cat in categories:\n if cat in all_columns:\n dtypes[cat] = pd.CategoricalDtype(categories=[UNKNOWN_CATEGORIES])\n\n for catcol in pf.cats:\n if catcol in all_columns:\n dtypes[catcol] = pd.CategoricalDtype(categories=pf.cats[catcol])\n\n meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)\n\n # Update `dataset_info` and return `meta`\n dataset_info[\"dtypes\"] = dtypes\n dataset_info[\"index\"] = index\n dataset_info[\"index_cols\"] = index_cols\n dataset_info[\"categories\"] = categories\n dataset_info[\"categories_dict\"] = categories_dict\n\n return meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._construct_collection_plan_FastParquetEngine._construct_collection_plan.if_.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._construct_collection_plan_FastParquetEngine._construct_collection_plan.if_.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": 
"fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 610, "end_line": 706, "span_ids": ["FastParquetEngine._construct_collection_plan"], "tokens": 786}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _construct_collection_plan(cls, dataset_info):\n\n # Collect necessary information from dataset_info\n fs = dataset_info[\"fs\"]\n parts = dataset_info[\"parts\"]\n paths = dataset_info[\"paths\"]\n filters = dataset_info[\"filters\"]\n pf = dataset_info[\"pf\"]\n split_row_groups = dataset_info[\"split_row_groups\"]\n chunksize = dataset_info[\"chunksize\"]\n gather_statistics = dataset_info[\"gather_statistics\"]\n base_path = dataset_info[\"base\"]\n aggregation_depth = dataset_info[\"aggregation_depth\"]\n index_cols = dataset_info[\"index_cols\"]\n categories = dataset_info[\"categories\"]\n dtypes = dataset_info[\"dtypes\"]\n categories_dict = dataset_info[\"categories_dict\"]\n has_metadata_file = dataset_info[\"has_metadata_file\"]\n metadata_task_size = dataset_info[\"metadata_task_size\"]\n kwargs = dataset_info[\"kwargs\"]\n\n # Ensure metadata_task_size is set\n # (Using config file or defaults)\n metadata_task_size = _set_metadata_task_size(\n dataset_info[\"metadata_task_size\"], fs\n )\n\n # We don't \"need\" to gather statistics if we don't\n # want to apply filters, aggregate files, or calculate\n # divisions.\n if split_row_groups is None:\n split_row_groups = False\n _need_aggregation_stats = chunksize or (\n int(split_row_groups) > 1 and aggregation_depth\n )\n if len(index_cols) > 1:\n gather_statistics = False\n elif not _need_aggregation_stats and filters is None and len(index_cols) == 0:\n gather_statistics = False\n\n # Make sure gather_statistics allows filtering\n # (if filters are desired)\n if filters:\n # Filters may require us to gather statistics\n if gather_statistics is False and pf.cats:\n warnings.warn(\n \"Filtering with gather_statistics=False. \"\n \"Only partition columns will be filtered correctly.\"\n )\n elif gather_statistics is False:\n raise ValueError(\"Cannot apply filters with gather_statistics=False\")\n elif not gather_statistics:\n gather_statistics = True\n\n # Determine which columns need statistics.\n flat_filters = _flatten_filters(filters)\n stat_col_indices = {}\n for i, name in enumerate(pf.columns):\n if name in index_cols or name in flat_filters:\n stat_col_indices[name] = i\n\n # If the user has not specified `gather_statistics`,\n # we will only do so if there are specific columns in\n # need of statistics.\n # NOTE: We cannot change `gather_statistics` from True\n # to False (even if `stat_col_indices` is empty), in\n # case a `chunksize` was specified, and the row-group\n # statistics are needed for part aggregation.\n if gather_statistics is None:\n gather_statistics = bool(stat_col_indices)\n\n # Define common_kwargs\n common_kwargs = {\n \"categories\": categories_dict or categories,\n \"root_cats\": pf.cats,\n \"root_file_scheme\": pf.file_scheme,\n \"base_path\": base_path,\n **kwargs,\n }\n\n # Check if this is a very simple case where we can just\n # return the path names. This requires that `parts`\n # already be a list of paths. 
Also, we cannot be splitting\n # by row-group or collecting statistics.\n if (\n gather_statistics is False\n and not split_row_groups\n and isinstance(parts, list)\n and len(parts)\n and isinstance(parts[0], str)\n ):\n return (\n [{\"piece\": (full_path, None)} for full_path in parts],\n [],\n common_kwargs,\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._construct_collection_plan.dataset_info_kwargs_FastParquetEngine._construct_collection_plan.return.parts_stats_common_kwar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._construct_collection_plan.dataset_info_kwargs_FastParquetEngine._construct_collection_plan.return.parts_stats_common_kwar", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 690, "end_line": 747, "span_ids": ["FastParquetEngine._construct_collection_plan"], "tokens": 506}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _construct_collection_plan(cls, dataset_info):\n # ... other code\n\n dataset_info_kwargs = {\n \"fs\": fs,\n \"split_row_groups\": split_row_groups,\n \"gather_statistics\": gather_statistics,\n \"filters\": filters,\n \"dtypes\": dtypes,\n \"stat_col_indices\": stat_col_indices,\n \"aggregation_depth\": aggregation_depth,\n \"chunksize\": chunksize,\n \"root_cats\": pf.cats,\n \"root_file_scheme\": pf.file_scheme,\n \"base_path\": \"\" if base_path is None else base_path,\n \"has_metadata_file\": has_metadata_file,\n }\n\n if (\n has_metadata_file\n or metadata_task_size == 0\n or metadata_task_size > len(paths)\n ):\n # Construct the output-partitioning plan on the\n # client process (in serial). 
This means we have\n # a global _metadata file, or that `metadata_task_size`\n # is zero or larger than the number of files.\n pf_or_paths = pf if has_metadata_file else paths\n parts, stats = cls._collect_file_parts(pf_or_paths, dataset_info_kwargs)\n\n else:\n # We DON'T have a global _metadata file to work with.\n # We should loop over files in parallel\n parts, stats = [], []\n if paths:\n # Build and compute a task graph to construct stats/parts\n gather_parts_dsk = {}\n name = \"gather-pq-parts-\" + tokenize(paths, dataset_info_kwargs)\n finalize_list = []\n for task_i, file_i in enumerate(\n range(0, len(paths), metadata_task_size)\n ):\n finalize_list.append((name, task_i))\n gather_parts_dsk[finalize_list[-1]] = (\n cls._collect_file_parts,\n paths[file_i : file_i + metadata_task_size],\n dataset_info_kwargs,\n )\n\n def _combine_parts(parts_and_stats):\n parts, stats = [], []\n for part, stat in parts_and_stats:\n parts += part\n if stat:\n stats += stat\n return parts, stats\n\n gather_parts_dsk[\"final-\" + name] = (_combine_parts, finalize_list)\n parts, stats = Delayed(\"final-\" + name, gather_parts_dsk).compute()\n\n return parts, stats, common_kwargs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_file_parts_FastParquetEngine._collect_file_parts.return.parts_stats": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_file_parts_FastParquetEngine._collect_file_parts.return.parts_stats", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 749, "end_line": 824, "span_ids": ["FastParquetEngine._collect_file_parts"], "tokens": 507}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def _collect_file_parts(\n cls,\n pf_or_files,\n dataset_info_kwargs,\n ):\n\n # Collect necessary information from dataset_info\n fs = dataset_info_kwargs[\"fs\"]\n split_row_groups = dataset_info_kwargs[\"split_row_groups\"]\n gather_statistics = dataset_info_kwargs[\"gather_statistics\"]\n stat_col_indices = dataset_info_kwargs[\"stat_col_indices\"]\n filters = dataset_info_kwargs[\"filters\"]\n dtypes = dataset_info_kwargs[\"dtypes\"]\n chunksize = dataset_info_kwargs[\"chunksize\"]\n aggregation_depth = dataset_info_kwargs[\"aggregation_depth\"]\n base_path = dataset_info_kwargs.get(\"base_path\", None)\n root_cats = dataset_info_kwargs.get(\"root_cats\", None)\n root_file_scheme = dataset_info_kwargs.get(\"root_file_scheme\", None)\n has_metadata_file = dataset_info_kwargs[\"has_metadata_file\"]\n\n # Get ParquetFile\n if not isinstance(pf_or_files, fastparquet.api.ParquetFile):\n # Construct local `ParquetFile` object\n pf = ParquetFile(\n pf_or_files,\n open_with=fs.open,\n root=base_path,\n )\n # Update 
hive-partitioning to match global cats/scheme\n pf.cats = root_cats or {}\n if root_cats:\n pf.file_scheme = root_file_scheme\n else:\n # We already have a ParquetFile object to work with\n pf = pf_or_files\n\n # Organize row-groups by file\n (\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n gather_statistics,\n base_path,\n ) = cls._organize_row_groups(\n pf,\n split_row_groups,\n gather_statistics,\n stat_col_indices,\n filters,\n dtypes,\n base_path,\n has_metadata_file,\n chunksize,\n aggregation_depth,\n )\n\n # Convert organized row-groups to parts\n parts, stats = _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n aggregation_depth,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n cls._make_part,\n make_part_kwargs={\n \"fs\": fs,\n \"pf\": pf,\n \"base_path\": base_path,\n \"partitions\": list(pf.cats),\n },\n )\n\n return parts, stats", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.multi_support_FastParquetEngine.read_partition.sample.pieces_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.multi_support_FastParquetEngine.read_partition.sample.pieces_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 893, "end_line": 934, "span_ids": ["FastParquetEngine.read_partition", "FastParquetEngine.multi_support"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def multi_support(cls):\n return cls == FastParquetEngine\n\n @classmethod\n def read_partition(\n cls,\n fs,\n pieces,\n columns,\n index,\n categories=(),\n root_cats=None,\n root_file_scheme=None,\n base_path=None,\n **kwargs,\n ):\n\n null_index_name = False\n base_path = False if not root_cats else base_path\n if isinstance(index, list):\n if index == [None]:\n # Handling a None-labeled index...\n # The pandas metadata told us to read in an index\n # labeled `None`. If this corresponds to a `RangeIndex`,\n # fastparquet will need to use the pandas metadata to\n # construct the index. Otherwise, the index will correspond\n # to a column named \"__index_level_0__\". We will need to\n # check the `ParquetFile` object for this column below.\n index = []\n null_index_name = True\n columns += index\n\n # Use global `parquet_file` object. Need to reattach\n # the desired row_group\n parquet_file = kwargs.pop(\"parquet_file\", None)\n\n # Always convert pieces to list\n if not isinstance(pieces, list):\n pieces = [pieces]\n\n sample = pieces[0]\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_partition.if_isinstance_sample_tup_FastParquetEngine.read_partition.if_isinstance_sample_tup.else_.raise_ValueError_f_Expect": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.read_partition.if_isinstance_sample_tup_FastParquetEngine.read_partition.if_isinstance_sample_tup.else_.raise_ValueError_f_Expect", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 949, "end_line": 1038, "span_ids": ["FastParquetEngine.read_partition"], "tokens": 704}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def read_partition(\n cls,\n fs,\n pieces,\n columns,\n index,\n categories=(),\n root_cats=None,\n root_file_scheme=None,\n base_path=None,\n **kwargs,\n ):\n # ... other code\n if isinstance(sample, tuple):\n if isinstance(sample[0], str):\n # We have paths to read from\n assert parquet_file is None\n\n row_groups = []\n rg_offset = 0\n parquet_file = ParquetFile(\n [p[0] for p in pieces],\n open_with=fs.open,\n root=base_path or False,\n **kwargs.get(\"dataset\", {}),\n )\n for piece in pieces:\n _pf = (\n parquet_file\n if len(pieces) == 1\n else ParquetFile(\n piece[0],\n open_with=fs.open,\n root=base_path or False,\n **kwargs.get(\"dataset\", {}),\n )\n )\n n_local_row_groups = len(_pf.row_groups)\n local_rg_indices = piece[1] or list(range(n_local_row_groups))\n row_groups += [\n parquet_file.row_groups[rg + rg_offset]\n for rg in local_rg_indices\n ]\n rg_offset += n_local_row_groups\n update_parquet_file = len(row_groups) < len(parquet_file.row_groups)\n\n elif parquet_file:\n\n row_groups = []\n for piece in pieces:\n # `piece[1]` will contain actual row-group objects,\n # but they may be pickled\n rgs = piece[0]\n if isinstance(rgs, bytes):\n rgs = pickle.loads(rgs)\n row_groups += rgs\n update_parquet_file = True\n\n else:\n raise ValueError(\"Neither path nor ParquetFile detected!\")\n\n if update_parquet_file:\n with _FP_FILE_LOCK:\n for rg in row_groups:\n for chunk in rg.columns:\n s = chunk.file_path\n if s and isinstance(s, bytes):\n chunk.file_path = s.decode()\n\n parquet_file.fmd.row_groups = row_groups\n # NOTE: May lose cats after `_set_attrs` call\n save_cats = parquet_file.cats\n parquet_file._set_attrs()\n parquet_file.cats = save_cats\n\n if null_index_name:\n if \"__index_level_0__\" in parquet_file.columns:\n # See \"Handling a None-labeled index\" comment above\n index = [\"__index_level_0__\"]\n columns += index\n\n # Update hive-partitioning information if necessary\n parquet_file.cats = root_cats or {}\n if root_cats:\n parquet_file.file_scheme = root_file_scheme\n\n parquet_file._dtypes = (\n lambda *args: parquet_file.dtypes\n ) # 
ugly patch, could be fixed\n\n # Convert ParquetFile to pandas\n return cls.pf_to_pandas(\n parquet_file,\n fs=fs,\n columns=columns,\n categories=categories,\n index=index,\n **kwargs.get(\"read\", {}),\n )\n\n else:\n # `sample` is NOT a tuple\n raise ValueError(f\"Expected tuple, got {type(sample)}\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__row_groups_to_parts__row_groups_to_parts.return.parts_stats": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__row_groups_to_parts__row_groups_to_parts.return.parts_stats", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 565, "end_line": 643, "span_ids": ["_row_groups_to_parts"], "tokens": 454}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n aggregation_depth,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n make_part_func,\n make_part_kwargs,\n):\n\n # Construct `parts` and `stats`\n parts = []\n stats = []\n if split_row_groups:\n # Create parts from each file,\n # limiting the number of row_groups in each piece\n split_row_groups = int(split_row_groups)\n residual = 0\n for filename, row_groups in file_row_groups.items():\n row_group_count = len(row_groups)\n if residual:\n _rgs = [0] + list(range(residual, row_group_count, split_row_groups))\n else:\n _rgs = list(range(residual, row_group_count, split_row_groups))\n\n for i in _rgs:\n\n i_end = i + split_row_groups\n if aggregation_depth is True:\n if residual and i == 0:\n i_end = residual\n residual = 0\n _residual = i_end - row_group_count\n if _residual > 0:\n residual = _residual\n\n rg_list = row_groups[i:i_end]\n\n part = make_part_func(\n filename,\n rg_list,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename][i:i_end],\n file_row_group_column_stats[filename][i:i_end],\n stat_col_indices,\n )\n stats.append(stat)\n else:\n for filename, row_groups in file_row_groups.items():\n\n part = make_part_func(\n filename,\n row_groups,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename],\n file_row_group_column_stats[filename],\n stat_col_indices,\n )\n stats.append(stat)\n\n return parts, stats", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__get_aggregation_depth__get_aggregation_depth.return.aggregation_depth": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__get_aggregation_depth__get_aggregation_depth.return.aggregation_depth", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 646, "end_line": 676, "span_ids": ["_get_aggregation_depth"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_aggregation_depth(aggregate_files, partition_names):\n # Use `aggregate_files` to set `aggregation_depth`\n #\n # Note that `partition_names` must be ordered. `True` means that we allow\n # aggregation of any two files. `False` means that we will never aggregate\n # files. If a string is specified, it must be the name of a partition\n # column, and the \"partition depth\" of that column will be used for\n # aggregation. Note that we always convert the string into the partition\n # \"depth\" to simplify the aggregation logic.\n\n # Summary of output `aggregation_depth` settings:\n #\n # True : Free-for-all aggregation (any two files may be aggregated)\n # False : No file aggregation allowed\n # : Allow aggregation within this partition-hierarchy depth\n\n aggregation_depth = aggregate_files\n if isinstance(aggregate_files, str):\n if aggregate_files in partition_names:\n # aggregate_files corresponds to a partition column. 
Reset the\n # value of this variable to reflect the partition \"depth\" (in the\n # range of 1 to the total number of partition levels)\n aggregation_depth = len(partition_names) - partition_names.index(\n aggregate_files\n )\n else:\n raise ValueError(\n f\"{aggregate_files} is not a recognized directory partition.\"\n )\n\n return aggregation_depth", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_comment_test_comment.with_filetexts_files_mod.assert_eq_df_expected_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_comment_test_comment.with_filetexts_files_mod.assert_eq_df_expected_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 252, "end_line": 266, "span_ids": ["test_comment"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\ndef test_comment(dd_read, pd_read, files):\n files = {\n name: comment_header\n + b\"\\n\"\n + content.replace(b\"\\n\", b\" # just some comment\\n\", 1)\n for name, content in files.items()\n }\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", comment=\"#\")\n expected_df = pd.concat([pd_read(n, comment=\"#\") for n in sorted(files)])\n assert_eq(df, expected_df, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skipfooter_test_skipfooter.with_filetexts_files_mod.assert_eq_df_expected_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_skipfooter_test_skipfooter.with_filetexts_files_mod.assert_eq_df_expected_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 281, "span_ids": ["test_skipfooter"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"dd_read,pd_read,files\",\n [(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],\n)\ndef test_skipfooter(dd_read, pd_read, files):\n files = {name: content + b\"\\n\" + comment_footer 
for name, content in files.items()}\n skip = len(comment_footer.splitlines())\n with filetexts(files, mode=\"b\"):\n df = dd_read(\"2014-01-*.csv\", skipfooter=skip, engine=\"python\")\n expected_df = pd.concat(\n [pd_read(n, skipfooter=skip, engine=\"python\") for n in sorted(files)]\n )\n assert_eq(df, expected_df, check_dtype=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_int_test_header_int.with_filetexts_test_hea.assert_eq_df_expected_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_header_int_test_header_int.with_filetexts_test_hea.assert_eq_df_expected_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 795, "end_line": 808, "span_ids": ["test_header_int"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_header_int():\n text = (\n \"id0,name0,x0,y0\\n\"\n \"id,name,x,y\\n\"\n \"1034,Victor,-0.25,0.84\\n\"\n \"998,Xavier,-0.48,-0.13\\n\"\n \"999,Zelda,0.00,0.47\\n\"\n \"980,Alice,0.67,-0.98\\n\"\n \"989,Zelda,-0.04,0.03\\n\"\n )\n with filetexts({\"test_header_int.csv\": text}):\n df = dd.read_csv(\"test_header_int.csv\", header=1, blocksize=64)\n expected = pd.read_csv(\"test_header_int.csv\", header=1)\n assert_eq(df, expected, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_with_datetime_index_partitions_n_xfail_pandas_100.pytest_mark_xfail_reason_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_with_datetime_index_partitions_n_xfail_pandas_100.pytest_mark_xfail_reason_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1101, "end_line": 1113, "span_ids": ["test_read_csv_with_datetime_index_partitions_n", "impl:37"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_with_datetime_index_partitions_n():\n with filetext(timeseries) as fn:\n df = pd.read_csv(\n fn, index_col=0, header=0, usecols=[0, 4], parse_dates=[\"Date\"]\n )\n # because fn is so small, set a small blocksize explicitly\n ddf = dd.read_csv(\n fn, header=0, 
usecols=[0, 4], parse_dates=[\"Date\"], blocksize=400\n ).set_index(\"Date\")\n assert_eq(df, ddf)\n\n\nxfail_pandas_100 = pytest.mark.xfail(reason=\"https://github.com/dask/dask/issues/5787\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_sep_test_read_csv_singleton_dtype.with_filetext_data_mode_.assert_eq_pd_read_csv_fn_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_read_csv_sep_test_read_csv_singleton_dtype.with_filetext_data_mode_.assert_eq_pd_read_csv_fn_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1185, "end_line": 1218, "span_ids": ["test_read_csv_slash_r", "test_read_csv_singleton_dtype", "test_read_csv_sep"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_csv_sep():\n sep_text = normalize_text(\n \"\"\"\n name###amount\n alice###100\n bob###200\n charlie###300\"\"\"\n )\n\n with filetext(sep_text) as fn:\n ddf = dd.read_csv(fn, sep=\"###\", engine=\"python\")\n df = pd.read_csv(fn, sep=\"###\", engine=\"python\")\n\n assert (df.columns == ddf.columns).all()\n assert len(df) == len(ddf)\n\n\ndef test_read_csv_slash_r():\n data = b\"0,my\\n1,data\\n\" * 1000 + b\"2,foo\\rbar\"\n with filetext(data, mode=\"wb\") as fn:\n dd.read_csv(\n fn,\n header=None,\n sep=\",\",\n lineterminator=\"\\n\",\n names=[\"a\", \"b\"],\n blocksize=200,\n ).compute(scheduler=\"sync\")\n\n\ndef test_read_csv_singleton_dtype():\n data = b\"a,b\\n1,2\\n3,4\\n5,6\"\n with filetext(data, mode=\"wb\") as fn:\n assert_eq(pd.read_csv(fn, dtype=float), dd.read_csv(fn, dtype=float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_robust_column_mismatch_test_robust_column_mismatch.with_filetexts_files_mod.assert_eq_ddf_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_robust_column_mismatch_test_robust_column_mismatch.with_filetexts_files_mod.assert_eq_ddf_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1221, "end_line": 1231, "span_ids": ["test_robust_column_mismatch"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_robust_column_mismatch():\n files = csv_files.copy()\n k = sorted(files)[-1]\n files[k] = files[k].replace(b\"name\", b\"Name\")\n with filetexts(files, mode=\"b\"):\n ddf = dd.read_csv(\n \"2014-01-*.csv\", header=None, skiprows=1, names=[\"name\", \"amount\", \"id\"]\n )\n df = pd.read_csv(\"2014-01-01.csv\")\n assert (df.columns == ddf.columns).all()\n assert_eq(ddf, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_different_columns_are_allowed_test_different_columns_are_allowed.with_filetexts_files_mod.assert_ddf_compute_col": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_different_columns_are_allowed_test_different_columns_are_allowed.with_filetexts_files_mod.assert_ddf_compute_col", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1234, "end_line": 1243, "span_ids": ["test_different_columns_are_allowed"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_different_columns_are_allowed():\n files = csv_files.copy()\n k = sorted(files)[-1]\n files[k] = files[k].replace(b\"name\", b\"address\")\n with filetexts(files, mode=\"b\"):\n ddf = dd.read_csv(\"2014-01-*.csv\")\n\n # since enforce is False, meta doesn't have to match computed\n assert (ddf.columns == [\"name\", \"amount\", \"id\"]).all()\n assert (ddf.compute().columns == [\"name\", \"amount\", \"id\", \"address\"]).all()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_None_6_test_to_csv.for_npartitions_in_1_2_.None_2.assert_eq_result_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_None_6_test_to_csv.for_npartitions_in_1_2_.None_2.assert_eq_result_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1291, "end_line": 1323, "span_ids": ["test_to_csv", "test_read_csv_names_not_none"], "tokens": 320}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "############\n# to_csv #\n############\n\n\ndef test_to_csv():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\n\n for npartitions in [1, 2]:\n a 
= dd.from_pandas(df, npartitions)\n with tmpdir() as dn:\n a.to_csv(dn, index=False)\n result = dd.read_csv(os.path.join(dn, \"*\")).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n with tmpdir() as dn:\n r = a.to_csv(dn, index=False, compute=False)\n paths = dask.compute(*r, scheduler=\"sync\")\n # this is a tuple rather than a list since it's the output of dask.compute\n assert paths == tuple(\n os.path.join(dn, f\"{n}.part\") for n in range(npartitions)\n )\n result = dd.read_csv(os.path.join(dn, \"*\")).compute().reset_index(drop=True)\n assert_eq(result, df)\n\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.csv\")\n paths = a.to_csv(fn, index=False)\n assert paths == [\n os.path.join(dn, f\"data_{n}.csv\") for n in range(npartitions)\n ]\n result = dd.read_csv(fn).compute().reset_index(drop=True)\n assert_eq(result, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_csv_parse_fail_test_csv_parse_fail.assert_eq_df_expected_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_csv_parse_fail_test_csv_parse_fail.assert_eq_df_expected_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1711, "end_line": 1721, "span_ids": ["test_csv_parse_fail"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_csv_parse_fail(tmpdir):\n # See GH #7680\n path = os.path.join(str(tmpdir), \"test.csv\")\n data = b'a,b\\n1,\"hi\\n\"\\n2,\"oi\\n\"\\n'\n expected = pd.read_csv(BytesIO(data))\n with open(path, \"wb\") as f:\n f.write(data)\n with pytest.raises(ValueError, match=\"EOF encountered\"):\n dd.read_csv(path, sample=13)\n df = dd.read_csv(path, sample=13, sample_rows=1)\n assert_eq(df, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_csv_name_should_be_different_even_if_head_is_same_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_csv_name_should_be_different_even_if_head_is_same_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1724, "end_line": 1756, "span_ids": ["test_csv_name_should_be_different_even_if_head_is_same"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_csv_name_should_be_different_even_if_head_is_same(tmpdir):\n # https://github.com/dask/dask/issues/7904\n import random\n from shutil import copyfile\n\n old_csv_path = os.path.join(str(tmpdir), \"old.csv\")\n new_csv_path = os.path.join(str(tmpdir), \"new_csv\")\n\n # Create random CSV\n with open(old_csv_path, \"w\") as f:\n for _ in range(10):\n f.write(\n f\"{random.randrange(1, 10**9):09}, {random.randrange(1, 10**9):09}, {random.randrange(1, 10**9):09}\\n\"\n )\n\n copyfile(old_csv_path, new_csv_path)\n\n # Add three new rows\n with open(new_csv_path, \"a\") as f:\n for _ in range(3):\n f.write(\n f\"{random.randrange(1, 10**9):09}, {random.randrange(1, 10**9):09}, {random.randrange(1, 10**9):09}\\n\"\n )\n\n new_df = dd.read_csv(\n new_csv_path, header=None, delimiter=\",\", dtype=str, blocksize=None\n )\n old_df = dd.read_csv(\n old_csv_path, header=None, delimiter=\",\", dtype=str, blocksize=None\n )\n\n assert new_df.dask.keys() != old_df.dask.keys()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_no_overlaps_test_make_timeseries_keywords.assert_1_bb_100": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_demo.py_test_no_overlaps_test_make_timeseries_keywords.assert_1_bb_100", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_demo.py", "file_name": "test_demo.py", "file_type": "text/x-python", "category": "test", "start_line": 102, "end_line": 130, "span_ids": ["test_make_timeseries_keywords", "test_no_overlaps"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_overlaps():\n df = dd.demo.make_timeseries(\n \"2000\", \"2001\", {\"A\": float}, freq=\"3H\", partition_freq=\"3M\"\n )\n\n assert all(\n df.get_partition(i).index.max().compute()\n < df.get_partition(i + 1).index.min().compute()\n for i in range(df.npartitions - 2)\n )\n\n\ndef test_make_timeseries_keywords():\n df = dd.demo.make_timeseries(\n \"2000\",\n \"2001\",\n {\"A\": int, \"B\": int, \"C\": str},\n freq=\"1D\",\n partition_freq=\"6M\",\n A_lam=1000000,\n B_lam=2,\n )\n a_cardinality = df.A.nunique()\n b_cardinality = df.B.nunique()\n\n aa, bb = dask.compute(a_cardinality, b_cardinality, scheduler=\"single-threaded\")\n\n assert 100 < aa <= 10000000\n assert 1 < bb <= 100", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_bag_test_to_bag.assert_ddf_x_to_bag_forma": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_to_bag_test_to_bag.assert_ddf_x_to_bag_forma", 
"embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 527, "end_line": 562, "span_ids": ["test_to_bag"], "tokens": 414}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_bag():\n a = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(a, 2)\n\n assert ddf.to_bag().compute() == list(a.itertuples(False))\n assert ddf.to_bag(True).compute() == list(a.itertuples(True))\n assert ddf.to_bag(format=\"dict\").compute() == [\n {\"x\": \"a\", \"y\": 2},\n {\"x\": \"b\", \"y\": 3},\n {\"x\": \"c\", \"y\": 4},\n {\"x\": \"d\", \"y\": 5},\n ]\n assert ddf.to_bag(True, format=\"dict\").compute() == [\n {\"index\": 1.0, \"x\": \"a\", \"y\": 2},\n {\"index\": 2.0, \"x\": \"b\", \"y\": 3},\n {\"index\": 3.0, \"x\": \"c\", \"y\": 4},\n {\"index\": 4.0, \"x\": \"d\", \"y\": 5},\n ]\n assert ddf.x.to_bag(True).compute() == list(a.x.items())\n assert ddf.x.to_bag().compute() == list(a.x)\n\n assert ddf.x.to_bag(True, format=\"dict\").compute() == [\n {\"x\": \"a\"},\n {\"x\": \"b\"},\n {\"x\": \"c\"},\n {\"x\": \"d\"},\n ]\n assert ddf.x.to_bag(format=\"dict\").compute() == [\n {\"x\": \"a\"},\n {\"x\": \"b\"},\n {\"x\": \"c\"},\n {\"x\": \"d\"},\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_preserves_hlgs_test_from_delayed_preserves_hlgs.for_d_in_chained_.for_layer_name_layer_in_.assert_hlg_dependencies_l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_delayed_preserves_hlgs_test_from_delayed_preserves_hlgs.for_d_in_chained_.for_layer_name_layer_in_.assert_hlg_dependencies_l", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 628, "end_line": 639, "span_ids": ["test_from_delayed_preserves_hlgs"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_delayed_preserves_hlgs():\n df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list(\"abcd\"))\n parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]\n dfs = [delayed(parts.__getitem__)(i) for i in range(4)]\n meta = dfs[0].compute()\n\n chained = [d.a for d in dfs]\n hlg = dd.from_delayed(chained, meta=meta).dask\n for d in chained:\n for layer_name, layer in d.dask.layers.items():\n assert hlg.layers[layer_name] == layer\n assert hlg.dependencies[layer_name] == d.dask.dependencies[layer_name]", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_to_json_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_to_json_results_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 153, "span_ids": ["test_to_json_results"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_json_results():\n with tmpfile(\"json\") as f:\n paths = ddf.to_json(f)\n assert paths == [os.path.join(f, f\"{n}.part\") for n in range(ddf.npartitions)]\n\n with tmpfile(\"json\") as f:\n list_of_delayed = ddf.to_json(f, compute=False)\n paths = dask.compute(*list_of_delayed)\n # this is a tuple rather than a list since it's the output of dask.compute\n assert paths == tuple(\n os.path.join(f, f\"{n}.part\") for n in range(ddf.npartitions)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_glob_orc_files.try_.finally_.shutil_rmtree_d_ignore_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_glob_orc_files.try_.finally_.shutil_rmtree_d_ignore_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_orc.py", "file_name": "test_orc.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 47, "span_ids": ["imports", "orc_files", "test_orc_with_backend"], "tokens": 310}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import glob\nimport os\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom packaging.version import parse as parse_version\n\nimport dask.dataframe as dd\nfrom dask.dataframe.optimize import optimize_dataframe_getitem\nfrom dask.dataframe.utils import assert_eq\n\npytest.importorskip(\"pyarrow.orc\")\npa = pytest.importorskip(\"pyarrow\")\n\n\nurl = (\n \"https://www.googleapis.com/download/storage/v1/b/anaconda-public-data/o\"\n \"/orc%2FTestOrcFile.testDate1900.orc?generation=1522611448751555&alt=\"\n \"media\"\n)\ncolumns = [\"time\", \"date\"]\n\n\n@pytest.mark.network\ndef test_orc_with_backend():\n pytest.importorskip(\"requests\")\n d = dd.read_orc(url)\n assert set(d.columns) == {\"time\", \"date\"} # order is not guaranteed\n assert len(d) == 
70000\n\n\n@pytest.fixture(scope=\"module\")\ndef orc_files():\n requests = pytest.importorskip(\"requests\")\n data = requests.get(url).content\n d = tempfile.mkdtemp()\n files = [os.path.join(d, fn) for fn in [\"test1.orc\", \"test2.orc\"]]\n for fn in files:\n with open(fn, \"wb\") as f:\n f.write(data)\n try:\n yield files\n finally:\n shutil.rmtree(d, ignore_errors=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_single_test_orc_multiple.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_single_test_orc_multiple.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_orc.py", "file_name": "test_orc.py", "file_type": "text/x-python", "category": "test", "start_line": 50, "end_line": 76, "span_ids": ["test_orc_multiple", "test_orc_single"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"split_stripes\", [1, 2])\ndef test_orc_single(orc_files, split_stripes):\n fn = orc_files[0]\n d = dd.read_orc(fn, split_stripes=split_stripes)\n assert len(d) == 70000\n assert d.npartitions == 8 / split_stripes\n d2 = dd.read_orc(fn, columns=[\"time\", \"date\"])\n assert_eq(d[columns], d2[columns], check_index=False)\n\n with pytest.raises(ValueError, match=\"nonexist\"):\n dd.read_orc(fn, columns=[\"time\", \"nonexist\"])\n\n # Check that `optimize_dataframe_getitem` changes the\n # `columns` attribute of the \"read-orc\" layer\n d3 = d[columns]\n keys = [(d3._name, i) for i in range(d3.npartitions)]\n graph = optimize_dataframe_getitem(d3.__dask_graph__(), keys)\n key = [k for k in graph.layers.keys() if k.startswith(\"read-orc-\")][0]\n assert set(graph.layers[key].columns) == set(columns)\n\n\ndef test_orc_multiple(orc_files):\n d = dd.read_orc(orc_files[0])\n d2 = dd.read_orc(orc_files)\n assert_eq(d2[columns], dd.concat([d, d])[columns], check_index=False)\n d2 = dd.read_orc(os.path.dirname(orc_files[0]) + \"/*.orc\")\n assert_eq(d2[columns], dd.concat([d, d])[columns], check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_roundtrip_test_orc_roundtrip.assert_eq_data_df2_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_roundtrip_test_orc_roundtrip.assert_eq_data_df2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_orc.py", "file_name": "test_orc.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 108, "span_ids": ["test_orc_roundtrip"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n parse_version(pa.__version__) < parse_version(\"4.0.0\"),\n reason=(\"PyArrow>=4.0.0 required for ORC write support.\"),\n)\n@pytest.mark.parametrize(\"index\", [None, \"i32\"])\n@pytest.mark.parametrize(\"columns\", [None, [\"i32\", \"i64\", \"f\"]])\ndef test_orc_roundtrip(tmpdir, index, columns):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n if index:\n data.set_index(index, inplace=True)\n df = dd.from_pandas(data, chunksize=500)\n if columns:\n data = data[[c for c in columns if c != index]]\n\n # Write\n df.to_orc(tmp, write_index=bool(index))\n\n # Read\n df2 = dd.read_orc(tmp, index=index, columns=columns)\n assert_eq(data, df2, check_index=bool(index))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_roundtrip_aggregate_files_test_orc_roundtrip_aggregate_files.assert_eq_data_df2_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_roundtrip_aggregate_files_test_orc_roundtrip_aggregate_files.assert_eq_data_df2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_orc.py", "file_name": "test_orc.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 134, "span_ids": ["test_orc_roundtrip_aggregate_files"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n parse_version(pa.__version__) < parse_version(\"4.0.0\"),\n reason=(\"PyArrow>=4.0.0 required for ORC write support.\"),\n)\n@pytest.mark.parametrize(\"split_stripes\", [True, False, 2, 4])\ndef test_orc_roundtrip_aggregate_files(tmpdir, split_stripes):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"a\": np.arange(100, dtype=np.float64),\n \"b\": np.random.choice([\"cat\", \"dog\", \"mouse\"], size=100),\n }\n )\n df = dd.from_pandas(data, npartitions=8)\n df.to_orc(tmp, write_index=False)\n df2 = dd.read_orc(tmp, split_stripes=split_stripes, aggregate_files=True)\n\n # Check that we have the expected partition count\n # and that the data is correct\n if split_stripes:\n assert df2.npartitions == df.npartitions / int(split_stripes)\n else:\n assert df2.npartitions == df.npartitions\n assert_eq(data, df2, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_aggregate_files_offset_test_orc_names.assert_out__name_startswi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_orc_aggregate_files_offset_test_orc_names.assert_out__name_startswi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_orc.py", "file_name": "test_orc.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 155, "span_ids": ["test_orc_aggregate_files_offset", "test_orc_names"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_orc_aggregate_files_offset(orc_files):\n # Default read should give back 16 partitions. Therefore,\n # specifying split_stripes=11 & aggregate_files=True should\n # produce 2 partitions (with the first being larger than\n # the second)\n df2 = dd.read_orc(orc_files[:2], split_stripes=11, aggregate_files=True)\n assert df2.npartitions == 2\n assert len(df2.partitions[0].index) > len(df2.index) // 2\n\n\n@pytest.mark.skipif(\n parse_version(pa.__version__) < parse_version(\"4.0.0\"),\n reason=(\"PyArrow>=4.0.0 required for ORC write support.\"),\n)\ndef test_orc_names(orc_files, tmp_path):\n df = dd.read_orc(orc_files)\n assert df._name.startswith(\"read-orc\")\n out = df.to_orc(tmp_path, compute=False)\n assert out._name.startswith(\"to-orc\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_to_orc_delayed_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_orc.py_test_to_orc_delayed_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_orc.py", "file_name": "test_orc.py", "file_type": "text/x-python", "category": "test", "start_line": 158, "end_line": 175, "span_ids": ["test_to_orc_delayed"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n parse_version(pa.__version__) < parse_version(\"4.0.0\"),\n reason=(\"PyArrow>=4.0.0 required for ORC write support.\"),\n)\ndef test_to_orc_delayed(tmp_path):\n # See: https://github.com/dask/dask/issues/8022\n df = pd.DataFrame(np.random.randn(100, 4), columns=[\"a\", \"b\", \"c\", \"d\"])\n ddf = dd.from_pandas(df, npartitions=4)\n\n eager_path = os.path.join(tmp_path, \"eager_orc_dataset\")\n ddf.to_orc(eager_path)\n assert len(glob.glob(os.path.join(eager_path, \"*\"))) == 4\n\n delayed_path = os.path.join(tmp_path, \"delayed_orc_dataset\")\n dataset = ddf.to_orc(delayed_path, compute=False)\n dataset.compute()\n assert 
len(glob.glob(os.path.join(delayed_path, \"*\"))) == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_getengine_test_pyarrow_getengine.if_SKIP_PYARROW_LE_.with_pytest_warns_FutureW.get_engine_pyarrow_legac": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_getengine_test_pyarrow_getengine.if_SKIP_PYARROW_LE_.with_pytest_warns_FutureW.get_engine_pyarrow_legac", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 176, "end_line": 188, "span_ids": ["test_pyarrow_getengine"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_pyarrow_getengine():\n from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine\n from dask.dataframe.io.parquet.core import get_engine\n\n # Check that the default engine for \"pyarrow\"/\"arrow\"\n # is the `pyarrow.dataset`-based engine\n assert get_engine(\"pyarrow\") == ArrowDatasetEngine\n assert get_engine(\"arrow\") == ArrowDatasetEngine\n\n if SKIP_PYARROW_LE:\n with pytest.warns(FutureWarning):\n get_engine(\"pyarrow-legacy\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_test_gather_statistics_false.assert_eq_ddf_ddf2_chec": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_read_glob_test_gather_statistics_false.assert_eq_ddf_ddf2_chec", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 272, "end_line": 301, "span_ids": ["test_read_glob", "test_gather_statistics_false"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_read_glob(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, engine=write_engine)\n if os.path.exists(os.path.join(tmp_path, \"_metadata\")):\n os.unlink(os.path.join(tmp_path, \"_metadata\"))\n files = os.listdir(tmp_path)\n assert \"_metadata\" not in files\n\n ddf2 = dd.read_parquet(\n os.path.join(tmp_path, \"*.parquet\"),\n engine=read_engine,\n index=\"myindex\", # Must specify index without _metadata\n 
gather_statistics=True,\n )\n assert_eq(ddf, ddf2)\n\n\n@write_read_engines()\ndef test_gather_statistics_false(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)\n\n ddf2 = dd.read_parquet(\n tmp_path,\n engine=read_engine,\n index=False,\n gather_statistics=False,\n )\n assert_eq(ddf, ddf2, check_index=False, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_test_partition_on.for_val_in_df_a2_unique_.assert_set_df_d_df_a2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partition_on_test_partition_on.for_val_in_df_a2_unique_.assert_set_df_d_df_a2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1255, "end_line": 1279, "span_ids": ["test_partition_on"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_partition_on(tmpdir, engine):\n tmpdir = str(tmpdir)\n df = pd.DataFrame(\n {\n \"a1\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"a2\": np.random.choice([\"X\", \"Y\", \"Z\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n \"d\": np.arange(0, 100),\n }\n )\n d = dd.from_pandas(df, npartitions=2)\n d.to_parquet(tmpdir, partition_on=[\"a1\", \"a2\"], engine=engine)\n # Note #1: Cross-engine functionality is missing\n # Note #2: The index is not preserved in pyarrow when partition_on is used\n out = dd.read_parquet(\n tmpdir, engine=engine, index=False, gather_statistics=False\n ).compute()\n for val in df.a1.unique():\n assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])\n\n # Now specify the columns and allow auto-index detection\n out = dd.read_parquet(tmpdir, engine=engine, columns=[\"d\", \"a2\"]).compute()\n for val in df.a2.unique():\n assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_test_filters.assert_f_y_c_all_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_test_filters.assert_f_y_c_all_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1356, "end_line": 1399, "span_ids": ["test_filters"], "tokens": 452}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_filters(tmpdir, write_engine, read_engine):\n tmp_path = str(tmpdir)\n df = pd.DataFrame({\"x\": range(10), \"y\": list(\"aabbccddee\")})\n ddf = dd.from_pandas(df, npartitions=5)\n assert ddf.npartitions == 5\n\n ddf.to_parquet(tmp_path, engine=write_engine)\n\n a = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \">\", 4)])\n assert a.npartitions == 3\n assert (a.x > 3).all().compute()\n\n b = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"y\", \"==\", \"c\")])\n assert b.npartitions == 1\n assert (b.y == \"c\").all().compute()\n\n c = dd.read_parquet(\n tmp_path, engine=read_engine, filters=[(\"y\", \"==\", \"c\"), (\"x\", \">\", 6)]\n )\n assert c.npartitions <= 1\n assert not len(c)\n assert_eq(c, c)\n\n d = dd.read_parquet(\n tmp_path,\n engine=read_engine,\n filters=[\n # Select two overlapping ranges\n [(\"x\", \">\", 1), (\"x\", \"<\", 6)],\n [(\"x\", \">\", 3), (\"x\", \"<\", 8)],\n ],\n )\n assert d.npartitions == 3\n assert ((d.x > 1) & (d.x < 8)).all().compute()\n\n e = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"x\", \"in\", (0, 9))])\n assert e.npartitions == 2\n assert ((e.x < 2) | (e.x > 7)).all().compute()\n\n f = dd.read_parquet(tmp_path, engine=read_engine, filters=[(\"y\", \"=\", \"c\")])\n assert f.npartitions == 1\n assert len(f)\n assert (f.y == \"c\").all().compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_v0_test_filters_v0.assert_eq_ddf2_ddf3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_filters_v0_test_filters_v0.assert_eq_ddf2_ddf3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1402, "end_line": 1455, "span_ids": ["test_filters_v0"], "tokens": 639}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_filters_v0(tmpdir, write_engine, read_engine):\n if write_engine == \"fastparquet\" or read_engine == \"fastparquet\":\n pytest.importorskip(\"fastparquet\", minversion=\"0.3.1\")\n\n # Recent versions of pyarrow support full row-wise filtering\n # (fastparquet and older pyarrow versions do not)\n pyarrow_row_filtering = read_engine == \"pyarrow-dataset\"\n\n fn = str(tmpdir)\n df = pd.DataFrame({\"at\": [\"ab\", \"aa\", \"ba\", \"da\", \"bb\"]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n # Ok with 1 partition and filters\n ddf.repartition(npartitions=1, force=True).to_parquet(\n fn, write_index=False, engine=write_engine\n )\n ddf2 = dd.read_parquet(\n fn, index=False, engine=read_engine, filters=[(\"at\", 
\"==\", \"aa\")]\n ).compute()\n ddf3 = dd.read_parquet(\n fn, index=False, engine=read_engine, filters=[(\"at\", \"=\", \"aa\")]\n ).compute()\n if pyarrow_row_filtering:\n assert_eq(ddf2, ddf[ddf[\"at\"] == \"aa\"], check_index=False)\n assert_eq(ddf3, ddf[ddf[\"at\"] == \"aa\"], check_index=False)\n else:\n assert_eq(ddf2, ddf)\n assert_eq(ddf3, ddf)\n\n # with >1 partition and no filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(fn, engine=read_engine).compute()\n assert_eq(ddf2, ddf)\n\n # with >1 partition and filters using base fastparquet\n if read_engine == \"fastparquet\":\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[(\"at\", \"==\", \"aa\")])\n df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[(\"at\", \"=\", \"aa\")])\n assert len(df2) > 0\n assert len(df3) > 0\n\n # with >1 partition and filters\n ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)\n ddf2 = dd.read_parquet(\n fn, engine=read_engine, filters=[(\"at\", \"==\", \"aa\")]\n ).compute()\n ddf3 = dd.read_parquet(\n fn, engine=read_engine, filters=[(\"at\", \"=\", \"aa\")]\n ).compute()\n assert len(ddf2) > 0\n assert len(ddf3) > 0\n assert_eq(ddf2, ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_filter_divisions_test_pyarrow_filter_divisions.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_filter_divisions_test_pyarrow_filter_divisions.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 1507, "end_line": 1541, "span_ids": ["test_pyarrow_filter_divisions"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pyarrow_filter_divisions(tmpdir):\n pytest.importorskip(\"pyarrow\")\n\n # Write simple dataset with an index that will only\n # have a sorted index if certain row-groups are filtered out.\n # In this case, we filter \"a\" <= 3 to get a sorted\n # index. 
Otherwise, \"a\" is NOT monotonically increasing.\n df = pd.DataFrame({\"a\": [0, 1, 10, 12, 2, 3, 8, 9], \"b\": range(8)}).set_index(\"a\")\n df.iloc[:4].to_parquet(\n str(tmpdir.join(\"file.0.parquet\")), engine=\"pyarrow\", row_group_size=2\n )\n df.iloc[4:].to_parquet(\n str(tmpdir.join(\"file.1.parquet\")), engine=\"pyarrow\", row_group_size=2\n )\n\n # Only works for ArrowDatasetEngine.\n # Legacy code will not apply filters on individual row-groups\n # when `split_row_groups=False`.\n ddf = dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-dataset\",\n split_row_groups=False,\n gather_statistics=True,\n filters=[(\"a\", \"<=\", 3)],\n )\n assert ddf.divisions == (0, 2, 3)\n\n ddf = dd.read_parquet(\n str(tmpdir),\n engine=\"pyarrow-dataset\",\n split_row_groups=True,\n gather_statistics=True,\n filters=[(\"a\", \"<=\", 3)],\n )\n assert ddf.divisions == (0, 2, 3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_test_timeseries_nulls_in_schema.assert_eq_ddf_read_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_timeseries_nulls_in_schema_test_timeseries_nulls_in_schema.assert_eq_ddf_read_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2324, "end_line": 2343, "span_ids": ["test_timeseries_nulls_in_schema"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"schema\", [\"infer\", None])\ndef test_timeseries_nulls_in_schema(tmpdir, engine, schema):\n # GH#5608: relative path failing _metadata/_common_metadata detection.\n tmp_path = str(tmpdir.mkdir(\"files\"))\n tmp_path = os.path.join(tmp_path, \"../\", \"files\")\n\n ddf2 = (\n dask.datasets.timeseries(start=\"2000-01-01\", end=\"2000-01-03\", freq=\"1h\")\n .reset_index()\n .map_partitions(lambda x: x.loc[:5])\n )\n ddf2 = ddf2.set_index(\"x\").reset_index().persist()\n ddf2.name = ddf2.name.where(ddf2.timestamp == \"2000-01-01\", None)\n\n # Note: `append_row_groups` will fail with pyarrow>0.17.1 for _metadata write\n dataset = {\"validate_schema\": False} if engine == \"pyarrow-legacy\" else {}\n ddf2.to_parquet(tmp_path, engine=engine, write_metadata_file=False, schema=schema)\n ddf_read = dd.read_parquet(tmp_path, engine=engine, dataset=dataset)\n\n assert_eq(ddf_read, ddf2, check_divisions=False, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_aggregate_files_test_split_row_groups_int_aggregate_files.assert_eq_df_ddf2_check": {"__data__": 
{"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_split_row_groups_int_aggregate_files_test_split_row_groups_int_aggregate_files.assert_eq_df_ddf2_check", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2639, "end_line": 2669, "span_ids": ["test_split_row_groups_int_aggregate_files"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"split_row_groups\", [8, 25])\ndef test_split_row_groups_int_aggregate_files(tmpdir, engine, split_row_groups):\n # Use pyarrow to write a multi-file dataset with\n # multiple row-groups per file\n row_group_size = 10\n size = 800\n df = pd.DataFrame(\n {\n \"i32\": np.arange(size, dtype=np.int32),\n \"f\": np.arange(size, dtype=np.float64),\n }\n )\n dd.from_pandas(df, npartitions=4).to_parquet(\n str(tmpdir), engine=\"pyarrow\", row_group_size=row_group_size, write_index=False\n )\n\n # Read back with both `split_row_groups>1` and\n # `aggregate_files=True`\n ddf2 = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n split_row_groups=split_row_groups,\n aggregate_files=True,\n )\n\n # Check that we are aggregating files as expected\n npartitions_expected = math.ceil((size / row_group_size) / split_row_groups)\n assert ddf2.npartitions == npartitions_expected\n assert len(ddf2) == size\n assert_eq(df, ddf2, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_empty_test_chunksize_empty.assert_eq_ddf1_ddf2_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_empty_test_chunksize_empty.assert_eq_ddf1_ddf2_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2741, "end_line": 2747, "span_ids": ["test_chunksize_empty"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\ndef test_chunksize_empty(tmpdir, write_engine, read_engine):\n df = pd.DataFrame({\"a\": pd.Series(dtype=\"int\"), \"b\": pd.Series(dtype=\"float\")})\n ddf1 = dd.from_pandas(df, npartitions=1)\n ddf1.to_parquet(tmpdir, engine=write_engine)\n ddf2 = dd.read_parquet(tmpdir, engine=read_engine, chunksize=\"1MiB\")\n assert_eq(ddf1, ddf2, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_files_test_chunksize_files.if_partition_on_.else_.assert_eq_ddf1_ddf2_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_files_test_chunksize_files.if_partition_on_.else_.assert_eq_ddf1_ddf2_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2750, "end_line": 2802, "span_ids": ["test_chunksize_files"], "tokens": 450}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\"metadata\", [True, False])\n@pytest.mark.parametrize(\"partition_on\", [None, \"a\"])\n@pytest.mark.parametrize(\"chunksize\", [4096, \"1MiB\"])\n@write_read_engines()\ndef test_chunksize_files(\n tmpdir, chunksize, partition_on, write_engine, read_engine, metadata\n):\n\n if partition_on and read_engine == \"fastparquet\" and not metadata:\n pytest.skip(\"Fastparquet requires _metadata for partitioned data.\")\n\n df_size = 100\n df1 = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.random(size=df_size),\n \"c\": np.random.randint(1, 5, size=df_size),\n }\n )\n ddf1 = dd.from_pandas(df1, npartitions=9)\n\n ddf1.to_parquet(\n str(tmpdir),\n engine=write_engine,\n partition_on=partition_on,\n write_metadata_file=metadata,\n write_index=False,\n )\n\n ddf2 = dd.read_parquet(\n str(tmpdir),\n engine=read_engine,\n chunksize=chunksize,\n aggregate_files=partition_on if partition_on else True,\n )\n\n # Check that files where aggregated as expected\n if chunksize == 4096:\n assert ddf2.npartitions < ddf1.npartitions\n elif chunksize == \"1MiB\":\n if partition_on:\n assert ddf2.npartitions == 3\n else:\n assert ddf2.npartitions == 1\n\n # Check that the final data is correct\n if partition_on:\n df2 = ddf2.compute().sort_values([\"b\", \"c\"])\n df1 = df1.sort_values([\"b\", \"c\"])\n assert_eq(df1[[\"b\", \"c\"]], df2[[\"b\", \"c\"]], check_index=False)\n else:\n assert_eq(ddf1, ddf2, check_divisions=False, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_aggregate_files_test_chunksize_aggregate_files.assert_eq_df1_c_d_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_aggregate_files_test_chunksize_aggregate_files.assert_eq_df1_c_d_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2805, "end_line": 2844, "span_ids": 
["test_chunksize_aggregate_files"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@write_read_engines()\n@pytest.mark.parametrize(\"aggregate_files\", [\"a\", \"b\"])\ndef test_chunksize_aggregate_files(tmpdir, write_engine, read_engine, aggregate_files):\n\n chunksize = \"1MiB\"\n partition_on = [\"a\", \"b\"]\n df_size = 100\n df1 = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.choice([\"small\", \"large\"], size=df_size),\n \"c\": np.random.random(size=df_size),\n \"d\": np.random.randint(1, 100, size=df_size),\n }\n )\n ddf1 = dd.from_pandas(df1, npartitions=9)\n\n ddf1.to_parquet(\n str(tmpdir),\n engine=write_engine,\n partition_on=partition_on,\n write_index=False,\n )\n ddf2 = dd.read_parquet(\n str(tmpdir),\n engine=read_engine,\n chunksize=chunksize,\n aggregate_files=aggregate_files,\n )\n\n # Check that files where aggregated as expected\n if aggregate_files == \"a\":\n assert ddf2.npartitions == 3\n elif aggregate_files == \"b\":\n assert ddf2.npartitions == 6\n\n # Check that the final data is correct\n df2 = ddf2.compute().sort_values([\"c\", \"d\"])\n df1 = df1.sort_values([\"c\", \"d\"])\n assert_eq(df1[[\"c\", \"d\"]], df2[[\"c\", \"d\"]], check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_test_chunksize.if_not_chunksize_.else_.if_chunksize_1MiB_.assert_ddf2_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_chunksize_test_chunksize.if_not_chunksize_.else_.if_chunksize_1MiB_.assert_ddf2_npartitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2847, "end_line": 2900, "span_ids": ["test_chunksize"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"metadata\", [True, False])\n@pytest.mark.parametrize(\"chunksize\", [None, 1024, 4096, \"1MiB\"])\ndef test_chunksize(tmpdir, chunksize, engine, metadata):\n nparts = 2\n df_size = 100\n row_group_size = 5\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"apple\", \"banana\", \"carrot\"], size=df_size),\n \"b\": np.random.random(size=df_size),\n \"c\": np.random.randint(1, 5, size=df_size),\n \"index\": np.arange(0, df_size),\n }\n ).set_index(\"index\")\n\n ddf1 = dd.from_pandas(df, npartitions=nparts)\n ddf1.to_parquet(\n str(tmpdir),\n engine=\"pyarrow\",\n row_group_size=row_group_size,\n write_metadata_file=metadata,\n )\n\n if metadata:\n path = str(tmpdir)\n else:\n dirname = 
str(tmpdir)\n files = os.listdir(dirname)\n assert \"_metadata\" not in files\n path = os.path.join(dirname, \"*.parquet\")\n\n ddf2 = dd.read_parquet(\n path,\n engine=engine,\n chunksize=chunksize,\n split_row_groups=True,\n gather_statistics=True,\n index=\"index\",\n aggregate_files=True,\n )\n\n assert_eq(ddf1, ddf2, check_divisions=False)\n\n num_row_groups = df_size // row_group_size\n if not chunksize:\n assert ddf2.npartitions == num_row_groups\n else:\n # Check that we are really aggregating\n assert ddf2.npartitions < num_row_groups\n if chunksize == \"1MiB\":\n # Largest chunksize will result in\n # a single output partition\n assert ddf2.npartitions == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow_test_pandas_timestamp_overflow_pyarrow.if_pa_version_parse_ve.else_.from_dask_dataframe_io_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pandas_timestamp_overflow_pyarrow_test_pandas_timestamp_overflow_pyarrow.if_pa_version_parse_ve.else_.from_dask_dataframe_io_pa", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3004, "end_line": 3025, "span_ids": ["test_pandas_timestamp_overflow_pyarrow"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_pandas_timestamp_overflow_pyarrow(tmpdir):\n info = np.iinfo(np.dtype(\"int64\"))\n arr_numeric = np.linspace(\n start=info.min + 2, stop=info.max, num=1024, dtype=\"int64\"\n )\n arr_dates = arr_numeric.astype(\"datetime64[ms]\")\n\n table = pa.Table.from_arrays([pa.array(arr_dates)], names=[\"ts\"])\n pa.parquet.write_table(\n table, f\"{tmpdir}/file.parquet\", use_deprecated_int96_timestamps=False\n )\n\n # This will raise by default due to overflow\n with pytest.raises(pa.lib.ArrowInvalid) as e:\n dd.read_parquet(str(tmpdir), engine=\"pyarrow\").compute()\n assert \"out of bounds\" in str(e.value)\n\n if pa_version >= parse_version(\"5.0.0\"):\n from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine as ArrowEngine\n else:\n from dask.dataframe.io.parquet.arrow import ArrowEngine\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_no_pandas_metadata_test_partitioned_no_pandas_metadata.assert_eq_result_list_exp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_partitioned_no_pandas_metadata_test_partitioned_no_pandas_metadata.assert_eq_result_list_exp", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3113, "end_line": 3147, "span_ids": ["test_partitioned_no_pandas_metadata"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.parametrize(\n \"write_cols\",\n [[\"col\"], [\"part\", \"col\"]],\n)\ndef test_partitioned_no_pandas_metadata(tmpdir, engine, write_cols):\n # See: https://github.com/dask/dask/issues/8087\n\n # Manually construct directory-partitioned dataset\n path1 = tmpdir.mkdir(\"part=a\")\n path2 = tmpdir.mkdir(\"part=b\")\n path1 = os.path.join(path1, \"data.parquet\")\n path2 = os.path.join(path2, \"data.parquet\")\n\n # Write partitions without parquet metadata.\n # Note that we always use pyarrow to do this\n # (regardless of the `engine`)\n _df1 = pd.DataFrame({\"part\": \"a\", \"col\": range(5)})\n _df2 = pd.DataFrame({\"part\": \"b\", \"col\": range(5)})\n t1 = pa.Table.from_pandas(\n _df1[write_cols],\n preserve_index=False,\n ).replace_schema_metadata(metadata={})\n pq.write_table(t1, path1)\n t2 = pa.Table.from_pandas(\n _df2[write_cols],\n preserve_index=False,\n ).replace_schema_metadata(metadata={})\n pq.write_table(t2, path2)\n\n # Check results\n expect = pd.concat([_df1, _df2], ignore_index=True)\n result = dd.read_parquet(str(tmpdir), engine=engine)\n result[\"part\"] = result[\"part\"].astype(\"object\")\n assert_eq(result[list(expect.columns)], expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_read_from_paths_test_pyarrow_dataset_read_from_paths.assert_eq_ddf_ddf_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_pyarrow_dataset_read_from_paths_test_pyarrow_dataset_read_from_paths.assert_eq_ddf_ddf_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3284, "end_line": 3307, "span_ids": ["test_pyarrow_dataset_read_from_paths"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\ndef test_pyarrow_dataset_read_from_paths(tmpdir):\n fn = str(tmpdir)\n df = pd.DataFrame({\"a\": [4, 5, 6], \"b\": [\"a\", \"b\", \"b\"]})\n df[\"b\"] = df[\"b\"].astype(\"category\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(fn, engine=\"pyarrow\", partition_on=\"b\")\n\n with pytest.warns(FutureWarning):\n read_df_1 = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")],\n read_from_paths=False,\n )\n\n read_df_2 = dd.read_parquet(\n fn,\n engine=\"pyarrow\",\n filters=[(\"b\", \"==\", \"a\")],\n )\n\n assert_eq(read_df_1, read_df_2)\n assert_eq(ddf[ddf[\"b\"] == \"a\"].compute(), read_df_2.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_metadata_test_custom_metadata.assert_User_defined_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_metadata_test_custom_metadata.assert_User_defined_key_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3619, "end_line": 3657, "span_ids": ["test_custom_metadata"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_metadata(tmpdir, engine):\n # Write a parquet dataset with custom metadata\n\n # Define custom metadata\n custom_metadata = {b\"my_key\": b\"my_data\"}\n\n # Write parquet dataset\n path = str(tmpdir)\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n dd.from_pandas(df, npartitions=2).to_parquet(\n path,\n engine=engine,\n custom_metadata=custom_metadata,\n )\n\n # Check that data is correct\n assert_eq(df, dd.read_parquet(path, engine=engine))\n\n # Require pyarrow.parquet to check key/value metadata\n if pq:\n # Read footer metadata and _metadata.\n # Check that it contains keys/values from `custom_metadata`\n files = glob.glob(os.path.join(path, \"*.parquet\"))\n files += [os.path.join(path, \"_metadata\")]\n for fn in files:\n _md = pq.ParquetFile(fn).metadata.metadata\n for k, v in custom_metadata.items():\n assert _md[k] == custom_metadata[k]\n\n # Make sure we raise an error if the custom metadata\n # includes a b\"pandas\" key\n custom_metadata = {b\"pandas\": b\"my_new_pandas_md\"}\n with pytest.raises(ValueError) as e:\n dd.from_pandas(df, npartitions=2).to_parquet(\n path,\n engine=engine,\n custom_metadata=custom_metadata,\n )\n assert \"User-defined key/value\" in str(e.value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ignore_metadata_file_test_ignore_metadata_file.if_engine_pyarrow_leg.else_.with_pytest_raises_ValueE.dd_read_parquet_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_ignore_metadata_file_test_ignore_metadata_file.if_engine_pyarrow_leg.else_.with_pytest_raises_ValueE.dd_read_parquet_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3660, "end_line": 3706, "span_ids": ["test_ignore_metadata_file"], "tokens": 426}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"gather_statistics\", [True, False, None])\ndef test_ignore_metadata_file(tmpdir, engine, gather_statistics):\n tmpdir = str(tmpdir)\n dataset_with_bad_metadata = os.path.join(tmpdir, \"data1\")\n dataset_without_metadata = os.path.join(tmpdir, \"data2\")\n\n # Write two identical datasets without any _metadata file\n df1 = pd.DataFrame({\"a\": range(100), \"b\": [\"dog\", \"cat\"] * 50})\n ddf1 = dd.from_pandas(df1, npartitions=2)\n ddf1.to_parquet(\n path=dataset_with_bad_metadata, engine=engine, write_metadata_file=False\n )\n ddf1.to_parquet(\n path=dataset_without_metadata, engine=engine, write_metadata_file=False\n )\n\n # Copy \"bad\" metadata into `dataset_with_bad_metadata`\n assert \"_metadata\" not in os.listdir(dataset_with_bad_metadata)\n with open(os.path.join(dataset_with_bad_metadata, \"_metadata\"), \"w\") as f:\n f.write(\"INVALID METADATA\")\n assert \"_metadata\" in os.listdir(dataset_with_bad_metadata)\n assert \"_metadata\" not in os.listdir(dataset_without_metadata)\n\n # Read back the datasets with `ignore_metadata_file=True`, and\n # test that the results are the same\n if engine != \"pyarrow-legacy\":\n ddf2a = dd.read_parquet(\n dataset_with_bad_metadata,\n engine=engine,\n ignore_metadata_file=True,\n gather_statistics=gather_statistics,\n )\n ddf2b = dd.read_parquet(\n dataset_without_metadata,\n engine=engine,\n ignore_metadata_file=True,\n gather_statistics=gather_statistics,\n )\n assert_eq(ddf2a, ddf2b)\n else:\n # Check that \"pyarrow-legacy\" raises a ValueError\n with pytest.raises(ValueError):\n dd.read_parquet(\n dataset_with_bad_metadata,\n engine=engine,\n ignore_metadata_file=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_metadata_task_size_test_metadata_task_size.if_engine_pyarrow_leg.else_.with_pytest_raises_ValueE.dd_read_parquet_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_metadata_task_size_test_metadata_task_size.if_engine_pyarrow_leg.else_.with_pytest_raises_ValueE.dd_read_parquet_", "embedding": null, "metadata": 
{"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3709, "end_line": 3754, "span_ids": ["test_metadata_task_size"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"write_metadata_file\", [True, False])\n@pytest.mark.parametrize(\"metadata_task_size\", [2, 0])\ndef test_metadata_task_size(tmpdir, engine, write_metadata_file, metadata_task_size):\n\n # Write simple dataset\n tmpdir = str(tmpdir)\n df1 = pd.DataFrame({\"a\": range(100), \"b\": [\"dog\", \"cat\"] * 50})\n ddf1 = dd.from_pandas(df1, npartitions=10)\n ddf1.to_parquet(\n path=str(tmpdir), engine=engine, write_metadata_file=write_metadata_file\n )\n\n # Read back\n if engine != \"pyarrow-legacy\" or not metadata_task_size:\n ddf2a = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n )\n ddf2b = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n metadata_task_size=metadata_task_size,\n )\n assert_eq(ddf2a, ddf2b)\n\n with dask.config.set(\n {\"dataframe.parquet.metadata-task-size-local\": metadata_task_size}\n ):\n ddf2c = dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n )\n assert_eq(ddf2b, ddf2c)\n\n else:\n # Check that other engines raise a ValueError\n with pytest.raises(ValueError):\n dd.read_parquet(\n str(tmpdir),\n engine=engine,\n gather_statistics=True,\n metadata_task_size=metadata_task_size,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_extra_file_test_extra_file.if_engine_pyarrow_leg.with_pytest_raises_ValueE.dd_read_parquet_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_extra_file_test_extra_file.if_engine_pyarrow_leg.with_pytest_raises_ValueE.dd_read_parquet_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3757, "end_line": 3792, "span_ids": ["test_extra_file"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_extra_file(tmpdir, engine):\n # Check that read_parquet can handle spark output\n # See: https://github.com/dask/dask/issues/8087\n tmpdir = str(tmpdir)\n df = pd.DataFrame({\"a\": range(100), \"b\": [\"dog\", \"cat\"] * 50})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(tmpdir, engine=engine)\n open(os.path.join(tmpdir, \"_SUCCESS\"), \"w\").close()\n open(os.path.join(tmpdir, \"part.0.parquet.crc\"), \"w\").close()\n os.remove(os.path.join(tmpdir, \"_metadata\"))\n out = 
dd.read_parquet(tmpdir, engine=engine)\n assert_eq(out, df)\n\n if engine != \"pyarrow-legacy\":\n # For \"fastparquet\" and \"pyarrow-dataset\", we can pass the\n # expected file extension, or avoid checking file extensions\n # by passing False. Check here that this works:\n\n # Should Work\n out = dd.read_parquet(\n tmpdir, engine=engine, dataset={\"require_extension\": \".parquet\"}\n )\n assert_eq(out, df)\n\n # Should Fail (for not capturing the _SUCCESS and crc files)\n with pytest.raises((OSError, pa.lib.ArrowInvalid)):\n dd.read_parquet(\n tmpdir, engine=engine, dataset={\"require_extension\": False}\n ).compute()\n\n # Should Fail (for filtering out all files)\n # (Related to: https://github.com/dask/dask/issues/8349)\n with pytest.raises(ValueError):\n dd.read_parquet(\n tmpdir, engine=engine, dataset={\"require_extension\": \".foo\"}\n ).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_unsupported_extension_file_test_unsupported_extension_file.assert_eq_df0_dd_read_pa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_unsupported_extension_file_test_unsupported_extension_file.assert_eq_df0_dd_read_pa", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3795, "end_line": 3802, "span_ids": ["test_unsupported_extension_file"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unsupported_extension_file(tmpdir, engine):\n # File extension shouldn't matter when we are only\n # reading a single file.\n # (See: https://github.com/dask/dask/issues/8349)\n fn = os.path.join(str(tmpdir), \"multi.foo\")\n df0 = pd.DataFrame({\"a\": range(10)})\n df0.to_parquet(fn, engine=engine.split(\"-\")[0])\n assert_eq(df0, dd.read_parquet(fn, engine=engine, index=False))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_unsupported_extension_dir_test_unsupported_extension_dir.assert_eq_ddf0_dd_read_p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_unsupported_extension_dir_test_unsupported_extension_dir.assert_eq_ddf0_dd_read_p", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3805, "end_line": 3812, "span_ids": ["test_unsupported_extension_dir"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unsupported_extension_dir(tmpdir, engine):\n # File extensions shouldn't matter when we have\n # a _metadata file\n # (Related to: https://github.com/dask/dask/issues/8349)\n path = str(tmpdir)\n ddf0 = dd.from_pandas(pd.DataFrame({\"a\": range(10)}), 1)\n ddf0.to_parquet(path, engine=engine, name_function=lambda i: f\"part.{i}.foo\")\n assert_eq(ddf0, dd.read_parquet(path, engine=engine))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_test_custom_filename.assert_eq_df_dd_read_par": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_test_custom_filename.assert_eq_df_dd_read_par", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3815, "end_line": 3828, "span_ids": ["test_custom_filename"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_filename(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]},\n )\n df = dd.from_pandas(pdf, npartitions=2)\n df.to_parquet(fn, name_function=lambda x: f\"hi-{x}.parquet\", engine=engine)\n\n files = os.listdir(fn)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"hi-0.parquet\" in files\n assert \"hi-1.parquet\" in files\n assert_eq(df, dd.read_parquet(fn, engine=engine))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_works_with_pyarrow_when_append_is_true_test_custom_filename_works_with_pyarrow_when_append_is_true.assert_eq_actual_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_works_with_pyarrow_when_append_is_true_test_custom_filename_works_with_pyarrow_when_append_is_true.assert_eq_actual_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3831, "end_line": 3866, "span_ids": ["test_custom_filename_works_with_pyarrow_when_append_is_true"], "tokens": 413}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_filename_works_with_pyarrow_when_append_is_true(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]},\n )\n df = dd.from_pandas(pdf, npartitions=2)\n df.to_parquet(fn, name_function=lambda x: f\"hi-{x * 2}.parquet\", engine=engine)\n\n pdf = pd.DataFrame(\n {\"num1\": [33], \"num2\": [44]},\n )\n df = dd.from_pandas(pdf, npartitions=1)\n if engine == \"fastparquet\":\n pytest.xfail(\n \"fastparquet errors our with IndexError when ``name_function`` is customized \"\n \"and append is set to True. We didn't do a detailed investigation for expediency. \"\n \"See this comment for the conversation: https://github.com/dask/dask/pull/7682#issuecomment-845243623\"\n )\n df.to_parquet(\n fn,\n name_function=lambda x: f\"hi-{x * 2}.parquet\",\n engine=engine,\n append=True,\n ignore_divisions=True,\n )\n files = os.listdir(fn)\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"hi-0.parquet\" in files\n assert \"hi-2.parquet\" in files\n assert \"hi-4.parquet\" in files\n expected_pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4, 33], \"num2\": [7, 8, 9, 10, 44]},\n )\n actual = dd.read_parquet(fn, engine=engine, index=False)\n assert_eq(actual, expected_pdf, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_throws_error_if_custom_filename_is_invalid_test_throws_error_if_custom_filename_is_invalid.None_1.df_to_parquet_fn_name_fu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_throws_error_if_custom_filename_is_invalid_test_throws_error_if_custom_filename_is_invalid.None_1.df_to_parquet_fn_name_fu", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3869, "end_line": 3883, "span_ids": ["test_throws_error_if_custom_filename_is_invalid"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_throws_error_if_custom_filename_is_invalid(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\"num1\": [1, 2, 3, 4], \"num2\": [7, 8, 9, 10]},\n )\n df = dd.from_pandas(pdf, npartitions=2)\n with pytest.raises(\n ValueError, match=\"``name_function`` must be a callable with one argument.\"\n ):\n df.to_parquet(fn, name_function=\"whatever.parquet\", engine=engine)\n\n with pytest.raises(\n ValueError, match=\"``name_function`` must produce unique filenames.\"\n ):\n df.to_parquet(fn, name_function=lambda x: \"whatever.parquet\", engine=engine)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_single_partition_join_single_partition_join.return.joined": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/multi.py_single_partition_join_single_partition_join.return.joined", "embedding": null, "metadata": {"file_path": "dask/dataframe/multi.py", "file_name": "multi.py", "file_type": "text/x-python", "category": "implementation", "start_line": 389, "end_line": 444, "span_ids": ["single_partition_join"], "tokens": 410}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def single_partition_join(left, right, **kwargs):\n # if the merge is performed on_index, divisions can be kept, otherwise the\n # new index will not necessarily correspond with the current divisions\n\n meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)\n\n use_left = kwargs.get(\"right_index\") or right._contains_index_name(\n kwargs.get(\"right_on\")\n )\n use_right = kwargs.get(\"left_index\") or left._contains_index_name(\n kwargs.get(\"left_on\")\n )\n\n if len(meta) == 0:\n if use_left:\n meta.index = meta.index.astype(left.index.dtype)\n elif use_right:\n meta.index = meta.index.astype(right.index.dtype)\n else:\n meta.index = meta.index.astype(\"int64\")\n\n kwargs[\"empty_index_dtype\"] = meta.index.dtype\n kwargs[\"categorical_columns\"] = meta.select_dtypes(include=\"category\").columns\n\n if right.npartitions == 1 and kwargs[\"how\"] in allowed_left:\n if use_left:\n divisions = left.divisions\n elif use_right and len(right.divisions) == len(left.divisions):\n divisions = right.divisions\n else:\n divisions = [None for _ in left.divisions]\n\n elif left.npartitions == 1 and kwargs[\"how\"] in allowed_right:\n if use_right:\n divisions = right.divisions\n elif use_left and len(left.divisions) == len(right.divisions):\n divisions = left.divisions\n else:\n divisions = [None for _ in right.divisions]\n else:\n raise NotImplementedError(\n \"single_partition_join has no fallback for invalid calls\"\n )\n\n joined = map_partitions(\n merge_chunk,\n left,\n right,\n meta=meta,\n enforce_metadata=False,\n transform_divisions=False,\n align_dataframes=False,\n **kwargs,\n )\n joined.divisions = tuple(divisions)\n return joined", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py__head_timedelta__tail_timedelta.return.selected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py__head_timedelta__tail_timedelta.return.selected", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 214, "end_line": 248, "span_ids": ["_head_timedelta", "_tail_timedelta"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _head_timedelta(current, next_, after):\n \"\"\"Return rows of ``next_`` whose index is before the last\n observation in ``current`` + ``after``.\n\n Parameters\n ----------\n current : DataFrame\n next_ : DataFrame\n after : timedelta\n\n Returns\n -------\n overlapped : DataFrame\n \"\"\"\n return next_[next_.index < (current.index.max() + after)]\n\n\ndef _tail_timedelta(prevs, current, before):\n \"\"\"Return the concatenated rows of each dataframe in ``prevs`` whose\n index is after the first observation in ``current`` - ``before``.\n\n Parameters\n ----------\n current : DataFrame\n prevs : list of DataFrame objects\n before : timedelta\n\n Returns\n -------\n overlapped : DataFrame\n \"\"\"\n selected = methods.concat(\n [prev[prev.index > (current.index.min() - before)] for prev in prevs]\n )\n return selected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling_Rolling.pandas_rolling_method.return.getattr_rolling_name_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling_Rolling.pandas_rolling_method.return.getattr_rolling_name_a", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 251, "end_line": 296, "span_ids": ["Rolling._rolling_kwargs", "Rolling", "Rolling._has_single_partition", "Rolling.__init__", "Rolling.pandas_rolling_method"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling:\n \"\"\"Provides rolling window calculations.\"\"\"\n\n def __init__(\n self, obj, window=None, min_periods=None, center=False, win_type=None, axis=0\n ):\n self.obj = obj # dataframe or series\n self.window = window\n self.min_periods = min_periods\n self.center = center\n self.axis = axis\n self.win_type = win_type\n # Allow pandas to raise if appropriate\n obj._meta.rolling(**self._rolling_kwargs())\n # Using .rolling(window='2s'), pandas will convert the\n # offset str to a window in nanoseconds. 
But pandas doesn't\n # accept the integer window with win_type='freq', so we store\n # that information here.\n # See https://github.com/pandas-dev/pandas/issues/15969\n self._win_type = None if isinstance(self.window, int) else \"freq\"\n\n def _rolling_kwargs(self):\n return {\n \"window\": self.window,\n \"min_periods\": self.min_periods,\n \"center\": self.center,\n \"win_type\": self.win_type,\n \"axis\": self.axis,\n }\n\n @property\n def _has_single_partition(self):\n \"\"\"\n Indicator for whether the object has a single partition (True)\n or multiple (False).\n \"\"\"\n return (\n self.axis in (1, \"columns\")\n or (isinstance(self.window, Integral) and self.window <= 1)\n or self.obj.npartitions == 1\n )\n\n @staticmethod\n def pandas_rolling_method(df, rolling_kwargs, name, *args, **kwargs):\n rolling = df.rolling(**rolling_kwargs)\n return getattr(rolling, name)(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.aggregate_Rolling.__repr__.return._Rolling_format_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_Rolling.aggregate_Rolling.__repr__.return._Rolling_format_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 411, "end_line": 438, "span_ids": ["Rolling:3", "Rolling.aggregate", "Rolling.__repr__"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Rolling:\n\n @derived_from(pd_Rolling)\n def aggregate(self, func, args=(), kwargs={}, **kwds):\n return self._call_method(\"agg\", func, args=args, kwargs=kwargs, **kwds)\n\n agg = aggregate\n\n def __repr__(self):\n def order(item):\n k, v = item\n _order = {\n \"window\": 0,\n \"min_periods\": 1,\n \"center\": 2,\n \"win_type\": 3,\n \"axis\": 4,\n }\n return _order[k]\n\n rolling_kwargs = self._rolling_kwargs()\n rolling_kwargs[\"window\"] = self.window\n rolling_kwargs[\"win_type\"] = self._win_type\n return \"Rolling [{}]\".format(\n \",\".join(\n f\"{k}={v}\"\n for k, v in sorted(rolling_kwargs.items(), key=order)\n if v is not None\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_RollingGroupby_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/rolling.py_RollingGroupby_", "embedding": null, "metadata": {"file_path": "dask/dataframe/rolling.py", "file_name": "rolling.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 499, "span_ids": ["RollingGroupby.__init__", "RollingGroupby.pandas_rolling_method", "RollingGroupby._call_method", "RollingGroupby"], 
"tokens": 355}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RollingGroupby(Rolling):\n def __init__(\n self,\n groupby,\n window=None,\n min_periods=None,\n center=False,\n win_type=None,\n axis=0,\n ):\n self._groupby_kwargs = groupby._groupby_kwargs\n self._groupby_slice = groupby._slice\n\n obj = groupby.obj\n if self._groupby_slice is not None:\n if isinstance(self._groupby_slice, str):\n sliced_plus = [self._groupby_slice]\n else:\n sliced_plus = list(self._groupby_slice)\n if isinstance(groupby.by, str):\n sliced_plus.append(groupby.by)\n else:\n sliced_plus.extend(groupby.by)\n obj = obj[sliced_plus]\n\n super().__init__(\n obj,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n axis=axis,\n )\n\n @staticmethod\n def pandas_rolling_method(\n df,\n rolling_kwargs,\n name,\n *args,\n groupby_kwargs=None,\n groupby_slice=None,\n **kwargs,\n ):\n groupby = df.groupby(**groupby_kwargs)\n if groupby_slice:\n groupby = groupby[groupby_slice]\n rolling = groupby.rolling(**rolling_kwargs)\n return getattr(rolling, name)(*args, **kwargs).sort_index(level=-1)\n\n def _call_method(self, method_name, *args, **kwargs):\n return super()._call_method(\n method_name,\n *args,\n groupby_kwargs=self._groupby_kwargs,\n groupby_slice=self._groupby_slice,\n **kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_test_reductions_frame_dtypes.assert_eq_df_numerics_var": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_test_reductions_frame_dtypes.assert_eq_df_numerics_var", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1156, "end_line": 1218, "span_ids": ["test_reductions_frame_dtypes"], "tokens": 733}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\n \"ignore:Dropping of nuisance columns:FutureWarning\"\n) # https://github.com/dask/dask/issues/7714\ndef test_reductions_frame_dtypes():\n df = pd.DataFrame(\n {\n \"int\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"float\": [1.0, 2.0, 3.0, 4.0, np.nan, 6.0, 7.0, 8.0],\n \"dt\": [pd.NaT] + [datetime(2011, i, 1) for i in range(1, 8)],\n \"str\": list(\"abcdefgh\"),\n \"timedelta\": pd.to_timedelta([1, 2, 3, 4, 5, 6, 7, np.nan]),\n \"bool\": [True, False] * 4,\n }\n )\n\n ddf = dd.from_pandas(df, 3)\n\n # TODO: std and mean do not support timedelta dtype\n df_no_timedelta = df.drop(\"timedelta\", axis=1, inplace=False)\n 
ddf_no_timedelta = dd.from_pandas(df_no_timedelta, 3)\n\n assert_eq(df.drop(columns=\"dt\").sum(), ddf.drop(columns=\"dt\").sum())\n assert_eq(\n df_no_timedelta.drop(columns=\"dt\").mean(),\n ddf_no_timedelta.drop(columns=\"dt\").mean(),\n )\n\n assert_eq(df.prod(), ddf.prod())\n assert_eq(df.product(), ddf.product())\n assert_eq(df.min(), ddf.min())\n assert_eq(df.max(), ddf.max())\n assert_eq(df.count(), ddf.count())\n assert_eq(df.sem(), ddf.sem())\n assert_eq(df.sem(ddof=0), ddf.sem(ddof=0))\n\n assert_eq(df_no_timedelta.std(), ddf_no_timedelta.std())\n assert_eq(df_no_timedelta.std(skipna=False), ddf_no_timedelta.std(skipna=False))\n assert_eq(df_no_timedelta.std(ddof=0), ddf_no_timedelta.std(ddof=0))\n assert_eq(df_no_timedelta.var(), ddf_no_timedelta.var())\n assert_eq(df_no_timedelta.var(skipna=False), ddf_no_timedelta.var(skipna=False))\n assert_eq(df_no_timedelta.var(ddof=0), ddf_no_timedelta.var(ddof=0))\n assert_eq(\n df_no_timedelta.var(ddof=0, skipna=False),\n ddf_no_timedelta.var(ddof=0, skipna=False),\n )\n\n assert_eq(df._get_numeric_data(), ddf._get_numeric_data())\n\n numerics = ddf[[\"int\", \"float\"]]\n assert numerics._get_numeric_data().dask == numerics.dask\n\n # test var corner cases\n\n # only timedelta\n df_td = df[[\"timedelta\"]]\n ddf_td = dd.from_pandas(df_td, 3)\n assert_eq(df_td.var(ddof=0), ddf_td.var(ddof=0))\n assert_eq(df_td.var(), ddf_td.var())\n\n # only numerics\n df_numerics = df[[\"int\", \"float\", \"bool\"]]\n ddf_numerics = dd.from_pandas(df_numerics, 3)\n assert_eq(df_numerics.var(), ddf_numerics.var())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_numeric_only_test_reductions_frame_dtypes_numeric_only.None_1.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_reductions_frame_dtypes_numeric_only_test_reductions_frame_dtypes_numeric_only.None_1.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1233, "end_line": 1291, "span_ids": ["test_reductions_frame_dtypes_numeric_only"], "tokens": 601}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_frame_dtypes_numeric_only():\n df = pd.DataFrame(\n {\n \"int\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"float\": [1.0, 2.0, 3.0, 4.0, np.nan, 6.0, 7.0, 8.0],\n \"dt\": [pd.NaT] + [datetime(2011, i, 1) for i in range(1, 8)],\n \"str\": list(\"abcdefgh\"),\n \"timedelta\": pd.to_timedelta([1, 2, 3, 4, 5, 6, 7, np.nan]),\n \"bool\": [True, False] * 4,\n }\n )\n\n ddf = dd.from_pandas(df, 3)\n kwargs = {\"numeric_only\": True}\n funcs = [\n \"sum\",\n \"prod\",\n \"product\",\n \"min\",\n \"max\",\n \"mean\",\n \"var\",\n \"std\",\n \"count\",\n \"sem\",\n ]\n\n for func in funcs:\n assert_eq(\n getattr(df, 
func)(**kwargs),\n getattr(ddf, func)(**kwargs),\n check_dtype=func in [\"mean\", \"max\"] and PANDAS_GT_120,\n )\n with pytest.raises(NotImplementedError, match=\"'numeric_only=False\"):\n getattr(ddf, func)(numeric_only=False)\n\n assert_eq(df.sem(ddof=0, **kwargs), ddf.sem(ddof=0, **kwargs))\n assert_eq(df.std(ddof=0, **kwargs), ddf.std(ddof=0, **kwargs))\n assert_eq(df.var(ddof=0, **kwargs), ddf.var(ddof=0, **kwargs))\n assert_eq(df.var(skipna=False, **kwargs), ddf.var(skipna=False, **kwargs))\n assert_eq(\n df.var(skipna=False, ddof=0, **kwargs), ddf.var(skipna=False, ddof=0, **kwargs)\n )\n\n # ------ only include numerics columns ------ #\n assert_eq(df._get_numeric_data(), ddf._get_numeric_data())\n\n df_numerics = df[[\"int\", \"float\", \"bool\"]]\n ddf_numerics = ddf[[\"int\", \"float\", \"bool\"]]\n\n assert_eq(df_numerics, ddf._get_numeric_data())\n assert ddf_numerics._get_numeric_data().dask == ddf_numerics.dask\n\n for func in funcs:\n assert_eq(\n getattr(df_numerics, func)(),\n getattr(ddf_numerics, func)(),\n check_dtype=func in [\"mean\", \"max\"] and PANDAS_GT_120,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_warnings_test_scalar_raises.with_pytest_raises_TypeEr.bool_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_warnings_test_scalar_raises.with_pytest_raises_TypeEr.bool_s_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 230, "span_ids": ["test_Index", "test_head_tail", "test_index_head", "test_axes", "test_Dataframe", "imports", "test_Scalar", "test_scalar_raises", "test_dataframe_doc_from_non_pandas", "test_head_npartitions_warn", "test_Series", "test_head_npartitions", "test_series_axes", "test_dataframe_doc"], "tokens": 2086}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\nimport weakref\nimport xml.etree.ElementTree\nfrom itertools import product\nfrom operator import add\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.errors import PerformanceWarning\nfrom pandas.io.formats import format as pandas_format\n\nimport dask\nimport dask.array as da\nimport dask.dataframe as dd\nimport dask.dataframe.groupby\nfrom dask.base import compute_as_if_collection\nfrom dask.blockwise import fuse_roots\nfrom dask.dataframe import _compat, methods\nfrom dask.dataframe._compat import (\n PANDAS_GT_110,\n PANDAS_GT_120,\n PANDAS_GT_140,\n PANDAS_GT_150,\n tm,\n)\nfrom dask.dataframe.core import (\n Scalar,\n _concat,\n _map_freq_to_period_start,\n aca,\n has_parallel_type,\n is_broadcastable,\n repartition_divisions,\n total_mem_usage,\n)\nfrom dask.dataframe.utils import assert_eq, assert_max_deps, make_meta\nfrom dask.datasets import timeseries\nfrom dask.utils import M, is_dataframe_like, is_series_like, put_lines\nfrom 
dask.utils_test import _check_warning, hlg_layer\n\ndsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}, index=[0, 1, 3]),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}, index=[5, 6, 8]),\n (\"x\", 2): pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}, index=[9, 9, 9]),\n}\nmeta = make_meta(\n {\"a\": \"i8\", \"b\": \"i8\"}, index=pd.Index([], \"i8\"), parent_meta=pd.DataFrame()\n)\nd = dd.DataFrame(dsk, \"x\", meta, [0, 5, 9, 9])\nfull = d.compute()\nCHECK_FREQ = {}\nif dd._compat.PANDAS_GT_110:\n CHECK_FREQ[\"check_freq\"] = False\n\n\ndef test_dataframe_doc():\n doc = d.add.__doc__\n disclaimer = \"Some inconsistencies with the Dask version may exist.\"\n assert disclaimer in doc\n\n\ndef test_dataframe_doc_from_non_pandas():\n class Foo:\n def foo(self):\n \"\"\"This is a new docstring that I just made up\n\n Parameters:\n ----------\n None\n \"\"\"\n\n d._bind_operator_method(\"foo\", Foo.foo, original=Foo)\n try:\n doc = d.foo.__doc__\n disclaimer = \"Some inconsistencies with the Dask version may exist.\"\n assert disclaimer in doc\n assert \"new docstring that I just made up\" in doc\n finally:\n # make sure to clean up this alteration of the dd.DataFrame class\n del dd.DataFrame.foo\n\n\ndef test_Dataframe():\n expected = pd.Series(\n [2, 3, 4, 5, 6, 7, 8, 9, 10], index=[0, 1, 3, 5, 6, 8, 9, 9, 9], name=\"a\"\n )\n\n assert_eq(d[\"a\"] + 1, expected)\n\n tm.assert_index_equal(d.columns, pd.Index([\"a\", \"b\"]))\n\n assert_eq(d[d[\"b\"] > 2], full[full[\"b\"] > 2])\n assert_eq(d[[\"a\", \"b\"]], full[[\"a\", \"b\"]])\n assert_eq(d.a, full.a)\n assert d.b.mean().compute() == full.b.mean()\n assert np.allclose(d.b.var().compute(), full.b.var())\n assert np.allclose(d.b.std().compute(), full.b.std())\n\n assert d.index._name == d.index._name # this is deterministic\n\n assert repr(d)\n\n\ndef test_head_tail():\n assert_eq(d.head(2), full.head(2))\n assert_eq(d.head(3), full.head(3))\n assert_eq(d.head(2), dsk[(\"x\", 0)].head(2))\n assert_eq(d[\"a\"].head(2), full[\"a\"].head(2))\n assert_eq(d[\"a\"].head(3), full[\"a\"].head(3))\n assert_eq(d[\"a\"].head(2), dsk[(\"x\", 0)][\"a\"].head(2))\n assert sorted(d.head(2, compute=False).dask) == sorted(\n d.head(2, compute=False).dask\n )\n assert sorted(d.head(2, compute=False).dask) != sorted(\n d.head(3, compute=False).dask\n )\n\n assert_eq(d.tail(2), full.tail(2))\n assert_eq(d.tail(3), full.tail(3))\n assert_eq(d.tail(2), dsk[(\"x\", 2)].tail(2))\n assert_eq(d[\"a\"].tail(2), full[\"a\"].tail(2))\n assert_eq(d[\"a\"].tail(3), full[\"a\"].tail(3))\n assert_eq(d[\"a\"].tail(2), dsk[(\"x\", 2)][\"a\"].tail(2))\n assert sorted(d.tail(2, compute=False).dask) == sorted(\n d.tail(2, compute=False).dask\n )\n assert sorted(d.tail(2, compute=False).dask) != sorted(\n d.tail(3, compute=False).dask\n )\n\n\ndef test_head_npartitions():\n assert_eq(d.head(5, npartitions=2), full.head(5))\n assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))\n assert_eq(d.head(5, npartitions=-1), full.head(5))\n assert_eq(d.head(7, npartitions=-1), full.head(7))\n assert_eq(d.head(2, npartitions=-1), full.head(2))\n with pytest.raises(ValueError):\n d.head(2, npartitions=5)\n\n\ndef test_head_npartitions_warn():\n match = \"5 elements requested, only 3 elements\"\n with pytest.warns(UserWarning, match=match):\n d.head(5)\n\n match = \"Insufficient elements\"\n with pytest.warns(UserWarning, match=match):\n d.head(100)\n\n with pytest.warns(UserWarning, match=match):\n d.head(7)\n\n with pytest.warns(UserWarning, 
match=match):\n d.head(7, npartitions=2)\n\n # No warn if all partitions are inspected\n for n in [3, -1]:\n with warnings.catch_warnings(record=True) as record:\n d.head(10, npartitions=n)\n assert not record\n\n # With default args, this means that a 1 partition dataframe won't warn\n d2 = dd.from_pandas(pd.DataFrame({\"x\": [1, 2, 3]}), npartitions=1)\n with warnings.catch_warnings(record=True) as record:\n d2.head()\n assert not record\n\n\ndef test_index_head():\n assert_eq(d.index.head(2), full.index[:2])\n assert_eq(d.index.head(3), full.index[:3])\n\n\ndef test_Series():\n assert isinstance(d.a, dd.Series)\n assert isinstance(d.a + 1, dd.Series)\n assert_eq((d + 1), full + 1)\n\n\ndef test_Index():\n for case in [\n pd.DataFrame(np.random.randn(10, 5), index=list(\"abcdefghij\")),\n pd.DataFrame(\n np.random.randn(10, 5),\n index=pd.date_range(\"2011-01-01\", freq=\"D\", periods=10),\n ),\n ]:\n ddf = dd.from_pandas(case, 3)\n assert_eq(ddf.index, case.index)\n pytest.raises(AttributeError, lambda: ddf.index.index)\n\n\ndef test_axes():\n pdf = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]})\n df = dd.from_pandas(pdf, npartitions=2)\n assert len(df.axes) == len(pdf.axes)\n assert all(assert_eq(d, p) for d, p in zip(df.axes, pdf.axes))\n\n\ndef test_series_axes():\n ps = pd.Series([\"abcde\"])\n ds = dd.from_pandas(ps, npartitions=2)\n assert len(ds.axes) == len(ps.axes)\n assert all(assert_eq(d, p) for d, p in zip(ds.axes, ps.axes))\n\n\ndef test_Scalar():\n val = np.int64(1)\n s = Scalar({(\"a\", 0): val}, \"a\", \"i8\")\n assert hasattr(s, \"dtype\")\n assert \"dtype\" in dir(s)\n assert_eq(s, val)\n assert repr(s) == \"dd.Scalar\"\n\n val = pd.Timestamp(\"2001-01-01\")\n s = Scalar({(\"a\", 0): val}, \"a\", val)\n assert not hasattr(s, \"dtype\")\n assert \"dtype\" not in dir(s)\n assert_eq(s, val)\n assert repr(s) == \"dd.Scalar\"\n\n\ndef test_scalar_raises():\n val = np.int64(1)\n s = Scalar({(\"a\", 0): val}, \"a\", \"i8\")\n msg = \"cannot be converted to a boolean value\"\n with pytest.raises(TypeError, match=msg):\n bool(s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attributes_test_describe_numeric.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_attributes_test_describe_numeric.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 232, "end_line": 431, "span_ids": ["test_attributes", "test_rename_series_method_2", "test_column_names", "test_columns_named_divisions_and_meta", "test_describe_numeric", "test_timezone_freq", "test_index_names", "test_rename_series_method", "test_rename_series", "test_rename_columns"], "tokens": 1953}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_attributes():\n assert \"a\" in dir(d)\n assert \"foo\" not in dir(d)\n 
pytest.raises(AttributeError, lambda: d.foo)\n\n df = dd.from_pandas(pd.DataFrame({\"a b c\": [1, 2, 3]}), npartitions=2)\n assert \"a b c\" not in dir(df)\n df = dd.from_pandas(pd.DataFrame({\"a\": [1, 2], 5: [1, 2]}), npartitions=2)\n assert \"a\" in dir(df)\n assert 5 not in dir(df)\n\n df = dd.from_pandas(_compat.makeTimeDataFrame(), npartitions=3)\n pytest.raises(AttributeError, lambda: df.foo)\n\n\ndef test_column_names():\n tm.assert_index_equal(d.columns, pd.Index([\"a\", \"b\"]))\n tm.assert_index_equal(d[[\"b\", \"a\"]].columns, pd.Index([\"b\", \"a\"]))\n assert d[\"a\"].name == \"a\"\n assert (d[\"a\"] + 1).name == \"a\"\n assert (d[\"a\"] + d[\"b\"]).name is None\n\n\ndef test_columns_named_divisions_and_meta():\n # https://github.com/dask/dask/issues/7599\n df = pd.DataFrame(\n {\"_meta\": [1, 2, 3, 4], \"divisions\": [\"a\", \"b\", \"c\", \"d\"]},\n index=[0, 1, 3, 5],\n )\n ddf = dd.from_pandas(df, 2)\n\n assert ddf.divisions == (0, 3, 5)\n assert_eq(ddf[\"divisions\"], df.divisions)\n assert all(ddf._meta.columns == [\"_meta\", \"divisions\"])\n assert_eq(ddf[\"_meta\"], df._meta)\n\n\ndef test_index_names():\n assert d.index.name is None\n\n idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name=\"x\")\n df = pd.DataFrame(np.random.randn(10, 5), idx)\n ddf = dd.from_pandas(df, 3)\n assert ddf.index.name == \"x\"\n assert ddf.index.compute().name == \"x\"\n\n\n@pytest.mark.skipif(dd._compat.PANDAS_GT_130, reason=\"Freq no longer included in ts\")\n@pytest.mark.parametrize(\n \"npartitions\",\n [\n 1,\n pytest.param(\n 2,\n marks=pytest.mark.xfail(\n not dd._compat.PANDAS_GT_110, reason=\"Fixed upstream.\"\n ),\n ),\n ],\n)\ndef test_timezone_freq(npartitions):\n s_naive = pd.Series(pd.date_range(\"20130101\", periods=10))\n s_aware = pd.Series(pd.date_range(\"20130101\", periods=10, tz=\"US/Eastern\"))\n pdf = pd.DataFrame({\"tz\": s_aware, \"notz\": s_naive})\n ddf = dd.from_pandas(pdf, npartitions=npartitions)\n\n assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq\n\n\ndef test_rename_columns():\n # GH 819\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5, 6, 7], \"b\": [7, 6, 5, 4, 3, 2, 1]})\n ddf = dd.from_pandas(df, 2)\n\n ddf.columns = [\"x\", \"y\"]\n df.columns = [\"x\", \"y\"]\n tm.assert_index_equal(ddf.columns, pd.Index([\"x\", \"y\"]))\n tm.assert_index_equal(ddf._meta.columns, pd.Index([\"x\", \"y\"]))\n assert_eq(ddf, df)\n\n msg = r\"Length mismatch: Expected axis has 2 elements, new values have 4 elements\"\n with pytest.raises(ValueError) as err:\n ddf.columns = [1, 2, 3, 4]\n assert msg in str(err.value)\n\n # Multi-index columns\n df = pd.DataFrame({(\"A\", \"0\"): [1, 2, 2, 3], (\"B\", 1): [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n df.columns = [\"x\", \"y\"]\n ddf.columns = [\"x\", \"y\"]\n tm.assert_index_equal(ddf.columns, pd.Index([\"x\", \"y\"]))\n tm.assert_index_equal(ddf._meta.columns, pd.Index([\"x\", \"y\"]))\n assert_eq(ddf, df)\n\n\ndef test_rename_series():\n # GH 819\n s = pd.Series([1, 2, 3, 4, 5, 6, 7], name=\"x\")\n ds = dd.from_pandas(s, 2)\n\n s.name = \"renamed\"\n ds.name = \"renamed\"\n assert s.name == \"renamed\"\n assert_eq(ds, s)\n\n ind = s.index\n dind = ds.index\n ind.name = \"renamed\"\n dind.name = \"renamed\"\n assert ind.name == \"renamed\"\n assert_eq(dind, ind)\n\n\ndef test_rename_series_method():\n # Series name\n s = pd.Series([1, 2, 3, 4, 5, 6, 7], name=\"x\")\n ds = dd.from_pandas(s, 2)\n\n assert_eq(ds.rename(\"y\"), s.rename(\"y\"))\n assert ds.name == \"x\" # no mutation\n 
assert_eq(ds.rename(), s.rename())\n\n assert_eq(ds, s)\n\n\ndef test_rename_series_method_2():\n # Series index\n s = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\"], name=\"x\")\n ds = dd.from_pandas(s, 2)\n\n for is_sorted in [True, False]:\n res = ds.rename(lambda x: x**2, sorted_index=is_sorted)\n assert_eq(res, s.rename(lambda x: x**2))\n assert res.known_divisions == is_sorted\n\n res = ds.rename(s, sorted_index=is_sorted)\n assert_eq(res, s.rename(s))\n assert res.known_divisions == is_sorted\n\n with pytest.raises(ValueError):\n ds.rename(lambda x: -x, sorted_index=True)\n assert_eq(ds.rename(lambda x: -x), s.rename(lambda x: -x))\n\n res = ds.rename(ds)\n assert_eq(res, s.rename(s))\n assert not res.known_divisions\n\n ds2 = ds.clear_divisions()\n res = ds2.rename(lambda x: x**2, sorted_index=True)\n assert_eq(res, s.rename(lambda x: x**2))\n assert not res.known_divisions\n\n res = ds.rename(lambda x: x**2, inplace=True, sorted_index=True)\n assert res is ds\n s.rename(lambda x: x**2, inplace=True)\n assert_eq(ds, s)\n\n\n@pytest.mark.parametrize(\n \"method,test_values\", [(\"tdigest\", (6, 10)), (\"dask\", (4, 20))]\n)\ndef test_describe_numeric(method, test_values):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n # prepare test case which approx quantiles will be the same as actuals\n s = pd.Series(list(range(test_values[1])) * test_values[0])\n df = pd.DataFrame(\n {\n \"a\": list(range(test_values[1])) * test_values[0],\n \"b\": list(range(test_values[0])) * test_values[1],\n }\n )\n\n ds = dd.from_pandas(s, test_values[0])\n ddf = dd.from_pandas(df, test_values[0])\n\n test_quantiles = [0.25, 0.75]\n\n assert_eq(df.describe(), ddf.describe(percentiles_method=method))\n assert_eq(s.describe(), ds.describe(percentiles_method=method))\n\n assert_eq(\n df.describe(percentiles=test_quantiles),\n ddf.describe(percentiles=test_quantiles, percentiles_method=method),\n )\n assert_eq(s.describe(), ds.describe(split_every=2, percentiles_method=method))\n assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))\n\n # remove string columns\n df = pd.DataFrame(\n {\n \"a\": list(range(test_values[1])) * test_values[0],\n \"b\": list(range(test_values[0])) * test_values[1],\n \"c\": list(\"abcdef\"[: test_values[0]]) * test_values[1],\n }\n )\n ddf = dd.from_pandas(df, test_values[0])\n assert_eq(df.describe(), ddf.describe(percentiles_method=method))\n assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_test_describe_for_possibly_unsorted_q.for_q_in_None_0_25_0_.for_f_convert_in_list_t.assert_eq_r_75_75_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_describe_test_describe_for_possibly_unsorted_q.for_q_in_None_0_25_0_.for_f_convert_in_list_t.assert_eq_r_75_75_0_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 631, "span_ids": ["test_describe_empty_tdigest", "test_describe", "test_describe_for_possibly_unsorted_q", 
"test_describe_without_datetime_is_numeric", "test_describe_empty"], "tokens": 1805}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"include,exclude,percentiles,subset\",\n [\n (None, None, None, [\"c\", \"d\"]), # numeric\n (None, None, None, [\"c\", \"d\", \"f\"]), # numeric + timedelta\n (None, None, None, [\"c\", \"d\", \"g\"]), # numeric + bool\n (None, None, None, [\"c\", \"d\", \"f\", \"g\"]), # numeric + bool + timedelta\n (None, None, None, [\"f\", \"g\"]), # bool + timedelta\n (\"all\", None, None, None),\n ([\"number\"], None, [0.25, 0.5], None),\n ([np.timedelta64], None, None, None),\n ([\"number\", \"object\"], None, [0.25, 0.75], None),\n (None, [\"number\", \"object\"], None, None),\n ([\"object\", \"datetime\", \"bool\"], None, None, None),\n ],\n)\ndef test_describe(include, exclude, percentiles, subset):\n data = {\n \"a\": [\"aaa\", \"bbb\", \"bbb\", None, None, \"zzz\"] * 2,\n \"c\": [None, 0, 1, 2, 3, 4] * 2,\n \"d\": [None, 0, 1] * 4,\n \"e\": [\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 07:56:23.858694\"),\n pd.Timestamp(\"2017-05-09 05:59:58.938999\"),\n None,\n None,\n ]\n * 2,\n \"f\": [\n np.timedelta64(3, \"D\"),\n np.timedelta64(1, \"D\"),\n None,\n None,\n np.timedelta64(3, \"D\"),\n np.timedelta64(1, \"D\"),\n ]\n * 2,\n \"g\": [True, False, True] * 4,\n }\n\n # Arrange\n df = pd.DataFrame(data)\n\n if subset is not None:\n df = df.loc[:, subset]\n\n ddf = dd.from_pandas(df, 2)\n\n if PANDAS_GT_110:\n datetime_is_numeric_kwarg = {\"datetime_is_numeric\": True}\n else:\n datetime_is_numeric_kwarg = {}\n\n # Act\n actual = ddf.describe(\n include=include,\n exclude=exclude,\n percentiles=percentiles,\n **datetime_is_numeric_kwarg,\n )\n expected = df.describe(\n include=include,\n exclude=exclude,\n percentiles=percentiles,\n **datetime_is_numeric_kwarg,\n )\n\n if \"e\" in expected and datetime_is_numeric_kwarg:\n expected.at[\"mean\", \"e\"] = np.nan\n expected.dropna(how=\"all\", inplace=True)\n\n assert_eq(actual, expected)\n\n # Check series\n if subset is None:\n for col in [\"a\", \"c\", \"e\", \"g\"]:\n expected = df[col].describe(\n include=include, exclude=exclude, **datetime_is_numeric_kwarg\n )\n if col == \"e\" and datetime_is_numeric_kwarg:\n expected.drop(\"mean\", inplace=True)\n actual = ddf[col].describe(\n include=include, exclude=exclude, **datetime_is_numeric_kwarg\n )\n assert_eq(expected, actual)\n\n\ndef test_describe_without_datetime_is_numeric():\n data = {\n \"a\": [\"aaa\", \"bbb\", \"bbb\", None, None, \"zzz\"] * 2,\n \"c\": [None, 0, 1, 2, 3, 4] * 2,\n \"d\": [None, 0, 1] * 4,\n \"e\": [\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 00:00:00.006000\"),\n pd.Timestamp(\"2017-05-09 07:56:23.858694\"),\n pd.Timestamp(\"2017-05-09 05:59:58.938999\"),\n None,\n None,\n ]\n * 2,\n }\n # Arrange\n df = pd.DataFrame(data)\n ddf = dd.from_pandas(df, 2)\n\n # Assert\n assert_eq(ddf.describe(), df.describe())\n\n # Check series\n for col in [\"a\", \"c\"]:\n assert_eq(df[col].describe(), ddf[col].describe())\n\n if PANDAS_GT_110:\n with pytest.warns(\n FutureWarning,\n match=(\n \"Treating datetime data as categorical 
rather than numeric in `.describe` is deprecated\"\n ),\n ):\n ddf.e.describe()\n else:\n assert_eq(df.e.describe(), ddf.e.describe())\n with pytest.raises(\n NotImplementedError,\n match=\"datetime_is_numeric=True is only supported for pandas >= 1.1.0\",\n ):\n ddf.e.describe(datetime_is_numeric=True)\n\n\ndef test_describe_empty():\n df_none = pd.DataFrame({\"A\": [None, None]})\n ddf_none = dd.from_pandas(df_none, 2)\n df_len0 = pd.DataFrame({\"A\": [], \"B\": []})\n ddf_len0 = dd.from_pandas(df_len0, 2)\n ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)\n\n # Pandas have different dtypes for resulting describe dataframe if there are only\n # None-values, pre-compute dask df to bypass _meta check\n assert_eq(\n df_none.describe(), ddf_none.describe(percentiles_method=\"dask\").compute()\n )\n\n with pytest.warns(RuntimeWarning):\n ddf_len0.describe(percentiles_method=\"dask\").compute()\n\n with pytest.raises(ValueError):\n ddf_nocols.describe(percentiles_method=\"dask\").compute()\n\n\ndef test_describe_empty_tdigest():\n pytest.importorskip(\"crick\")\n\n df_none = pd.DataFrame({\"A\": [None, None]})\n ddf_none = dd.from_pandas(df_none, 2)\n df_len0 = pd.DataFrame({\"A\": []})\n ddf_len0 = dd.from_pandas(df_len0, 2)\n ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)\n\n # Pandas have different dtypes for resulting describe dataframe if there are only\n # None-values, pre-compute dask df to bypass _meta check\n assert_eq(\n df_none.describe(), ddf_none.describe(percentiles_method=\"tdigest\").compute()\n )\n with warnings.catch_warnings():\n # dask.dataframe should probably filter this, to match pandas, but\n # it seems quite difficult.\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n assert_eq(df_len0.describe(), ddf_len0.describe(percentiles_method=\"tdigest\"))\n assert_eq(df_len0.describe(), ddf_len0.describe(percentiles_method=\"tdigest\"))\n\n with pytest.raises(ValueError):\n ddf_nocols.describe(percentiles_method=\"tdigest\").compute()\n\n\ndef test_describe_for_possibly_unsorted_q():\n \"\"\"make sure describe is sorting percentiles parameter, q, properly and can\n handle lists, tuples and ndarrays.\n\n See https://github.com/dask/dask/issues/4642.\n \"\"\"\n # prepare test case where quantiles should equal values\n A = da.arange(0, 101)\n ds = dd.from_dask_array(A)\n\n for q in [None, [0.25, 0.50, 0.75], [0.25, 0.50, 0.75, 0.99], [0.75, 0.5, 0.25]]:\n for f_convert in [list, tuple, np.array]:\n if q is None:\n r = ds.describe(percentiles=q).compute()\n else:\n r = ds.describe(percentiles=f_convert(q)).compute()\n\n assert_eq(r[\"25%\"], 25.0)\n assert_eq(r[\"50%\"], 50.0)\n assert_eq(r[\"75%\"], 75.0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_test_clip.assert_eq_ds_clip_upper_u": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_cumulative_test_clip.assert_eq_ds_clip_upper_u", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 627, "end_line": 795, "span_ids": ["test_cumulative_empty_partitions", "test_clip", "test_dropna", "test_cumulative"], 
"tokens": 1971}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cumulative():\n index = [f\"row{i:03d}\" for i in range(100)]\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"), index=index)\n df_out = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"), index=index)\n\n ddf = dd.from_pandas(df, 5)\n ddf_out = dd.from_pandas(df_out, 5)\n\n assert_eq(ddf.cumsum(), df.cumsum())\n assert_eq(ddf.cumprod(), df.cumprod())\n assert_eq(ddf.cummin(), df.cummin())\n assert_eq(ddf.cummax(), df.cummax())\n\n assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))\n assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))\n assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))\n assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))\n\n np.cumsum(ddf, out=ddf_out)\n assert_eq(ddf_out, df.cumsum())\n np.cumprod(ddf, out=ddf_out)\n assert_eq(ddf_out, df.cumprod())\n ddf.cummin(out=ddf_out)\n assert_eq(ddf_out, df.cummin())\n ddf.cummax(out=ddf_out)\n assert_eq(ddf_out, df.cummax())\n\n np.cumsum(ddf, out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cumsum(axis=1))\n np.cumprod(ddf, out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cumprod(axis=1))\n ddf.cummin(out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cummin(axis=1))\n ddf.cummax(out=ddf_out, axis=1)\n assert_eq(ddf_out, df.cummax(axis=1))\n\n assert_eq(ddf.a.cumsum(), df.a.cumsum())\n assert_eq(ddf.a.cumprod(), df.a.cumprod())\n assert_eq(ddf.a.cummin(), df.a.cummin())\n assert_eq(ddf.a.cummax(), df.a.cummax())\n\n # With NaNs\n df = pd.DataFrame(\n {\n \"a\": [1, 2, np.nan, 4, 5, 6, 7, 8],\n \"b\": [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],\n \"c\": [np.nan] * 8,\n }\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(df.cumsum(), ddf.cumsum())\n assert_eq(df.cummin(), ddf.cummin())\n assert_eq(df.cummax(), ddf.cummax())\n assert_eq(df.cumprod(), ddf.cumprod())\n\n assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))\n assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))\n assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))\n assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))\n\n assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))\n assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))\n assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))\n assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))\n\n assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))\n assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))\n assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))\n assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))\n\n\n@pytest.mark.parametrize(\n \"func\",\n [\n M.cumsum,\n M.cumprod,\n pytest.param(\n M.cummin,\n marks=[\n pytest.mark.xfail(\n reason=\"ValueError: Can only compare identically-labeled Series objects\"\n )\n ],\n ),\n pytest.param(\n M.cummax,\n marks=[\n pytest.mark.xfail(\n reason=\"ValueError: Can only compare identically-labeled Series objects\"\n )\n ],\n ),\n ],\n)\ndef test_cumulative_empty_partitions(func):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=4)\n assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, None, 5, 6, None, 7, 
8]})\n ddf = dd.from_pandas(df, npartitions=5)\n assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))\n\n\ndef test_dropna():\n df = pd.DataFrame(\n {\n \"x\": [np.nan, 2, 3, 4, np.nan, 6],\n \"y\": [1, 2, np.nan, 4, np.nan, np.nan],\n \"z\": [1, 2, 3, 4, np.nan, 6],\n },\n index=[10, 20, 30, 40, 50, 60],\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.x.dropna(), df.x.dropna())\n assert_eq(ddf.y.dropna(), df.y.dropna())\n assert_eq(ddf.z.dropna(), df.z.dropna())\n\n assert_eq(ddf.dropna(), df.dropna())\n assert_eq(ddf.dropna(how=\"all\"), df.dropna(how=\"all\"))\n assert_eq(ddf.dropna(subset=[\"x\"]), df.dropna(subset=[\"x\"]))\n assert_eq(ddf.dropna(subset=[\"y\", \"z\"]), df.dropna(subset=[\"y\", \"z\"]))\n assert_eq(\n ddf.dropna(subset=[\"y\", \"z\"], how=\"all\"),\n df.dropna(subset=[\"y\", \"z\"], how=\"all\"),\n )\n\n # threshold\n assert_eq(df.dropna(thresh=None), df.loc[[20, 40]])\n assert_eq(ddf.dropna(thresh=None), df.dropna(thresh=None))\n\n assert_eq(df.dropna(thresh=0), df.loc[:])\n assert_eq(ddf.dropna(thresh=0), df.dropna(thresh=0))\n\n assert_eq(df.dropna(thresh=1), df.loc[[10, 20, 30, 40, 60]])\n assert_eq(ddf.dropna(thresh=1), df.dropna(thresh=1))\n\n assert_eq(df.dropna(thresh=2), df.loc[[10, 20, 30, 40, 60]])\n assert_eq(ddf.dropna(thresh=2), df.dropna(thresh=2))\n\n assert_eq(df.dropna(thresh=3), df.loc[[20, 40]])\n assert_eq(ddf.dropna(thresh=3), df.dropna(thresh=3))\n\n # Regression test for https://github.com/dask/dask/issues/6540\n df = pd.DataFrame({\"_0\": [0, 0, np.nan], \"_1\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.dropna(subset=[\"_0\"]), df.dropna(subset=[\"_0\"]))\n\n\n@pytest.mark.parametrize(\"lower, upper\", [(2, 5), (2.5, 3.5)])\ndef test_clip(lower, upper):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]}\n )\n ddf = dd.from_pandas(df, 3)\n\n s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])\n ds = dd.from_pandas(s, 3)\n\n assert_eq(ddf.clip(lower=lower, upper=upper), df.clip(lower=lower, upper=upper))\n assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))\n assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))\n\n assert_eq(ds.clip(lower=lower, upper=upper), s.clip(lower=lower, upper=upper))\n assert_eq(ds.clip(lower=lower), s.clip(lower=lower))\n assert_eq(ds.clip(upper=upper), s.clip(upper=upper))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_squeeze_test_where_mask.ddf6.dd_from_pandas_pdf6_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_squeeze_test_where_mask.ddf6.dd_from_pandas_pdf6_2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 798, "end_line": 866, "span_ids": ["test_squeeze", "test_where_mask"], "tokens": 864}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_squeeze():\n df = pd.DataFrame({\"x\": [1, 3, 6]})\n df2 = pd.DataFrame({\"x\": [0]})\n s = pd.Series({\"test\": 0, \"b\": 100})\n\n ddf = dd.from_pandas(df, 3)\n ddf2 = dd.from_pandas(df2, 3)\n ds = dd.from_pandas(s, 2)\n\n assert_eq(df.squeeze(), ddf.squeeze())\n assert_eq(pd.Series([0], name=\"x\"), ddf2.squeeze())\n assert_eq(ds.squeeze(), s.squeeze())\n\n with pytest.raises(NotImplementedError) as info:\n ddf.squeeze(axis=0)\n msg = f\"{type(ddf)} does not support squeeze along axis 0\"\n assert msg in str(info.value)\n\n with pytest.raises(ValueError) as info:\n ddf.squeeze(axis=2)\n msg = f\"No axis {2} for object type {type(ddf)}\"\n assert msg in str(info.value)\n\n with pytest.raises(ValueError) as info:\n ddf.squeeze(axis=\"test\")\n msg = f\"No axis test for object type {type(ddf)}\"\n assert msg in str(info.value)\n\n\ndef test_where_mask():\n pdf1 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]}\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame({\"a\": [True, False, True] * 3, \"b\": [False, False, True] * 3})\n ddf2 = dd.from_pandas(pdf2, 2)\n\n # different index\n pdf3 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]},\n index=[0, 1, 2, 3, 4, 5, 6, 7, 8],\n )\n ddf3 = dd.from_pandas(pdf3, 2)\n pdf4 = pd.DataFrame(\n {\"a\": [True, False, True] * 3, \"b\": [False, False, True] * 3},\n index=[5, 6, 7, 8, 9, 10, 11, 12, 13],\n )\n ddf4 = dd.from_pandas(pdf4, 2)\n\n # different columns\n pdf5 = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"b\": [9, 4, 2, 6, 2, 3, 1, 6, 2],\n \"c\": [5, 6, 7, 8, 9, 10, 11, 12, 13],\n },\n index=[0, 1, 2, 3, 4, 5, 6, 7, 8],\n )\n ddf5 = dd.from_pandas(pdf5, 2)\n pdf6 = pd.DataFrame(\n {\n \"a\": [True, False, True] * 3,\n \"b\": [False, False, True] * 3,\n \"c\": [False] * 9,\n \"d\": [True] * 9,\n },\n index=[5, 6, 7, 8, 9, 10, 11, 12, 13],\n )\n ddf6 = dd.from_pandas(pdf6, 2)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask.cases_test_metadata_inference_single_partition_aligned_args.assert_eq_res_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_where_mask.cases_test_metadata_inference_single_partition_aligned_args.assert_eq_res_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 868, "end_line": 1067, "span_ids": ["test_map_partitions_column_info", "test_map_partitions_method_names", "test_map_partitions_with_delayed_collection", "test_map_partitions_names", "test_map_partitions_multi_argument", "test_map_partitions", "test_map_partitions_partition_info", "test_map_partitions_keeps_kwargs_readable", "test_metadata_inference_single_partition_aligned_args", "test_map_partitions_propagates_index_metadata", "test_map_partitions_type", "test_where_mask"], "tokens": 2069}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_where_mask():\n # ... other code\n\n cases = [\n (ddf1, ddf2, pdf1, pdf2),\n (ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),\n (ddf1, ddf4, pdf3, pdf4),\n (ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]), pdf3, pdf4),\n (ddf5, ddf6, pdf5, pdf6),\n (ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),\n # use pd.DataFrame as cond\n (ddf1, pdf2, pdf1, pdf2),\n (ddf1, pdf4, pdf3, pdf4),\n (ddf5, pdf6, pdf5, pdf6),\n ]\n\n for ddf, ddcond, pdf, pdcond in cases:\n assert isinstance(ddf, dd.DataFrame)\n assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))\n assert isinstance(pdf, pd.DataFrame)\n assert isinstance(pdcond, pd.DataFrame)\n\n assert_eq(ddf.where(ddcond), pdf.where(pdcond))\n assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))\n assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))\n assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))\n\n assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))\n assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))\n assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))\n assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))\n assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))\n assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))\n\n\ndef test_map_partitions_multi_argument():\n assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b), full.a + full.b)\n assert_eq(\n dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1), full.a + full.b + 1\n )\n\n\ndef test_map_partitions():\n assert_eq(d.map_partitions(lambda df: df, meta=d), full)\n assert_eq(d.map_partitions(lambda df: df), full)\n result = d.map_partitions(lambda df: df.sum(axis=1))\n layer = hlg_layer(result.dask, \"lambda-\")\n assert not layer.is_materialized(), layer\n assert_eq(result, full.sum(axis=1))\n\n assert_eq(\n 
d.map_partitions(lambda df: 1),\n pd.Series([1, 1, 1], dtype=np.int64),\n check_divisions=False,\n )\n x = Scalar({(\"x\", 0): 1}, \"x\", int)\n result = dd.map_partitions(lambda x: 2, x)\n assert result.dtype in (np.int32, np.int64) and result.compute() == 2\n result = dd.map_partitions(lambda x: 4.0, x)\n assert result.dtype == np.float64 and result.compute() == 4.0\n\n\ndef test_map_partitions_type():\n result = d.map_partitions(type).compute(scheduler=\"single-threaded\")\n assert isinstance(result, pd.Series)\n assert all(x == pd.DataFrame for x in result)\n\n\ndef test_map_partitions_partition_info():\n def f(df, partition_info=None):\n assert partition_info is not None\n assert \"number\" in partition_info\n assert \"division\" in partition_info\n assert dsk[(\"x\", partition_info[\"number\"])].equals(df)\n assert dsk[(\"x\", d.divisions.index(partition_info[\"division\"]))].equals(df)\n return df\n\n df = d.map_partitions(f, meta=d)\n layer = hlg_layer(df.dask, \"f-\")\n assert not layer.is_materialized()\n df.dask.validate()\n result = df.compute(scheduler=\"single-threaded\")\n assert type(result) == pd.DataFrame\n\n\ndef test_map_partitions_names():\n func = lambda x: x\n assert sorted(dd.map_partitions(func, d, meta=d).dask) == sorted(\n dd.map_partitions(func, d, meta=d).dask\n )\n assert sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) == sorted(\n dd.map_partitions(lambda x: x, d, meta=d, token=1).dask\n )\n\n func = lambda x, y: x\n assert sorted(dd.map_partitions(func, d, d, meta=d).dask) == sorted(\n dd.map_partitions(func, d, d, meta=d).dask\n )\n\n\ndef test_map_partitions_column_info():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n b = dd.map_partitions(lambda x: x, a, meta=a)\n tm.assert_index_equal(b.columns, a.columns)\n assert_eq(df, b)\n\n b = dd.map_partitions(lambda x: x, a.x, meta=a.x)\n assert b.name == a.x.name\n assert_eq(df.x, b)\n\n b = dd.map_partitions(lambda x: x, a.x, meta=a.x)\n assert b.name == a.x.name\n assert_eq(df.x, b)\n\n b = dd.map_partitions(lambda df: df.x + df.y, a)\n assert isinstance(b, dd.Series)\n assert b.dtype == \"i8\"\n\n b = dd.map_partitions(lambda df: df.x + 1, a, meta=(\"x\", \"i8\"))\n assert isinstance(b, dd.Series)\n assert b.name == \"x\"\n assert b.dtype == \"i8\"\n\n\ndef test_map_partitions_method_names():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n b = a.map_partitions(lambda x: x)\n assert isinstance(b, dd.DataFrame)\n tm.assert_index_equal(b.columns, a.columns)\n\n b = a.map_partitions(lambda df: df.x + 1)\n assert isinstance(b, dd.Series)\n assert b.dtype == \"i8\"\n\n b = a.map_partitions(lambda df: df.x + 1, meta=(\"x\", \"i8\"))\n assert isinstance(b, dd.Series)\n assert b.name == \"x\"\n assert b.dtype == \"i8\"\n\n\ndef test_map_partitions_propagates_index_metadata():\n index = pd.Series(list(\"abcde\"), name=\"myindex\")\n df = pd.DataFrame(\n {\"A\": np.arange(5, dtype=np.int32), \"B\": np.arange(10, 15, dtype=np.int32)},\n index=index,\n )\n ddf = dd.from_pandas(df, npartitions=2)\n res = ddf.map_partitions(\n lambda df: df.assign(C=df.A + df.B),\n meta=[(\"A\", \"i4\"), (\"B\", \"i4\"), (\"C\", \"i4\")],\n )\n sol = df.assign(C=df.A + df.B)\n assert_eq(res, sol)\n\n res = ddf.map_partitions(lambda df: df.rename_axis(\"newindex\"))\n sol = df.rename_axis(\"newindex\")\n assert_eq(res, sol)\n\n\n@pytest.mark.xfail(reason=\"now we use SubgraphCallables\")\ndef 
test_map_partitions_keeps_kwargs_readable():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n def f(s, x=1):\n return s + x\n\n b = a.x.map_partitions(f, x=5)\n\n # NOTE: we'd like to ensure that we keep the keyword arguments readable\n # in the dask graph\n assert \"['x', 5]\" in str(dict(b.dask)) or \"{'x': 5}\" in str(dict(b.dask))\n assert_eq(df.x + 5, b)\n\n assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name\n\n\ndef test_map_partitions_with_delayed_collection():\n # https://github.com/dask/dask/issues/5854\n df = pd.DataFrame(columns=list(\"abcdefghijk\"))\n ddf = dd.from_pandas(df, 2)\n ddf.dropna(subset=list(\"abcdefghijk\")).compute()\n # no error!\n\n\ndef test_metadata_inference_single_partition_aligned_args():\n # https://github.com/dask/dask/issues/3034\n # Previously broadcastable series functionality broke this\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n def check(df, df_x):\n assert len(df) == len(df_x)\n assert len(df) > 0\n return df\n\n res = dd.map_partitions(check, ddf, ddf.x)\n assert_eq(res, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_dataframes_test_drop_duplicates.with_pytest_raises_NotImp.d_drop_duplicates_keep_Fa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_align_dataframes_test_drop_duplicates.with_pytest_raises_NotImp.d_drop_duplicates_keep_Fa", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1070, "end_line": 1109, "span_ids": ["test_drop_duplicates", "test_align_dataframes"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_align_dataframes():\n df1 = pd.DataFrame({\"A\": [1, 2, 3, 3, 2, 3], \"B\": [1, 2, 3, 4, 5, 6]})\n df2 = pd.DataFrame({\"A\": [3, 1, 2], \"C\": [1, 2, 3]})\n\n def merge(a, b):\n res = pd.merge(a, b, left_on=\"A\", right_on=\"A\", how=\"left\")\n return res\n\n expected = merge(df1, df2)\n\n ddf1 = dd.from_pandas(df1, npartitions=2)\n actual = ddf1.map_partitions(merge, df2, align_dataframes=False)\n\n assert_eq(actual, expected, check_index=False, check_divisions=False)\n\n\ndef test_drop_duplicates():\n res = d.drop_duplicates()\n res2 = d.drop_duplicates(split_every=2)\n sol = full.drop_duplicates()\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = d.a.drop_duplicates()\n res2 = d.a.drop_duplicates(split_every=2)\n sol = full.a.drop_duplicates()\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = d.index.drop_duplicates()\n res2 = d.index.drop_duplicates(split_every=2)\n sol = full.index.drop_duplicates()\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n with 
pytest.raises(NotImplementedError):\n d.drop_duplicates(keep=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_subset_test_nbytes.assert_eq_d_index_nbytes_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_drop_duplicates_subset_test_nbytes.assert_eq_d_index_nbytes_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1112, "end_line": 1327, "span_ids": ["test_value_counts", "test_size", "test_shape", "test_value_counts_with_dropna", "test_dtype", "test_value_counts_with_normalize", "test_value_counts_with_normalize_and_dropna", "test_unique", "test_contains_frame", "test_nbytes", "test_len", "test_value_counts_not_sorted", "test_isin", "test_ndim", "test_get_partition", "test_drop_duplicates_subset"], "tokens": 2174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_drop_duplicates_subset():\n df = pd.DataFrame({\"x\": [1, 2, 3, 1, 2, 3], \"y\": [\"a\", \"a\", \"b\", \"b\", \"c\", \"c\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for kwarg in [{\"keep\": \"first\"}, {\"keep\": \"last\"}]:\n assert_eq(df.x.drop_duplicates(**kwarg), ddf.x.drop_duplicates(**kwarg))\n for ss in [[\"x\"], \"y\", [\"x\", \"y\"]]:\n assert_eq(\n df.drop_duplicates(subset=ss, **kwarg),\n ddf.drop_duplicates(subset=ss, **kwarg),\n )\n assert_eq(df.drop_duplicates(ss, **kwarg), ddf.drop_duplicates(ss, **kwarg))\n\n\ndef test_get_partition():\n pdf = pd.DataFrame(np.random.randn(10, 5), columns=list(\"abcde\"))\n ddf = dd.from_pandas(pdf, 3)\n assert ddf.divisions == (0, 4, 8, 9)\n\n # DataFrame\n div1 = ddf.get_partition(0)\n assert isinstance(div1, dd.DataFrame)\n assert_eq(div1, pdf.loc[0:3])\n div2 = ddf.get_partition(1)\n assert_eq(div2, pdf.loc[4:7])\n div3 = ddf.get_partition(2)\n assert_eq(div3, pdf.loc[8:9])\n assert len(div1) + len(div2) + len(div3) == len(pdf)\n\n # Series\n div1 = ddf.a.get_partition(0)\n assert isinstance(div1, dd.Series)\n assert_eq(div1, pdf.a.loc[0:3])\n div2 = ddf.a.get_partition(1)\n assert_eq(div2, pdf.a.loc[4:7])\n div3 = ddf.a.get_partition(2)\n assert_eq(div3, pdf.a.loc[8:9])\n assert len(div1) + len(div2) + len(div3) == len(pdf.a)\n\n with pytest.raises(ValueError):\n ddf.get_partition(-1)\n\n with pytest.raises(ValueError):\n ddf.get_partition(3)\n\n\ndef test_ndim():\n assert d.ndim == 2\n assert d.a.ndim == 1\n assert d.index.ndim == 1\n\n\ndef test_dtype():\n assert (d.dtypes == full.dtypes).all()\n\n\ndef test_value_counts():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, 3, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n result = ddf.x.value_counts()\n expected = df.x.value_counts()\n assert_eq(result, expected)\n result2 = ddf.x.value_counts(split_every=2)\n assert_eq(result2, expected)\n assert result._name != result2._name\n\n\ndef 
test_value_counts_not_sorted():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, 3, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n result = ddf.x.value_counts(sort=False)\n expected = df.x.value_counts(sort=False)\n assert_eq(result, expected)\n result2 = ddf.x.value_counts(split_every=2)\n assert_eq(result2, expected)\n assert result._name != result2._name\n\n\ndef test_value_counts_with_dropna():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, np.nan, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n if not PANDAS_GT_110:\n with pytest.raises(NotImplementedError, match=\"dropna is not a valid argument\"):\n ddf.x.value_counts(dropna=False)\n return\n\n result = ddf.x.value_counts(dropna=False)\n expected = df.x.value_counts(dropna=False)\n assert_eq(result, expected)\n result2 = ddf.x.value_counts(split_every=2, dropna=False)\n assert_eq(result2, expected)\n assert result._name != result2._name\n\n\ndef test_value_counts_with_normalize():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, 3, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n result = ddf.x.value_counts(normalize=True)\n expected = df.x.value_counts(normalize=True)\n assert_eq(result, expected)\n\n result2 = ddf.x.value_counts(split_every=2, normalize=True)\n assert_eq(result2, expected)\n assert result._name != result2._name\n\n result3 = ddf.x.value_counts(split_out=2, normalize=True)\n assert_eq(result3, expected)\n assert result._name != result3._name\n\n\n@pytest.mark.skipif(not PANDAS_GT_110, reason=\"dropna implemented in pandas 1.1.0\")\ndef test_value_counts_with_normalize_and_dropna():\n df = pd.DataFrame({\"x\": [1, 2, 1, 3, np.nan, 1, 4]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n result = ddf.x.value_counts(dropna=False, normalize=True)\n expected = df.x.value_counts(dropna=False, normalize=True)\n assert_eq(result, expected)\n\n result2 = ddf.x.value_counts(split_every=2, dropna=False, normalize=True)\n assert_eq(result2, expected)\n assert result._name != result2._name\n\n result3 = ddf.x.value_counts(split_out=2, dropna=False, normalize=True)\n assert_eq(result3, expected)\n assert result._name != result3._name\n\n result4 = ddf.x.value_counts(dropna=True, normalize=True, split_out=2)\n expected4 = df.x.value_counts(dropna=True, normalize=True)\n assert_eq(result4, expected4)\n\n\ndef test_unique():\n pdf = pd.DataFrame(\n {\n \"x\": [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],\n \"y\": [\"a\", \"c\", \"b\", np.nan, \"c\", \"b\", \"a\", \"d\", np.nan, \"a\"],\n }\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name=\"x\"))\n assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name=\"y\"))\n\n assert_eq(ddf.x.unique(split_every=2), pd.Series(pdf.x.unique(), name=\"x\"))\n assert_eq(ddf.y.unique(split_every=2), pd.Series(pdf.y.unique(), name=\"y\"))\n assert_eq(ddf.index.unique(), pdf.index.unique())\n\n assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name\n\n\ndef test_isin():\n f_list = [1, 2, 3]\n f_series = pd.Series(f_list)\n f_dict = {\"a\": [0, 3], \"b\": [1, 2]}\n\n # Series\n assert_eq(d.a.isin(f_list), full.a.isin(f_list))\n assert_eq(d.a.isin(f_series), full.a.isin(f_series))\n with pytest.raises(NotImplementedError):\n d.a.isin(d.a)\n\n # Index\n da.utils.assert_eq(d.index.isin(f_list), full.index.isin(f_list))\n da.utils.assert_eq(d.index.isin(f_series), full.index.isin(f_series))\n with pytest.raises(NotImplementedError):\n d.a.isin(d.a)\n\n # DataFrame test\n assert_eq(d.isin(f_list), full.isin(f_list))\n assert_eq(d.isin(f_dict), full.isin(f_dict))\n for 
obj in [d, f_series, full]:\n with pytest.raises(NotImplementedError):\n d.isin(obj)\n\n\ndef test_contains_frame():\n df = dd.from_pandas(pd.DataFrame({\"A\": [1, 2], 0: [3, 4]}), 1)\n assert \"A\" in df\n assert 0 in df\n assert \"B\" not in df\n assert 1 not in df\n\n\ndef test_len():\n assert len(d) == len(full)\n assert len(d.a) == len(full.a)\n assert len(dd.from_pandas(pd.DataFrame(), npartitions=1)) == 0\n assert len(dd.from_pandas(pd.DataFrame(columns=[1, 2]), npartitions=1)) == 0\n # Regression test for https://github.com/dask/dask/issues/6110\n assert len(dd.from_pandas(pd.DataFrame(columns=[\"foo\", \"foo\"]), npartitions=1)) == 0\n\n\ndef test_size():\n assert_eq(d.size, full.size)\n assert_eq(d.a.size, full.a.size)\n assert_eq(d.index.size, full.index.size)\n\n\ndef test_shape():\n result = d.shape\n assert_eq((result[0].compute(), result[1]), (len(full), len(full.columns)))\n assert_eq(dd.compute(result)[0], (len(full), len(full.columns)))\n\n result = d.a.shape\n assert_eq(result[0].compute(), len(full.a))\n assert_eq(dd.compute(result)[0], (len(full.a),))\n\n sh = dd.from_pandas(pd.DataFrame(index=[1, 2, 3]), npartitions=2).shape\n assert (sh[0].compute(), sh[1]) == (3, 0)\n sh = dd.from_pandas(pd.DataFrame({\"a\": [], \"b\": []}, index=[]), npartitions=1).shape\n assert (sh[0].compute(), sh[1]) == (0, 2)\n\n\ndef test_nbytes():\n assert_eq(d.a.nbytes, full.a.nbytes)\n assert_eq(d.index.nbytes, full.index.nbytes)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_test_empty_quantile.assert_eq_result_exp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_quantile_test_empty_quantile.assert_eq_result_exp_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1330, "end_line": 1400, "span_ids": ["test_quantile", "test_empty_quantile", "test_quantile_missing"], "tokens": 647}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,expected\",\n [(\"tdigest\", (0.35, 3.80, 2.5, 6.5, 2.0)), (\"dask\", (0.0, 4.0, 1.2, 6.2, 2.0))],\n)\ndef test_quantile(method, expected):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n # series / multiple\n result = d.b.quantile([0.3, 0.7], method=method)\n\n exp = full.b.quantile([0.3, 0.7]) # result may different\n assert len(result) == 2\n assert result.divisions == (0.3, 0.7)\n assert_eq(result.index, exp.index)\n assert isinstance(result, dd.Series)\n\n result = result.compute()\n assert isinstance(result, pd.Series)\n\n assert result.iloc[0] == pytest.approx(expected[0])\n assert result.iloc[1] == pytest.approx(expected[1])\n\n # index\n s = pd.Series(np.arange(10), index=np.arange(10))\n ds = dd.from_pandas(s, 2)\n\n result = ds.index.quantile([0.3, 0.7], method=method)\n exp = s.quantile([0.3, 0.7])\n 
assert len(result) == 2\n assert result.divisions == (0.3, 0.7)\n assert_eq(result.index, exp.index)\n assert isinstance(result, dd.Series)\n\n result = result.compute()\n assert isinstance(result, pd.Series)\n assert result.iloc[0] == pytest.approx(expected[2])\n assert result.iloc[1] == pytest.approx(expected[3])\n\n # series / single\n result = d.b.quantile(0.5, method=method)\n assert isinstance(result, dd.core.Scalar)\n result = result.compute()\n assert result == expected[4]\n\n\n@pytest.mark.parametrize(\"method\", [\"tdigest\", \"dask\"])\ndef test_quantile_missing(method):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n df = pd.DataFrame({\"A\": [0, np.nan, 2]})\n ddf = dd.from_pandas(df, 2)\n expected = df.quantile()\n result = ddf.quantile(method=method)\n assert_eq(result, expected)\n\n expected = df.A.quantile()\n result = ddf.A.quantile(method=method)\n assert_eq(result, expected)\n\n\n@pytest.mark.parametrize(\"method\", [\"tdigest\", \"dask\"])\ndef test_empty_quantile(method):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n result = d.b.quantile([], method=method)\n exp = full.b.quantile([])\n assert result.divisions == (None, None)\n\n assert result.name == \"b\"\n assert result.compute().name == \"b\"\n assert_eq(result, exp)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_quantile_test_map.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dataframe_quantile_test_map.None_6", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1403, "end_line": 1627, "span_ids": ["test_assign_dtypes", "test_assign_callable", "test_quantile_for_possibly_unsorted_q", "test_map", "test_dataframe_quantile", "test_index", "test_quantile_tiny_partitions", "test_quantile_trivial_partitions", "test_assign"], "tokens": 2087}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,expected\",\n [\n (\n \"tdigest\",\n (\n pd.Series([9.5, 29.5, 19.5], index=[\"A\", \"X\", \"B\"]),\n pd.DataFrame(\n [[4.5, 24.5, 14.5], [14.5, 34.5, 24.5]],\n index=[0.25, 0.75],\n columns=[\"A\", \"X\", \"B\"],\n ),\n ),\n ),\n (\n \"dask\",\n (\n pd.Series([7.0, 27.0, 17.0], index=[\"A\", \"X\", \"B\"]),\n pd.DataFrame(\n [[1.50, 21.50, 11.50], [14.0, 34.0, 24.0]],\n index=[0.25, 0.75],\n columns=[\"A\", \"X\", \"B\"],\n ),\n ),\n ),\n ],\n)\ndef test_dataframe_quantile(method, expected):\n if method == \"tdigest\":\n pytest.importorskip(\"crick\")\n # column X is for test column order and result division\n df = pd.DataFrame(\n {\n \"A\": np.arange(20),\n \"X\": np.arange(20, 40),\n \"B\": np.arange(10, 30),\n \"C\": [\"a\", \"b\", \"c\", \"d\"] * 5,\n },\n columns=[\"A\", \"X\", \"B\", \"C\"],\n )\n ddf = dd.from_pandas(df, 3)\n\n result = 
ddf.quantile(method=method)\n assert result.npartitions == 1\n assert result.divisions == (\"A\", \"X\")\n\n result = result.compute()\n assert isinstance(result, pd.Series)\n assert result.name == 0.5\n tm.assert_index_equal(result.index, pd.Index([\"A\", \"X\", \"B\"]))\n assert (result == expected[0]).all()\n\n result = ddf.quantile([0.25, 0.75], method=method)\n assert result.npartitions == 1\n assert result.divisions == (0.25, 0.75)\n\n result = result.compute()\n assert isinstance(result, pd.DataFrame)\n tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))\n tm.assert_index_equal(result.columns, pd.Index([\"A\", \"X\", \"B\"]))\n\n assert (result == expected[1]).all().all()\n\n assert_eq(ddf.quantile(axis=1, method=method), df.quantile(axis=1))\n pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1, method=method))\n\n\ndef test_quantile_for_possibly_unsorted_q():\n \"\"\"check that quantile is giving correct answers even when quantile parameter, q, may be unsorted.\n\n See https://github.com/dask/dask/issues/4642.\n \"\"\"\n # prepare test case where percentiles should equal values\n A = da.arange(0, 101)\n ds = dd.from_dask_array(A)\n\n for q in [\n [0.25, 0.50, 0.75],\n [0.25, 0.50, 0.75, 0.99],\n [0.75, 0.5, 0.25],\n [0.25, 0.99, 0.75, 0.50],\n ]:\n r = ds.quantile(q).compute()\n assert_eq(r.loc[0.25], 25.0)\n assert_eq(r.loc[0.50], 50.0)\n assert_eq(r.loc[0.75], 75.0)\n\n r = ds.quantile([0.25]).compute()\n assert_eq(r.loc[0.25], 25.0)\n\n r = ds.quantile(0.25).compute()\n assert_eq(r, 25.0)\n\n\ndef test_quantile_tiny_partitions():\n \"\"\"See https://github.com/dask/dask/issues/6551\"\"\"\n df = pd.DataFrame({\"a\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=3)\n r = ddf[\"a\"].quantile(0.5).compute()\n assert r == 2\n\n\ndef test_quantile_trivial_partitions():\n \"\"\"See https://github.com/dask/dask/issues/2792\"\"\"\n df = pd.DataFrame({\"A\": []})\n ddf = dd.from_pandas(df, npartitions=2)\n expected = df.quantile(0.5)\n assert_eq(ddf.quantile(0.5), expected)\n\n df = pd.DataFrame({\"A\": [np.nan, np.nan, np.nan, np.nan]})\n ddf = dd.from_pandas(df, npartitions=2)\n expected = df.quantile(0.5)\n assert_eq(ddf.quantile(0.5), expected)\n\n\ndef test_index():\n assert_eq(d.index, full.index)\n\n\ndef test_assign():\n df = pd.DataFrame(\n {\"a\": range(8), \"b\": [float(i) for i in range(10, 18)]},\n index=pd.Index(list(\"abcdefgh\")),\n )\n ddf = dd.from_pandas(df, npartitions=3)\n ddf_unknown = dd.from_pandas(df, npartitions=3, sort=False)\n assert not ddf_unknown.known_divisions\n\n res = ddf.assign(\n c=1,\n d=\"string\",\n e=ddf.a.sum(),\n f=ddf.a + ddf.b,\n g=lambda x: x.a + x.c,\n dt=pd.Timestamp(2018, 2, 13),\n )\n res_unknown = ddf_unknown.assign(\n c=1,\n d=\"string\",\n e=ddf_unknown.a.sum(),\n f=ddf_unknown.a + ddf_unknown.b,\n g=lambda x: x.a + x.c,\n dt=pd.Timestamp(2018, 2, 13),\n )\n sol = df.assign(\n c=1,\n d=\"string\",\n e=df.a.sum(),\n f=df.a + df.b,\n g=lambda x: x.a + x.c,\n dt=pd.Timestamp(2018, 2, 13),\n )\n assert_eq(res, sol)\n assert_eq(res_unknown, sol)\n\n res = ddf.assign(c=df.a + 1)\n assert_eq(res, df.assign(c=df.a + 1))\n\n res = ddf.assign(c=ddf.index)\n assert_eq(res, df.assign(c=df.index))\n\n # divisions unknown won't work with pandas\n with pytest.raises(ValueError):\n ddf_unknown.assign(c=df.a + 1)\n\n # unsupported type\n with pytest.raises(TypeError):\n ddf.assign(c=list(range(9)))\n\n # Fails when assigning known divisions to unknown divisions\n with pytest.raises(ValueError):\n 
ddf_unknown.assign(foo=ddf.a)\n # Fails when assigning unknown divisions to known divisions\n with pytest.raises(ValueError):\n ddf.assign(foo=ddf_unknown.a)\n\n df = pd.DataFrame({\"A\": [1, 2]})\n df.assign(B=lambda df: df[\"A\"], C=lambda df: df.A + df.B)\n\n ddf = dd.from_pandas(pd.DataFrame({\"A\": [1, 2]}), npartitions=2)\n ddf.assign(B=lambda df: df[\"A\"], C=lambda df: df.A + df.B)\n\n assert_eq(df, ddf)\n\n\ndef test_assign_callable():\n df = dd.from_pandas(pd.DataFrame({\"A\": range(10)}), npartitions=2)\n a = df.assign(B=df.A.shift())\n b = df.assign(B=lambda x: x.A.shift())\n assert_eq(a, b)\n\n\ndef test_assign_dtypes():\n ddf = dd.from_pandas(\n pd.DataFrame(\n data={\"col1\": [\"a\", \"b\"], \"col2\": [1, 2]}, columns=[\"col1\", \"col2\"]\n ),\n npartitions=2,\n )\n\n new_col = {\"col3\": pd.Series([\"0\", \"1\"])}\n res = ddf.assign(**new_col)\n\n assert_eq(\n res.dtypes,\n pd.Series(data=[\"object\", \"int64\", \"object\"], index=[\"col1\", \"col2\", \"col3\"]),\n )\n\n\ndef test_map():\n df = pd.DataFrame(\n {\"a\": range(9), \"b\": [4, 5, 6, 1, 2, 3, 0, 0, 0]},\n index=pd.Index([0, 1, 3, 5, 6, 8, 9, 9, 9], name=\"myindex\"),\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(ddf.a.map(lambda x: x + 1), df.a.map(lambda x: x + 1))\n lk = {v: v + 1 for v in df.a.values}\n assert_eq(ddf.a.map(lk), df.a.map(lk))\n assert_eq(ddf.b.map(lk), df.b.map(lk))\n lk = pd.Series(lk)\n assert_eq(ddf.a.map(lk), df.a.map(lk))\n assert_eq(ddf.b.map(lk), df.b.map(lk))\n assert_eq(ddf.b.map(lk, meta=ddf.b), df.b.map(lk))\n assert_eq(ddf.b.map(lk, meta=(\"b\", \"i8\")), df.b.map(lk))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_concat_test_align.None_15": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_concat_test_align.None_15", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1630, "end_line": 1721, "span_ids": ["test_with_min_count", "test_args", "test_align", "test_concat", "test_unknown_divisions", "test_known_divisions"], "tokens": 1045}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat():\n x = _concat([pd.DataFrame(columns=[\"a\", \"b\"]), pd.DataFrame(columns=[\"a\", \"b\"])])\n assert list(x.columns) == [\"a\", \"b\"]\n assert len(x) == 0\n\n\ndef test_args():\n e = d.assign(c=d.a + 1)\n f = type(e)(*e._args)\n assert_eq(e, f)\n assert_eq(d.a, type(d.a)(*d.a._args))\n assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))\n\n\ndef test_known_divisions():\n assert d.known_divisions\n df = dd.DataFrame(dsk, \"x\", meta, divisions=[None, None, None])\n assert not df.known_divisions\n\n\ndef test_unknown_divisions():\n dsk = {\n (\"x\", 0): pd.DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]}),\n (\"x\", 1): pd.DataFrame({\"a\": [4, 5, 6], \"b\": [3, 2, 1]}),\n (\"x\", 2): 
pd.DataFrame({\"a\": [7, 8, 9], \"b\": [0, 0, 0]}),\n }\n meta = make_meta({\"a\": \"i8\", \"b\": \"i8\"}, parent_meta=pd.DataFrame())\n d = dd.DataFrame(dsk, \"x\", meta, [None, None, None, None])\n full = d.compute(scheduler=\"sync\")\n\n assert_eq(d.a.sum(), full.a.sum())\n assert_eq(d.a + d.b + 1, full.a + full.b + 1)\n\n\ndef test_with_min_count():\n dfs = [\n pd.DataFrame([[None, 2, 3], [None, 5, 6], [5, 4, 9]]),\n pd.DataFrame([[2, None, None], [None, 5, 6], [5, 4, 9]]),\n ]\n ddfs = [dd.from_pandas(df, npartitions=4) for df in dfs]\n axes = [0, 1]\n\n for df, ddf in zip(dfs, ddfs):\n for axis in axes:\n for min_count in [0, 1, 2, 3]:\n assert_eq(\n df.sum(min_count=min_count, axis=axis),\n ddf.sum(min_count=min_count, axis=axis),\n )\n assert_eq(\n df.prod(min_count=min_count, axis=axis),\n ddf.prod(min_count=min_count, axis=axis),\n )\n\n\n@pytest.mark.parametrize(\"join\", [\"inner\", \"outer\", \"left\", \"right\"])\ndef test_align(join):\n df1a = pd.DataFrame(\n {\"A\": np.random.randn(10), \"B\": np.random.randn(10)},\n index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],\n )\n\n df1b = pd.DataFrame(\n {\"A\": np.random.randn(10), \"B\": np.random.randn(10)},\n index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],\n )\n ddf1a = dd.from_pandas(df1a, 3)\n ddf1b = dd.from_pandas(df1b, 3)\n\n # DataFrame\n res1, res2 = ddf1a.align(ddf1b, join=join)\n exp1, exp2 = df1a.align(df1b, join=join)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # Series\n res1, res2 = ddf1a[\"A\"].align(ddf1b[\"B\"], join=join)\n exp1, exp2 = df1a[\"A\"].align(df1b[\"B\"], join=join)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # DataFrame with fill_value\n res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)\n exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)\n\n # Series\n res1, res2 = ddf1a[\"A\"].align(ddf1b[\"B\"], join=join, fill_value=1)\n exp1, exp2 = df1a[\"A\"].align(df1b[\"B\"], join=join, fill_value=1)\n assert assert_eq(res1, exp1)\n assert assert_eq(res2, exp2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_test_eval.with_pytest_raises_NotImp.d_eval_z_x_y_inpla": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_fillna_test_eval.with_pytest_raises_NotImp.d_eval_z_x_y_inpla", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2285, "end_line": 2492, "span_ids": ["test_fillna_multi_dataframe", "test_fillna_duplicate_index", "test_from_delayed_empty_meta_provided", "test_ffill_bfill", "test_fillna", "test_eval", "test_empty_max", "test_query", "test_sample_without_replacement", "test_sample_raises", "test_fillna_series_types", "test_delayed_roundtrip", "test_sample", "test_from_delayed_lazy_if_meta_provided"], "tokens": 2055}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fillna():\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=5, sort=False)\n\n assert_eq(ddf.fillna(100), df.fillna(100))\n assert_eq(ddf.A.fillna(100), df.A.fillna(100))\n assert_eq(ddf.A.fillna(ddf[\"A\"].mean()), df.A.fillna(df[\"A\"].mean()))\n\n assert_eq(ddf.fillna(method=\"pad\"), df.fillna(method=\"pad\"))\n assert_eq(ddf.A.fillna(method=\"pad\"), df.A.fillna(method=\"pad\"))\n\n assert_eq(ddf.fillna(method=\"bfill\"), df.fillna(method=\"bfill\"))\n assert_eq(ddf.A.fillna(method=\"bfill\"), df.A.fillna(method=\"bfill\"))\n\n assert_eq(ddf.fillna(method=\"pad\", limit=2), df.fillna(method=\"pad\", limit=2))\n assert_eq(ddf.A.fillna(method=\"pad\", limit=2), df.A.fillna(method=\"pad\", limit=2))\n\n assert_eq(ddf.fillna(method=\"bfill\", limit=2), df.fillna(method=\"bfill\", limit=2))\n assert_eq(\n ddf.A.fillna(method=\"bfill\", limit=2), df.A.fillna(method=\"bfill\", limit=2)\n )\n\n assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))\n assert_eq(ddf.fillna(method=\"pad\", axis=1), df.fillna(method=\"pad\", axis=1))\n assert_eq(\n ddf.fillna(method=\"pad\", limit=2, axis=1),\n df.fillna(method=\"pad\", limit=2, axis=1),\n )\n\n pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))\n pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))\n pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))\n\n df = _compat.makeMissingDataframe()\n df.iloc[:15, 0] = np.nan # all NaN partition\n ddf = dd.from_pandas(df, npartitions=5, sort=False)\n pytest.raises(ValueError, lambda: ddf.fillna(method=\"pad\").compute())\n assert_eq(df.fillna(method=\"pad\", limit=3), ddf.fillna(method=\"pad\", limit=3))\n\n\n@pytest.mark.parametrize(\"optimize\", [True, False])\ndef test_delayed_roundtrip(optimize: bool):\n df1 = d + 1 + 1\n delayed = df1.to_delayed(optimize_graph=optimize)\n\n for x in delayed:\n assert x.__dask_layers__() == (\n \"delayed-\" + df1._name if optimize else df1._name,\n )\n x.dask.validate()\n\n assert len(delayed) == df1.npartitions\n assert len(delayed[0].dask.layers) == (1 if optimize else 3)\n\n dm = d.a.mean().to_delayed(optimize_graph=optimize)\n\n delayed2 = [x * 2 - dm for x in delayed]\n\n for x in delayed2:\n x.dask.validate()\n\n df3 = dd.from_delayed(delayed2, meta=df1, divisions=df1.divisions)\n df4 = df3 - 1 - 1\n\n df4.dask.validate()\n assert_eq(df4, (full + 2) * 2 - full.a.mean() - 2)\n\n\ndef test_from_delayed_lazy_if_meta_provided():\n \"\"\"Ensure that the graph is 100% lazily evaluted if meta is provided\"\"\"\n\n @dask.delayed\n def raise_exception():\n raise RuntimeError()\n\n tasks = [raise_exception()]\n ddf = dd.from_delayed(tasks, meta=dict(a=float))\n\n with pytest.raises(RuntimeError):\n ddf.compute()\n\n\ndef test_from_delayed_empty_meta_provided():\n ddf = dd.from_delayed([], meta=dict(a=float))\n expected = pd.DataFrame({\"a\": [0.1]}).iloc[:0]\n assert_eq(ddf, expected)\n\n\ndef test_fillna_duplicate_index():\n @dask.delayed\n def f():\n return pd.DataFrame(dict(a=[1.0], b=[np.NaN]))\n\n ddf = dd.from_delayed([f(), f()], meta=dict(a=float, b=float))\n ddf.b = ddf.b.fillna(ddf.a)\n ddf.compute()\n\n\ndef test_fillna_multi_dataframe():\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=5, sort=False)\n\n assert_eq(ddf.A.fillna(ddf.B), df.A.fillna(df.B))\n assert_eq(ddf.B.fillna(ddf.A), df.B.fillna(df.A))\n\n\ndef test_ffill_bfill():\n df = _compat.makeMissingDataframe()\n 
ddf = dd.from_pandas(df, npartitions=5, sort=False)\n\n assert_eq(ddf.ffill(), df.ffill())\n assert_eq(ddf.bfill(), df.bfill())\n assert_eq(ddf.ffill(axis=1), df.ffill(axis=1))\n assert_eq(ddf.bfill(axis=1), df.bfill(axis=1))\n\n\ndef test_fillna_series_types():\n # https://github.com/dask/dask/issues/2809\n df = pd.DataFrame({\"A\": [1, np.nan, 3], \"B\": [1, np.nan, 3]})\n ddf = dd.from_pandas(df, npartitions=2)\n fill_value = pd.Series([1, 10], index=[\"A\", \"C\"])\n assert_eq(ddf.fillna(fill_value), df.fillna(fill_value))\n\n\ndef test_sample():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n\n b = a.sample(frac=0.5)\n\n assert_eq(b, b)\n\n c = a.sample(frac=0.5, random_state=1234)\n d = a.sample(frac=0.5, random_state=1234)\n assert_eq(c, d)\n\n assert a.sample(frac=0.5)._name != a.sample(frac=0.5)._name\n\n\ndef test_sample_without_replacement():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n b = a.sample(frac=0.7, replace=False)\n bb = b.index.compute()\n assert len(bb) == len(set(bb))\n\n\ndef test_sample_raises():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, None, 6], \"y\": list(\"abdabd\")},\n index=[10, 20, 30, 40, 50, 60],\n )\n a = dd.from_pandas(df, 2)\n\n # Make sure frac is replaced with n when 0 <= n <= 1\n # This is so existing code (i.e. ddf.sample(0.5)) won't break\n with pytest.warns(UserWarning):\n b = a.sample(0.5, random_state=1234)\n c = a.sample(frac=0.5, random_state=1234)\n assert_eq(b, c)\n\n with pytest.raises(ValueError):\n a.sample(n=10)\n\n # Make sure frac is provided\n with pytest.raises(ValueError):\n a.sample(frac=None)\n\n\ndef test_empty_max():\n meta = make_meta({\"x\": \"i8\"}, parent_meta=pd.DataFrame())\n a = dd.DataFrame(\n {(\"x\", 0): pd.DataFrame({\"x\": [1]}), (\"x\", 1): pd.DataFrame({\"x\": []})},\n \"x\",\n meta,\n [None, None, None],\n )\n assert_eq(a.x.max(), 1)\n\n\ndef test_query():\n pytest.importorskip(\"numexpr\")\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.query(\"x**2 > y\"), df.query(\"x**2 > y\"))\n assert_eq(\n ddf.query(\"x**2 > @value\", local_dict={\"value\": 4}),\n df.query(\"x**2 > @value\", local_dict={\"value\": 4}),\n )\n\n\ndef test_eval():\n pytest.importorskip(\"numexpr\")\n\n p = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n d = dd.from_pandas(p, npartitions=2)\n\n assert_eq(p.eval(\"x + y\"), d.eval(\"x + y\"))\n assert_eq(p.eval(\"z = x + y\", inplace=False), d.eval(\"z = x + y\", inplace=False))\n with pytest.raises(NotImplementedError):\n d.eval(\"z = x + y\", inplace=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_select_dtypes_test_reduction_method.assert_eq_res_pd_DataFra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_select_dtypes_test_reduction_method.assert_eq_res_pd_DataFra", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", 
"category": "test", "start_line": 2495, "end_line": 2705, "span_ids": ["test_aca_split_every", "test_select_dtypes", "test_deterministic_apply_concat_apply_names", "test_aca_meta_infer", "test_reduction_method"], "tokens": 1894}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"include, exclude\",\n [\n ([int], None),\n (None, [int]),\n ([np.number, object], [float]),\n ([\"datetime\"], None),\n ],\n)\ndef test_select_dtypes(include, exclude):\n n = 10\n df = pd.DataFrame(\n {\n \"cint\": [1] * n,\n \"cstr\": [\"a\"] * n,\n \"clfoat\": [1.0] * n,\n \"cdt\": pd.date_range(\"2016-01-01\", periods=n),\n }\n )\n a = dd.from_pandas(df, npartitions=2)\n result = a.select_dtypes(include=include, exclude=exclude)\n expected = df.select_dtypes(include=include, exclude=exclude)\n assert_eq(result, expected)\n\n # count dtypes\n tm.assert_series_equal(a.dtypes.value_counts(), df.dtypes.value_counts())\n\n tm.assert_series_equal(result.dtypes.value_counts(), expected.dtypes.value_counts())\n\n\ndef test_deterministic_apply_concat_apply_names():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n a = dd.from_pandas(df, npartitions=2)\n\n assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)\n assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)\n assert sorted(a.x.drop_duplicates().dask) == sorted(a.x.drop_duplicates().dask)\n assert sorted(a.groupby(\"x\").y.mean().dask) == sorted(a.groupby(\"x\").y.mean().dask)\n\n # Test aca without passing in token string\n f = lambda a: a.nlargest(5)\n f2 = lambda a: a.nlargest(3)\n assert sorted(aca(a.x, f, f, a.x._meta).dask) != sorted(\n aca(a.x, f2, f2, a.x._meta).dask\n )\n assert sorted(aca(a.x, f, f, a.x._meta).dask) == sorted(\n aca(a.x, f, f, a.x._meta).dask\n )\n\n # Test aca with keywords\n def chunk(x, c_key=0, both_key=0):\n return x.sum() + c_key + both_key\n\n def agg(x, a_key=0, both_key=0):\n return pd.Series(x).sum() + a_key + both_key\n\n c_key = 2\n a_key = 3\n both_key = 4\n\n res = aca(\n a.x,\n chunk=chunk,\n aggregate=agg,\n chunk_kwargs={\"c_key\": c_key},\n aggregate_kwargs={\"a_key\": a_key},\n both_key=both_key,\n )\n assert sorted(res.dask) == sorted(\n aca(\n a.x,\n chunk=chunk,\n aggregate=agg,\n chunk_kwargs={\"c_key\": c_key},\n aggregate_kwargs={\"a_key\": a_key},\n both_key=both_key,\n ).dask\n )\n assert sorted(res.dask) != sorted(\n aca(\n a.x,\n chunk=chunk,\n aggregate=agg,\n chunk_kwargs={\"c_key\": c_key},\n aggregate_kwargs={\"a_key\": a_key},\n both_key=0,\n ).dask\n )\n\n assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)\n\n\ndef test_aca_meta_infer():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n def chunk(x, y, constant=1.0):\n return (x + y + constant).head()\n\n def agg(x):\n return x.head()\n\n res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, chunk_kwargs=dict(constant=2.0))\n sol = (df + 2.0 + 2.0).head()\n assert_eq(res, sol)\n\n # Should infer as a scalar\n res = aca(\n [ddf.x], chunk=lambda x: pd.Series([x.sum()]), aggregate=lambda x: x.sum()\n )\n assert isinstance(res, Scalar)\n assert res.compute() == df.x.sum()\n\n\ndef test_aca_split_every():\n df = pd.DataFrame({\"x\": [1] 
* 60})\n ddf = dd.from_pandas(df, npartitions=15)\n\n def chunk(x, y, constant=0):\n return x.sum() + y + constant\n\n def combine(x, constant=0):\n return x.sum() + constant + 1\n\n def agg(x, constant=0):\n return x.sum() + constant + 2\n\n f = lambda n: aca(\n [ddf, 2.0],\n chunk=chunk,\n aggregate=agg,\n combine=combine,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n split_every=n,\n )\n\n assert_max_deps(f(3), 3)\n assert_max_deps(f(4), 4, False)\n assert_max_deps(f(5), 5)\n assert f(15).dask.keys() == f(ddf.npartitions).dask.keys()\n\n r3 = f(3)\n r4 = f(4)\n assert r3._name != r4._name\n # Only intersect on reading operations\n assert len(r3.dask.keys() & r4.dask.keys()) == len(ddf.dask)\n\n # Keywords are different for each step\n assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)\n # Keywords are same for each step\n res = aca(\n [ddf, 2.0],\n chunk=chunk,\n aggregate=agg,\n combine=combine,\n constant=3.0,\n split_every=3,\n )\n assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)\n # No combine provided, combine is agg\n res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)\n assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)\n\n # split_every must be >= 2\n with pytest.raises(ValueError):\n f(1)\n\n # combine_kwargs with no combine provided\n with pytest.raises(ValueError):\n aca(\n [ddf, 2.0],\n chunk=chunk,\n aggregate=agg,\n split_every=3,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n )\n\n\ndef test_reduction_method():\n df = pd.DataFrame({\"x\": range(50), \"y\": range(50, 100)})\n ddf = dd.from_pandas(df, npartitions=4)\n\n chunk = lambda x, val=0: (x >= val).sum()\n agg = lambda x: x.sum()\n\n # Output of chunk is a scalar\n res = ddf.x.reduction(chunk, aggregate=agg)\n assert_eq(res, df.x.count())\n\n # Output of chunk is a series\n res = ddf.reduction(chunk, aggregate=agg)\n assert res._name == ddf.reduction(chunk, aggregate=agg)._name\n assert_eq(res, df.count())\n\n # Test with keywords\n res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={\"val\": 25})\n assert res2._name == ddf.reduction(chunk, aggregate=agg, chunk_kwargs={\"val\": 25})._name\n assert res2._name != res._name\n assert_eq(res2, (df >= 25).sum())\n\n # Output of chunk is a dataframe\n def sum_and_count(x):\n return pd.DataFrame({\"sum\": x.sum(), \"count\": x.count()})\n\n res = ddf.reduction(sum_and_count, aggregate=lambda x: x.groupby(level=0).sum())\n\n assert_eq(res, pd.DataFrame({\"sum\": df.sum(), \"count\": df.count()}))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_split_every_test_to_dask_array.assert_result_chunks_e": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_reduction_method_split_every_test_to_dask_array.assert_result_chunks_e", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2708, "end_line": 2941, "span_ids": ["test_to_frame", "test_drop_axis_1", 
"test_rename_function", "test_gh_517", "test_rename_dict", "test_to_dask_array", "test_gh6305", "test_to_timestamp", "test_reduction_method_split_every", "test_drop_columns", "test_pipe", "test_to_dask_array_unknown", "test_to_dask_array_raises", "test_gh580", "test_rename_index"], "tokens": 2068}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reduction_method_split_every():\n df = pd.Series([1] * 60)\n ddf = dd.from_pandas(df, npartitions=15)\n\n def chunk(x, constant=0):\n return x.sum() + constant\n\n def combine(x, constant=0):\n return x.sum() + constant + 1\n\n def agg(x, constant=0):\n return x.sum() + constant + 2\n\n f = lambda n: ddf.reduction(\n chunk,\n aggregate=agg,\n combine=combine,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n split_every=n,\n )\n\n assert_max_deps(f(3), 3)\n assert_max_deps(f(4), 4, False)\n assert_max_deps(f(5), 5)\n assert f(15).dask.keys() == f(ddf.npartitions).dask.keys()\n\n r3 = f(3)\n r4 = f(4)\n assert r3._name != r4._name\n # Only intersect on reading operations\n assert len(r3.dask.keys() & r4.dask.keys()) == len(ddf.dask)\n\n # Keywords are different for each step\n assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)\n # Keywords are same for each step\n res = ddf.reduction(\n chunk, aggregate=agg, combine=combine, constant=3.0, split_every=3\n )\n assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)\n # No combine provided, combine is agg\n res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)\n assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)\n\n # split_every must be >= 2\n with pytest.raises(ValueError):\n f(1)\n\n # combine_kwargs with no combine provided\n with pytest.raises(ValueError):\n ddf.reduction(\n chunk,\n aggregate=agg,\n split_every=3,\n chunk_kwargs=dict(constant=1.0),\n combine_kwargs=dict(constant=2.0),\n aggregate_kwargs=dict(constant=3.0),\n )\n\n\ndef test_pipe():\n df = pd.DataFrame({\"x\": range(50), \"y\": range(50, 100)})\n ddf = dd.from_pandas(df, npartitions=4)\n\n def f(x, y, z=0):\n return x + y + z\n\n assert_eq(ddf.pipe(f, 1, z=2), f(ddf, 1, z=2))\n assert_eq(ddf.x.pipe(f, 1, z=2), f(ddf.x, 1, z=2))\n\n\ndef test_gh_517():\n arr = np.random.randn(100, 2)\n df = pd.DataFrame(arr, columns=[\"a\", \"b\"])\n ddf = dd.from_pandas(df, 2)\n assert ddf.index.nunique().compute() == 100\n\n ddf2 = dd.from_pandas(pd.concat([df, df]), 5)\n assert ddf2.index.nunique().compute() == 100\n\n\ndef test_drop_axis_1():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [5, 6, 7, 8], \"z\": [9, 10, 11, 12]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(ddf.drop(\"y\", axis=1), df.drop(\"y\", axis=1))\n assert_eq(ddf.drop([\"y\", \"z\"], axis=1), df.drop([\"y\", \"z\"], axis=1))\n with pytest.raises(ValueError):\n ddf.drop([\"a\", \"x\"], axis=1)\n assert_eq(\n ddf.drop([\"a\", \"x\"], axis=1, errors=\"ignore\"),\n df.drop([\"a\", \"x\"], axis=1, errors=\"ignore\"),\n )\n assert_eq(ddf.drop(columns=[\"y\", \"z\"]), df.drop(columns=[\"y\", \"z\"]))\n\n\n@pytest.mark.parametrize(\"columns\", [[\"b\"], []])\ndef test_drop_columns(columns):\n # Check both populated and empty list argument\n # https://github.com/dask/dask/issues/6870\n\n df = 
pd.DataFrame(\n {\n \"a\": [2, 4, 6, 8],\n \"b\": [\"1a\", \"2b\", \"3c\", \"4d\"],\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = ddf.drop(columns=columns)\n ddf[\"new\"] = ddf[\"a\"] + 1 # Check that ddf2 is not modified\n\n assert_eq(df.drop(columns=columns), ddf2)\n\n\ndef test_gh580():\n df = pd.DataFrame({\"x\": np.arange(10, dtype=float)})\n ddf = dd.from_pandas(df, 2)\n assert_eq(np.cos(df[\"x\"]), np.cos(ddf[\"x\"]))\n assert_eq(np.cos(df[\"x\"]), np.cos(ddf[\"x\"]))\n\n\ndef test_gh6305():\n df = pd.DataFrame({\"x\": np.arange(3, dtype=float)})\n ddf = dd.from_pandas(df, 1)\n ddf_index_only = ddf.set_index(\"x\")\n ds = ddf[\"x\"]\n\n is_broadcastable([ddf_index_only], ds)\n\n\ndef test_rename_dict():\n renamer = {\"a\": \"A\", \"b\": \"B\"}\n assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))\n\n\ndef test_rename_function():\n renamer = lambda x: x.upper()\n assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))\n\n\ndef test_rename_index():\n renamer = {0: 1}\n pytest.raises(ValueError, lambda: d.rename(index=renamer))\n\n\ndef test_to_timestamp():\n index = pd.period_range(freq=\"A\", start=\"1/1/2001\", end=\"12/1/2004\")\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]}, index=index)\n ddf = dd.from_pandas(df, npartitions=3)\n assert_eq(ddf.to_timestamp(), df.to_timestamp(), **CHECK_FREQ)\n assert_eq(\n ddf.to_timestamp(freq=\"M\", how=\"s\").compute(),\n df.to_timestamp(freq=\"M\", how=\"s\"),\n **CHECK_FREQ,\n )\n assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())\n assert_eq(\n ddf.x.to_timestamp(freq=\"M\", how=\"s\").compute(),\n df.x.to_timestamp(freq=\"M\", how=\"s\"),\n **CHECK_FREQ,\n )\n\n\ndef test_to_frame():\n s = pd.Series([1, 2, 3], name=\"foo\")\n a = dd.from_pandas(s, npartitions=2)\n\n assert_eq(s.to_frame(), a.to_frame())\n assert_eq(s.to_frame(\"bar\"), a.to_frame(\"bar\"))\n\n\n@pytest.mark.parametrize(\"as_frame\", [False, False])\ndef test_to_dask_array_raises(as_frame):\n s = pd.Series([1, 2, 3, 4, 5, 6], name=\"foo\")\n a = dd.from_pandas(s, npartitions=2)\n\n if as_frame:\n a = a.to_frame()\n\n with pytest.raises(ValueError, match=\"4 != 2\"):\n a.to_dask_array((1, 2, 3, 4))\n\n with pytest.raises(ValueError, match=\"Unexpected value\"):\n a.to_dask_array(5)\n\n\n@pytest.mark.parametrize(\"as_frame\", [False, True])\ndef test_to_dask_array_unknown(as_frame):\n s = pd.Series([1, 2, 3, 4, 5], name=\"foo\")\n a = dd.from_pandas(s, chunksize=2)\n\n if as_frame:\n a = a.to_frame()\n\n result = a.to_dask_array()\n assert isinstance(result, da.Array)\n result = result.chunks\n\n if as_frame:\n assert len(result) == 2\n assert result[1] == (1,)\n else:\n assert len(result) == 1\n\n result = result[0]\n assert len(result) == 2\n assert all(np.isnan(x) for x in result)\n\n\n@pytest.mark.parametrize(\n \"lengths,as_frame,meta\",\n [\n ([2, 3], False, None),\n (True, False, None),\n (True, False, np.array([], dtype=\"f4\")),\n ],\n)\ndef test_to_dask_array(meta, as_frame, lengths):\n s = pd.Series([1, 2, 3, 4, 5], name=\"foo\", dtype=\"i4\")\n a = dd.from_pandas(s, chunksize=2)\n\n if as_frame:\n a = a.to_frame()\n\n result = a.to_dask_array(lengths=lengths, meta=meta)\n assert isinstance(result, da.Array)\n\n expected_chunks = ((2, 3),)\n\n if as_frame:\n expected_chunks = expected_chunks + ((1,),)\n\n assert result.chunks == expected_chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mod_eq_test_nunique.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_mod_eq_test_nunique.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4546, "end_line": 4765, "span_ids": ["test_broadcast", "test_scalar_with_array", "test_replace", "test_nunique", "test_setitem", "test_dtype_cast", "test_mod_eq", "test_map_partitions_delays_lists", "test_setitem_with_bool_series_as_key", "test_dataframe_explode", "test_series_explode", "test_setitem_with_numeric_column_name_raises_not_implemented", "test_setitem_with_bool_dataframe_as_key", "test_assign_index", "test_index_divisions", "test_meta_error_message", "test_has_parallel_type", "test_map_index", "test_series_map", "test_pop"], "tokens": 2169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mod_eq():\n df = pd.DataFrame({\"a\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n assert_eq(df, ddf)\n assert_eq(df.a, ddf.a)\n assert_eq(df.a + 2, ddf.a + 2)\n assert_eq(df.a + 2 == 0, ddf.a + 2 == 0)\n\n\ndef test_setitem():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]})\n ddf = dd.from_pandas(df.copy(), 2)\n df[df.columns] = 1\n ddf[ddf.columns] = 1\n assert_eq(df, ddf)\n\n\ndef test_setitem_with_bool_dataframe_as_key():\n df = pd.DataFrame({\"A\": [1, 4], \"B\": [3, 2]})\n ddf = dd.from_pandas(df.copy(), 2)\n df[df > 2] = 5\n ddf[ddf > 2] = 5\n assert_eq(df, ddf)\n\n\ndef test_setitem_with_bool_series_as_key():\n df = pd.DataFrame({\"A\": [1, 4], \"B\": [3, 2]})\n ddf = dd.from_pandas(df.copy(), 2)\n df[df[\"A\"] > 2] = 5\n ddf[ddf[\"A\"] > 2] = 5\n assert_eq(df, ddf)\n\n\ndef test_setitem_with_numeric_column_name_raises_not_implemented():\n df = pd.DataFrame({0: [1, 4], 1: [3, 2]})\n ddf = dd.from_pandas(df.copy(), 2)\n # works for pandas\n df[0] = 5\n # raises error for dask\n with pytest.raises(NotImplementedError, match=\"not supported\"):\n ddf[0] = 5\n\n\ndef test_broadcast():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf - (ddf.sum() + 1), df - (df.sum() + 1))\n\n\ndef test_scalar_with_array():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n da.utils.assert_eq(df.x.values + df.x.mean(), ddf.x.values + ddf.x.mean())\n\n\ndef test_has_parallel_type():\n assert has_parallel_type(pd.DataFrame())\n assert has_parallel_type(pd.Series(dtype=float))\n assert not has_parallel_type(123)\n\n\ndef test_meta_error_message():\n with pytest.raises(TypeError) as info:\n dd.DataFrame({(\"x\", 1): 123}, \"x\", pd.Series(dtype=float), [None, None])\n\n assert \"Series\" in str(info.value)\n assert \"DataFrame\" in str(info.value)\n assert \"pandas\" in str(info.value)\n\n\ndef test_map_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert ddf.known_divisions is True\n\n cleared = 
ddf.index.map(lambda x: x * 10)\n assert cleared.known_divisions is False\n\n applied = ddf.index.map(lambda x: x * 10, is_monotonic=True)\n assert applied.known_divisions is True\n assert applied.divisions == tuple(x * 10 for x in ddf.divisions)\n\n\ndef test_assign_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf_copy = ddf.copy()\n\n ddf.index = ddf.index * 10\n\n expected = df.copy()\n expected.index = expected.index * 10\n\n assert_eq(ddf, expected)\n assert_eq(ddf_copy, df)\n\n\ndef test_index_divisions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(ddf.index + 1, df.index + 1)\n assert_eq(10 * ddf.index, 10 * df.index)\n assert_eq(-ddf.index, -df.index)\n\n\ndef test_replace():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(df.replace(1, 10), ddf.replace(1, 10))\n assert_eq(df.replace({1: 10, 2: 20}), ddf.replace({1: 10, 2: 20}))\n assert_eq(df.x.replace(1, 10), ddf.x.replace(1, 10))\n assert_eq(df.x.replace({1: 10, 2: 20}), ddf.x.replace({1: 10, 2: 20}))\n\n\ndef test_map_partitions_delays_lists():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n L = list(range(100))\n out = ddf.map_partitions(lambda x, y: x + sum(y), y=L)\n assert any(str(L) == str(v) for v in out.__dask_graph__().values())\n\n out = ddf.map_partitions(lambda x, y: x + sum(y), L)\n assert any(str(L) == str(v) for v in out.__dask_graph__().values())\n\n\ndef test_dtype_cast():\n df = pd.DataFrame(\n {\n \"A\": np.arange(10, dtype=np.int32),\n \"B\": np.arange(10, dtype=np.int64),\n \"C\": np.arange(10, dtype=np.float32),\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert ddf.A.dtype == np.int32\n assert ddf.B.dtype == np.int64\n assert ddf.C.dtype == np.float32\n\n col = pd.Series(np.arange(10, dtype=np.float32)) / 2\n assert col.dtype == np.float32\n\n ddf = ddf.assign(D=col)\n assert ddf.D.dtype == np.float32\n assert ddf.C.dtype == np.float32\n # fails\n assert ddf.B.dtype == np.int64\n # fails\n assert ddf.A.dtype == np.int32\n\n\n@pytest.mark.parametrize(\"base_npart\", [1, 4])\n@pytest.mark.parametrize(\"map_npart\", [1, 3])\n@pytest.mark.parametrize(\"sorted_index\", [False, True])\n@pytest.mark.parametrize(\"sorted_map_index\", [False, True])\ndef test_series_map(base_npart, map_npart, sorted_index, sorted_map_index):\n base = pd.Series(\n [\"\".join(np.random.choice([\"a\", \"b\", \"c\"], size=3)) for x in range(100)]\n )\n if not sorted_index:\n index = np.arange(100)\n np.random.shuffle(index)\n base.index = index\n map_index = [\"\".join(x) for x in product(\"abc\", repeat=3)]\n mapper = pd.Series(np.random.randint(50, size=len(map_index)), index=map_index)\n if not sorted_map_index:\n map_index = np.array(map_index)\n np.random.shuffle(map_index)\n mapper.index = map_index\n expected = base.map(mapper)\n dask_base = dd.from_pandas(base, npartitions=base_npart, sort=False)\n dask_map = dd.from_pandas(mapper, npartitions=map_npart, sort=False)\n result = dask_base.map(dask_map)\n dd.utils.assert_eq(expected, result)\n\n\ndef test_dataframe_explode():\n df = pd.DataFrame({\"A\": [[1, 2, 3], \"foo\", [3, 4]], \"B\": 1})\n exploded_df = df.explode(\"A\")\n ddf = dd.from_pandas(df, npartitions=2)\n exploded_ddf = ddf.explode(\"A\")\n assert ddf.divisions == exploded_ddf.divisions\n assert_eq(exploded_ddf.compute(), exploded_df)\n\n\ndef test_series_explode():\n s = pd.Series([[1, 2, 3], 
\"foo\", [3, 4]])\n exploded_s = s.explode()\n ds = dd.from_pandas(s, npartitions=2)\n exploded_ds = ds.explode()\n assert_eq(exploded_ds, exploded_s)\n assert ds.divisions == exploded_ds.divisions\n\n\ndef test_pop():\n df = pd.DataFrame({\"x\": range(10), \"y\": range(10)})\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n s = ddf.pop(\"y\")\n assert s.name == \"y\"\n assert ddf.columns == [\"x\"]\n assert_eq(ddf, df[[\"x\"]])\n\n\n@pytest.mark.parametrize(\"dropna\", [True, False])\n@pytest.mark.parametrize(\"axis\", [0, 1])\ndef test_nunique(dropna, axis):\n df = pd.DataFrame(\n {\"x\": [\"a\", \"a\", \"c\"], \"y\": [None, 1, 2], \"c\": np.arange(0, 1, 0.4)}\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf[\"y\"].nunique(dropna=dropna), df[\"y\"].nunique(dropna=dropna))\n assert_eq(\n ddf.nunique(dropna=dropna, axis=axis), df.nunique(dropna=dropna, axis=axis)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_view_test_dot.with_pytest_raises_TypeEr.dask_s1_dot_da_array_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_view_test_dot.with_pytest_raises_TypeEr.dask_s1_dot_da_array_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4794, "end_line": 4971, "span_ids": ["test_attrs_series_in_dataframes", "test_assign_na_float_columns", "test_iter", "test_dot", "test_dataframe_groupby_cumprod_agg_empty_partitions", "test_repr_html_dataframe_highlevelgraph", "test_dataframe_groupby_cumsum_agg_empty_partitions", "test_simple_map_partitions", "test_attrs_dataframe", "test_view", "test_dask_layers", "test_fuse_roots", "test_attrs_series", "test_join_series"], "tokens": 2113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_view():\n data = {\n \"x\": pd.Series(range(5), dtype=\"int8\"),\n \"y\": pd.Series(\n [\n \"2021-11-27 00:05:02.175274\",\n \"2021-11-27 00:05:05.205596\",\n \"2021-11-27 00:05:29.212572\",\n \"2021-11-27 00:05:25.708343\",\n \"2021-11-27 00:05:47.714958\",\n ],\n dtype=\"datetime64[ns]\",\n ),\n }\n\n df = pd.DataFrame(data)\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf[\"x\"].view(\"uint8\"), df[\"x\"].view(\"uint8\"))\n assert_eq(ddf[\"y\"].view(\"int64\"), df[\"y\"].view(\"int64\"))\n\n\ndef test_simple_map_partitions():\n data = {\"col_0\": [9, -3, 0, -1, 5], \"col_1\": [-2, -7, 6, 8, -5]}\n df = pd.DataFrame(data)\n ddf = dd.from_pandas(df, npartitions=2)\n ddf = ddf.clip(-4, 6)\n task = ddf.__dask_graph__()[ddf.__dask_keys__()[0]]\n [v] = task[0].dsk.values()\n assert v[0] == M.clip or v[1] == M.clip\n\n\ndef test_iter():\n df = pd.DataFrame({\"A\": [1, 2, 3, 4], \"B\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, 2)\n\n assert list(df) == list(ddf)\n for col, expected in zip(ddf, [\"A\", \"B\"]):\n assert col == expected\n\n\ndef 
test_dataframe_groupby_cumsum_agg_empty_partitions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=4)\n assert_eq(ddf[ddf.x < 5].x.cumsum(), df[df.x < 5].x.cumsum())\n assert_eq(ddf[ddf.x > 5].x.cumsum(), df[df.x > 5].x.cumsum())\n\n\ndef test_dataframe_groupby_cumprod_agg_empty_partitions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=4)\n assert_eq(ddf[ddf.x < 5].x.cumprod(), df[df.x < 5].x.cumprod())\n assert_eq(ddf[ddf.x > 5].x.cumprod(), df[df.x > 5].x.cumprod())\n\n\ndef test_fuse_roots():\n pdf1 = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [3, 5, 2, 5, 7, 2, 4, 2, 4]}\n )\n ddf1 = dd.from_pandas(pdf1, 2)\n pdf2 = pd.DataFrame({\"a\": [True, False, True] * 3, \"b\": [False, False, True] * 3})\n ddf2 = dd.from_pandas(pdf2, 2)\n\n res = ddf1.where(ddf2)\n hlg = fuse_roots(res.__dask_graph__(), keys=res.__dask_keys__())\n hlg.validate()\n\n\ndef test_attrs_dataframe():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n df.attrs = {\"date\": \"2020-10-16\"}\n ddf = dd.from_pandas(df, 2)\n\n assert df.attrs == ddf.attrs\n assert df.abs().attrs == ddf.abs().attrs\n\n\ndef test_attrs_series():\n s = pd.Series([1, 2], name=\"A\")\n s.attrs[\"unit\"] = \"kg\"\n ds = dd.from_pandas(s, 2)\n\n assert s.attrs == ds.attrs\n assert s.fillna(1).attrs == ds.fillna(1).attrs\n\n\n@pytest.mark.xfail(reason=\"df.iloc[:0] does not keep the series attrs\")\ndef test_attrs_series_in_dataframes():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n df.A.attrs[\"unit\"] = \"kg\"\n ddf = dd.from_pandas(df, 2)\n\n # Fails because the pandas iloc method doesn't currently persist\n # the attrs dict for series in a dataframe. Dask uses df.iloc[:0]\n # when creating the _meta dataframe in make_meta_pandas(x, index=None).\n # Should start xpassing when df.iloc works. 
Remove the xfail then.\n assert df.A.attrs == ddf.A.attrs\n\n\ndef test_join_series():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=1)\n expected_df = dd.from_pandas(df.join(df[\"x\"], lsuffix=\"_\"), npartitions=1)\n actual_df = ddf.join(ddf[\"x\"], lsuffix=\"_\")\n assert_eq(actual_df, expected_df)\n\n\ndef test_dask_layers():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert ddf.dask.layers.keys() == {ddf._name}\n assert ddf.dask.dependencies == {ddf._name: set()}\n assert ddf.__dask_layers__() == (ddf._name,)\n dds = ddf[\"x\"]\n assert dds.dask.layers.keys() == {ddf._name, dds._name}\n assert dds.dask.dependencies == {ddf._name: set(), dds._name: {ddf._name}}\n assert dds.__dask_layers__() == (dds._name,)\n ddi = dds.min()\n assert ddi.key[1:] == (0,)\n # Note that the `min` operation will use two layers\n # now that ACA uses HLG\n assert {ddf._name, dds._name, ddi.key[0]}.issubset(ddi.dask.layers.keys())\n assert len(ddi.dask.layers) == 4\n assert ddi.dask.dependencies[ddf._name] == set()\n assert ddi.dask.dependencies[dds._name] == {ddf._name}\n assert len(ddi.dask.dependencies) == 4\n assert ddi.__dask_layers__() == (ddi.key[0],)\n\n\ndef test_repr_html_dataframe_highlevelgraph():\n pytest.importorskip(\"jinja2\")\n x = timeseries().shuffle(\"id\", shuffle=\"tasks\").head(compute=False)\n hg = x.dask\n assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None\n for layer in hg.layers.values():\n assert xml.etree.ElementTree.fromstring(layer._repr_html_()) is not None\n\n\n@pytest.mark.skipif(\n not dd._compat.PANDAS_GT_120, reason=\"Float64 was introduced in pandas>=1.2\"\n)\ndef test_assign_na_float_columns():\n # See https://github.com/dask/dask/issues/7156\n df_pandas = pd.DataFrame({\"a\": [1.1]}, dtype=\"Float64\")\n df = dd.from_pandas(df_pandas, npartitions=1)\n\n df = df.assign(new_col=df[\"a\"])\n\n assert df.compute()[\"a\"].dtypes == \"Float64\"\n assert df.compute()[\"new_col\"].dtypes == \"Float64\"\n\n\ndef test_dot():\n s1 = pd.Series([1, 2, 3, 4])\n s2 = pd.Series([4, 5, 6, 6])\n df = pd.DataFrame({\"one\": s1, \"two\": s2})\n\n dask_s1 = dd.from_pandas(s1, npartitions=1)\n dask_df = dd.from_pandas(df, npartitions=1)\n dask_s2 = dd.from_pandas(s2, npartitions=1)\n\n assert_eq(s1.dot(s2), dask_s1.dot(dask_s2))\n assert_eq(s1.dot(df), dask_s1.dot(dask_df))\n\n # With partitions\n partitioned_s1 = dd.from_pandas(s1, npartitions=2)\n partitioned_df = dd.from_pandas(df, npartitions=2)\n partitioned_s2 = dd.from_pandas(s2, npartitions=2)\n\n assert_eq(s1.dot(s2), partitioned_s1.dot(partitioned_s2))\n assert_eq(s1.dot(df), partitioned_s1.dot(partitioned_df))\n\n # Test passing meta kwarg\n res = dask_s1.dot(dask_df, meta=pd.Series([1], name=\"test_series\")).compute()\n assert res.name == \"test_series\"\n\n # Test validation of second operand\n with pytest.raises(TypeError):\n dask_s1.dot(da.array([1, 2, 3, 4]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dot_nan_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dot_nan_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4974, "end_line": 5119, "span_ids": ["test_use_of_weakref_proxy", "test_index_is_monotonic_dt64", "test_is_monotonic_numeric", "test_index_is_monotonic_numeric", "test_custom_map_reduce", "test_dot_nan", "test_is_monotonic_dt64"], "tokens": 1565}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dot_nan():\n # Test that nan inputs match pandas' behavior\n s1 = pd.Series([1, 2, 3, 4])\n dask_s1 = dd.from_pandas(s1, npartitions=1)\n\n s2 = pd.Series([np.nan, np.nan, np.nan, np.nan])\n dask_s2 = dd.from_pandas(s2, npartitions=1)\n\n df = pd.DataFrame({\"one\": s1, \"two\": s2})\n dask_df = dd.from_pandas(df, npartitions=1)\n\n assert_eq(s1.dot(s2), dask_s1.dot(dask_s2))\n assert_eq(s2.dot(df), dask_s2.dot(dask_df))\n\n\ndef test_use_of_weakref_proxy():\n \"\"\"Testing wrapping frames in proxy wrappers\"\"\"\n df = pd.DataFrame({\"data\": [1, 2, 3]})\n df_pxy = weakref.proxy(df)\n ser = pd.Series({\"data\": [1, 2, 3]})\n ser_pxy = weakref.proxy(ser)\n\n assert is_dataframe_like(df_pxy)\n assert is_series_like(ser_pxy)\n\n assert dask.dataframe.groupby._cov_chunk(df_pxy, \"data\")\n assert isinstance(\n dask.dataframe.groupby._groupby_apply_funcs(df_pxy, \"data\", funcs=[]),\n pd.DataFrame,\n )\n\n # Test wrapping each Dask dataframe chunk in a proxy\n l = []\n\n def f(x):\n l.append(x) # Keep `x` alive\n return weakref.proxy(x)\n\n d = pd.DataFrame({\"g\": [0, 0, 1] * 3, \"b\": [1, 2, 3] * 3})\n a = dd.from_pandas(d, npartitions=1)\n a = a.map_partitions(f, meta=a._meta)\n pxy = weakref.proxy(a)\n res = pxy[\"b\"].groupby(pxy[\"g\"]).sum()\n isinstance(res.compute(), pd.Series)\n\n\ndef test_is_monotonic_numeric():\n s = pd.Series(range(20))\n ds = dd.from_pandas(s, npartitions=5)\n assert_eq(s.is_monotonic_increasing, ds.is_monotonic_increasing)\n # `is_monotonic` was deprecated starting in `pandas=1.5.0`\n with _check_warning(\n PANDAS_GT_150, FutureWarning, message=\"is_monotonic is deprecated\"\n ):\n expected = s.is_monotonic\n with _check_warning(\n PANDAS_GT_150, FutureWarning, message=\"is_monotonic is deprecated\"\n ):\n result = ds.is_monotonic\n assert_eq(expected, result)\n\n s_2 = pd.Series(range(20, 0, -1))\n ds_2 = dd.from_pandas(s_2, npartitions=5)\n assert_eq(s_2.is_monotonic_decreasing, ds_2.is_monotonic_decreasing)\n\n s_3 = pd.Series(list(range(0, 5)) + list(range(0, 20)))\n ds_3 = dd.from_pandas(s_3, npartitions=5)\n assert_eq(s_3.is_monotonic_increasing, ds_3.is_monotonic_increasing)\n assert_eq(s_3.is_monotonic_decreasing, ds_3.is_monotonic_decreasing)\n\n\ndef test_is_monotonic_dt64():\n s = pd.Series(pd.date_range(\"20130101\", periods=10))\n ds = dd.from_pandas(s, npartitions=5)\n assert_eq(s.is_monotonic_increasing, ds.is_monotonic_increasing)\n\n s_2 = pd.Series(list(reversed(s)))\n ds_2 = dd.from_pandas(s_2, npartitions=5)\n assert_eq(s_2.is_monotonic_decreasing, ds_2.is_monotonic_decreasing)\n\n\ndef test_index_is_monotonic_numeric():\n s = pd.Series(1, index=range(20))\n ds = dd.from_pandas(s, npartitions=5, sort=False)\n assert_eq(s.index.is_monotonic_increasing, ds.index.is_monotonic_increasing)\n # `is_monotonic` was deprecated starting 
in `pandas=1.5.0`\n with _check_warning(\n PANDAS_GT_150, FutureWarning, message=\"is_monotonic is deprecated\"\n ):\n expected = s.index.is_monotonic\n with _check_warning(\n PANDAS_GT_150, FutureWarning, message=\"is_monotonic is deprecated\"\n ):\n result = ds.index.is_monotonic\n assert_eq(expected, result)\n\n s_2 = pd.Series(1, index=range(20, 0, -1))\n ds_2 = dd.from_pandas(s_2, npartitions=5, sort=False)\n assert_eq(s_2.index.is_monotonic_decreasing, ds_2.index.is_monotonic_decreasing)\n\n s_3 = pd.Series(1, index=list(range(0, 5)) + list(range(0, 20)))\n ds_3 = dd.from_pandas(s_3, npartitions=5, sort=False)\n assert_eq(s_3.index.is_monotonic_increasing, ds_3.index.is_monotonic_increasing)\n assert_eq(s_3.index.is_monotonic_decreasing, ds_3.index.is_monotonic_decreasing)\n\n\ndef test_index_is_monotonic_dt64():\n s = pd.Series(1, index=pd.date_range(\"20130101\", periods=10))\n ds = dd.from_pandas(s, npartitions=5, sort=False)\n assert_eq(s.index.is_monotonic_increasing, ds.index.is_monotonic_increasing)\n\n s_2 = pd.Series(1, index=list(reversed(s)))\n ds_2 = dd.from_pandas(s_2, npartitions=5, sort=False)\n assert_eq(s_2.index.is_monotonic_decreasing, ds_2.index.is_monotonic_decreasing)\n\n\ndef test_custom_map_reduce():\n # Make sure custom map-reduce workflows can use\n # the universal ACA code path with metadata\n # that is not DataFrame-like.\n # See: https://github.com/dask/dask/issues/8636\n\n df = pd.DataFrame(columns=[\"a\"], data=[[2], [4], [8]], index=[0, 1, 2])\n ddf = dd.from_pandas(df, npartitions=2)\n\n def map_fn(x):\n return {\"x\": x, \"y\": x}\n\n def reduce_fn(series):\n merged = None\n for mapped in series:\n if merged is None:\n merged = mapped.copy()\n else:\n merged[\"x\"] += mapped[\"x\"]\n merged[\"y\"] *= mapped[\"y\"]\n return merged\n\n result = (\n ddf[\"a\"]\n .map(map_fn, meta=(\"data\", \"object\"))\n .reduction(reduce_fn, aggregate=reduce_fn, meta=(\"data\", \"object\"))\n .compute()[0]\n )\n assert result == {\"x\": 14, \"y\": 64}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_collections_auto_shuffle_method.yield": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_collections_auto_shuffle_method.yield", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 47, "span_ids": ["imports", "agg_func", "auto_shuffle_method"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import collections\nimport operator\nimport pickle\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe import _compat\nfrom dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_150, tm\nfrom dask.dataframe.utils import assert_dask_graph, assert_eq, assert_max_deps\nfrom dask.utils import M\n\nAGG_FUNCS 
= [\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n \"count\",\n \"size\",\n \"std\",\n \"var\",\n \"cov\",\n \"corr\",\n \"nunique\",\n \"first\",\n \"last\",\n \"prod\",\n]\n\n\n@pytest.fixture(params=AGG_FUNCS)\ndef agg_func(request):\n \"\"\"\n Aggregations supported for groups\n \"\"\"\n return request.param\n\n\n# Wrapper fixture for shuffle_method to auto-apply it to all the tests in this module,\n# as we don't want to auto-apply the fixture repo-wide.\n@pytest.fixture(autouse=True)\ndef auto_shuffle_method(shuffle_method):\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_test_full_groupby.with_pytest_warns_UserWar.assert_eq_expected_ddf_g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_test_full_groupby.with_pytest_warns_UserWar.assert_eq_expected_ddf_g", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 144, "span_ids": ["test_full_groupby"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_full_groupby():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n pytest.raises(KeyError, lambda: ddf.groupby(\"does_not_exist\"))\n pytest.raises(AttributeError, lambda: ddf.groupby(\"a\").does_not_exist)\n assert \"b\" in dir(ddf.groupby(\"a\"))\n\n def func(df):\n return df.assign(b=df.b - df.b.mean())\n\n expected = df.groupby(\"a\").apply(func)\n\n with pytest.warns(UserWarning, match=\"`meta` is not specified\"):\n assert ddf.groupby(\"a\").apply(func)._name.startswith(\"func\")\n\n assert_eq(expected, ddf.groupby(\"a\").apply(func))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_apply_multiarg_test_full_groupby_apply_multiarg.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_full_groupby_apply_multiarg_test_full_groupby_apply_multiarg.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 147, "end_line": 212, "span_ids": ["test_full_groupby_apply_multiarg"], "tokens": 630}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_full_groupby_apply_multiarg():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n def func(df, c, d=3):\n return df.assign(b=df.b - df.b.mean() + c * d)\n\n c = df.a.sum()\n d = df.b.mean()\n\n c_scalar = ddf.a.sum()\n d_scalar = ddf.b.mean()\n c_delayed = dask.delayed(lambda: c)()\n d_delayed = dask.delayed(lambda: d)()\n\n with pytest.warns(UserWarning, match=\"`meta` is not specified\"):\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d),\n ddf.groupby(\"a\").apply(func, c, d=d_scalar),\n )\n\n assert_eq(df.groupby(\"a\").apply(func, c), ddf.groupby(\"a\").apply(func, c))\n\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d), ddf.groupby(\"a\").apply(func, c, d=d)\n )\n\n assert_eq(\n df.groupby(\"a\").apply(func, c),\n ddf.groupby(\"a\").apply(func, c_scalar),\n check_dtype=False,\n )\n\n meta = df.groupby(\"a\").apply(func, c)\n\n assert_eq(\n df.groupby(\"a\").apply(func, c),\n ddf.groupby(\"a\").apply(func, c_scalar, meta=meta),\n )\n\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d),\n ddf.groupby(\"a\").apply(func, c, d=d_scalar, meta=meta),\n )\n\n # Delayed arguments work, but only if metadata is provided\n with pytest.raises(ValueError) as exc:\n ddf.groupby(\"a\").apply(func, c, d=d_delayed)\n assert \"dask.delayed\" in str(exc.value) and \"meta\" in str(exc.value)\n\n with pytest.raises(ValueError) as exc:\n ddf.groupby(\"a\").apply(func, c_delayed, d=d)\n assert \"dask.delayed\" in str(exc.value) and \"meta\" in str(exc.value)\n\n assert_eq(\n df.groupby(\"a\").apply(func, c),\n ddf.groupby(\"a\").apply(func, c_delayed, meta=meta),\n )\n\n assert_eq(\n df.groupby(\"a\").apply(func, c, d=d),\n ddf.groupby(\"a\").apply(func, c, d=d_delayed, meta=meta),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in__test_split_apply_combine_on_series.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.for_ddkey_pdkey_in__test_split_apply_combine_on_series.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 646, "end_line": 698, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 797}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\ndef test_split_apply_combine_on_series(empty):\n # ... 
other code\n\n for ddkey, pdkey in [\n (\"a\", \"a\"),\n (ddf.a, pdf.a),\n (ddf.a + 1, pdf.a + 1),\n (ddf.a > 3, pdf.a > 3),\n ]:\n assert_eq(ddf.groupby(ddkey).b.sum(), pdf.groupby(pdkey).b.sum())\n assert_eq(ddf.groupby(ddkey).b.min(), pdf.groupby(pdkey).b.min())\n assert_eq(ddf.groupby(ddkey).b.max(), pdf.groupby(pdkey).b.max())\n assert_eq(ddf.groupby(ddkey).b.count(), pdf.groupby(pdkey).b.count())\n assert_eq(ddf.groupby(ddkey).b.mean(), pdf.groupby(pdkey).b.mean())\n assert_eq(ddf.groupby(ddkey).b.nunique(), pdf.groupby(pdkey).b.nunique())\n assert_eq(ddf.groupby(ddkey).b.size(), pdf.groupby(pdkey).b.size())\n assert_eq(ddf.groupby(ddkey).b.first(), pdf.groupby(pdkey).b.first())\n assert_eq(ddf.groupby(ddkey).last(), pdf.groupby(pdkey).last())\n assert_eq(ddf.groupby(ddkey).prod(), pdf.groupby(pdkey).prod())\n\n assert_eq(ddf.groupby(ddkey).sum(), pdf.groupby(pdkey).sum())\n assert_eq(ddf.groupby(ddkey).min(), pdf.groupby(pdkey).min())\n assert_eq(ddf.groupby(ddkey).max(), pdf.groupby(pdkey).max())\n assert_eq(ddf.groupby(ddkey).count(), pdf.groupby(pdkey).count())\n assert_eq(ddf.groupby(ddkey).mean(), pdf.groupby(pdkey).mean().astype(float))\n assert_eq(ddf.groupby(ddkey).size(), pdf.groupby(pdkey).size())\n assert_eq(ddf.groupby(ddkey).first(), pdf.groupby(pdkey).first())\n assert_eq(ddf.groupby(ddkey).last(), pdf.groupby(pdkey).last())\n assert_eq(ddf.groupby(ddkey).prod(), pdf.groupby(pdkey).prod())\n\n for ddof in ddofs:\n assert_eq(ddf.groupby(ddkey).b.std(ddof), pdf.groupby(pdkey).b.std(ddof))\n\n assert sorted(ddf.groupby(\"b\").a.sum().dask) == sorted(\n ddf.groupby(\"b\").a.sum().dask\n )\n assert sorted(ddf.groupby(ddf.a > 3).b.mean().dask) == sorted(\n ddf.groupby(ddf.a > 3).b.mean().dask\n )\n\n # test raises with incorrect key\n pytest.raises(KeyError, lambda: ddf.groupby(\"x\"))\n pytest.raises(KeyError, lambda: ddf.groupby([\"a\", \"x\"]))\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[\"x\"])\n with warnings.catch_warnings():\n # pandas warns about using tuples before throwing the KeyError\n warnings.simplefilter(\"ignore\", FutureWarning)\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[\"b\", \"x\"])\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[[\"b\", \"x\"]])\n\n # test graph node labels\n assert_dask_graph(ddf.groupby(\"b\").a.sum(), \"series-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").a.min(), \"series-groupby-min\")\n assert_dask_graph(ddf.groupby(\"b\").a.max(), \"series-groupby-max\")\n assert_dask_graph(ddf.groupby(\"b\").a.count(), \"series-groupby-count\")\n assert_dask_graph(ddf.groupby(\"b\").a.var(), \"series-groupby-var\")\n # mean consists of sum and count operations\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.None_9_test_split_apply_combine_on_series.None_28": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_split_apply_combine_on_series.None_9_test_split_apply_combine_on_series.None_28", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 699, "end_line": 721, "span_ids": ["test_split_apply_combine_on_series"], "tokens": 480}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"empty\", [True, False])\ndef test_split_apply_combine_on_series(empty):\n # ... other code\n pytest.raises(KeyError, lambda: ddf.groupby(\"a\")[[\"b\", \"x\"]])\n # ... other code\n assert_dask_graph(ddf.groupby(\"b\").a.cov(), \"series-groupby-cov\")\n assert_dask_graph(ddf.groupby(\"b\").a.first(), \"series-groupby-first\")\n assert_dask_graph(ddf.groupby(\"b\").a.last(), \"series-groupby-last\")\n assert_dask_graph(ddf.groupby(\"b\").a.tail(), \"series-groupby-tail\")\n assert_dask_graph(ddf.groupby(\"b\").a.head(), \"series-groupby-head\")\n assert_dask_graph(ddf.groupby(\"b\").a.prod(), \"series-groupby-prod\")\n # mean consists from sum and count operations\n assert_dask_graph(ddf.groupby(\"b\").a.mean(), \"series-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").a.mean(), \"series-groupby-count\")\n assert_dask_graph(ddf.groupby(\"b\").a.nunique(), \"series-groupby-nunique\")\n assert_dask_graph(ddf.groupby(\"b\").a.size(), \"series-groupby-size\")\n\n assert_dask_graph(ddf.groupby(\"b\").sum(), \"dataframe-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").min(), \"dataframe-groupby-min\")\n assert_dask_graph(ddf.groupby(\"b\").max(), \"dataframe-groupby-max\")\n assert_dask_graph(ddf.groupby(\"b\").count(), \"dataframe-groupby-count\")\n assert_dask_graph(ddf.groupby(\"b\").first(), \"dataframe-groupby-first\")\n assert_dask_graph(ddf.groupby(\"b\").last(), \"dataframe-groupby-last\")\n assert_dask_graph(ddf.groupby(\"b\").prod(), \"dataframe-groupby-prod\")\n # mean consists from sum and count operations\n assert_dask_graph(ddf.groupby(\"b\").mean(), \"dataframe-groupby-sum\")\n assert_dask_graph(ddf.groupby(\"b\").mean(), \"dataframe-groupby-count\")\n assert_dask_graph(ddf.groupby(\"b\").size(), \"dataframe-groupby-size\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_apply_tasks_test_groupby_apply_tasks.for_ind_in_lambda_x_A_.None_1": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_apply_tasks_test_groupby_apply_tasks.for_ind_in_lambda_x_A_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 888, "end_line": 908, "span_ids": ["test_groupby_apply_tasks"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_apply_tasks(shuffle_method):\n if shuffle_method == \"disk\":\n pytest.skip(\"Tasks-only shuffle test\")\n\n df = _compat.makeTimeDataFrame()\n df[\"A\"] = df.A // 0.1\n df[\"B\"] = df.B // 0.1\n ddf = dd.from_pandas(df, npartitions=10)\n\n for ind in [lambda x: \"A\", lambda x: x.A]:\n a = df.groupby(ind(df)).apply(len)\n with pytest.warns(UserWarning):\n b = ddf.groupby(ind(ddf)).apply(len)\n assert_eq(a, b.compute())\n assert not any(\"partd\" in k[0] for k in b.dask)\n\n a = df.groupby(ind(df)).B.apply(len)\n with pytest.warns(UserWarning):\n b = ddf.groupby(ind(ddf)).B.apply(len)\n assert_eq(a, b.compute())\n assert not any(\"partd\" in k[0] for k in b.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_normalize_by_test_groupby_normalize_by.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_normalize_by_test_groupby_normalize_by.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 921, "end_line": 934, "span_ids": ["test_groupby_normalize_by"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_normalize_by():\n full = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n d = dd.from_pandas(full, npartitions=3)\n\n assert d.groupby(\"a\").by == \"a\"\n assert d.groupby(d[\"a\"]).by == \"a\"\n assert d.groupby(d[\"a\"] > 2).by._name == (d[\"a\"] > 2)._name\n assert d.groupby([\"a\", \"b\"]).by == [\"a\", \"b\"]\n\n assert d.groupby([d[\"a\"], d[\"b\"]]).by == [\"a\", \"b\"]\n assert d.groupby([d[\"a\"], \"b\"]).by == [\"a\", \"b\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_apply_test_groupby_column_and_index_apply.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_column_and_index_apply_test_groupby_column_and_index_apply.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1595, "end_line": 1634, "span_ids": ["test_groupby_column_and_index_apply"], "tokens": 470}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"group_args\", [[\"idx\", \"a\"], [\"a\", \"idx\"], [\"idx\"], \"idx\"])\n@pytest.mark.parametrize(\n \"apply_func\",\n [np.min, np.mean, lambda s, axis=None: np.max(s.values) - np.mean(s.values)],\n)\ndef test_groupby_column_and_index_apply(group_args, apply_func):\n df = pd.DataFrame(\n {\"idx\": [1, 1, 1, 2, 2, 2], \"a\": [1, 2, 1, 2, 1, 2], \"b\": np.arange(6)}\n ).set_index(\"idx\")\n\n ddf = dd.from_pandas(df, npartitions=df.index.nunique())\n ddf_no_divs = dd.from_pandas(df, npartitions=df.index.nunique(), sort=False)\n\n # Expected result\n expected = df.groupby(group_args).apply(apply_func, axis=0)\n\n # Compute on dask DataFrame with divisions (no shuffling)\n result = ddf.groupby(group_args).apply(apply_func, axis=0, meta=expected)\n assert_eq(expected, result, check_divisions=False)\n\n # Check that partitioning is preserved\n assert ddf.divisions == result.divisions\n\n # Check that no shuffling occurred.\n # The groupby operation should add only 1 task per partition\n assert len(result.dask) == (len(ddf.dask) + ddf.npartitions)\n\n expected = df.groupby(group_args).apply(apply_func, axis=0)\n\n # Compute on dask DataFrame without divisions (requires shuffling)\n result = ddf_no_divs.groupby(group_args).apply(apply_func, axis=0, meta=expected)\n\n assert_eq(expected, result, check_divisions=False)\n\n # Check that divisions were preserved (all None in this case)\n assert ddf_no_divs.divisions == result.divisions\n\n # Crude check to see if shuffling was performed.\n # The groupby operation should add only more than 1 task per partition\n assert len(result.dask) > (len(ddf_no_divs.dask) + ddf_no_divs.npartitions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_basic_input_test_groupby_shift_basic_input.None_2.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_basic_input_test_groupby_shift_basic_input.None_2.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2041, "end_line": 2067, "span_ids": 
["test_groupby_shift_basic_input"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", [1, 2, 5])\n@pytest.mark.parametrize(\"period\", [1, -1, 10])\n@pytest.mark.parametrize(\"axis\", [0, 1])\ndef test_groupby_shift_basic_input(npartitions, period, axis):\n pdf = pd.DataFrame(\n {\n \"a\": [0, 0, 1, 1, 2, 2, 3, 3, 3],\n \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0],\n \"c\": [0, 0, 0, 0, 0, 1, 1, 1, 1],\n },\n )\n ddf = dd.from_pandas(pdf, npartitions=npartitions)\n with pytest.warns(UserWarning):\n assert_eq(\n pdf.groupby([\"a\", \"c\"]).shift(period, axis=axis),\n ddf.groupby([\"a\", \"c\"]).shift(period, axis=axis),\n )\n with pytest.warns(UserWarning):\n assert_eq(\n pdf.groupby([\"a\"]).shift(period, axis=axis),\n ddf.groupby([\"a\"]).shift(period, axis=axis),\n )\n with pytest.warns(UserWarning):\n assert_eq(\n pdf.groupby(pdf.c).shift(period, axis=axis),\n ddf.groupby(ddf.c).shift(period, axis=axis),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_series_test_groupby_shift_series.with_pytest_warns_UserWar.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_series_test_groupby_shift_series.with_pytest_warns_UserWar.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2070, "end_line": 2082, "span_ids": ["test_groupby_shift_series"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_shift_series():\n pdf = pd.DataFrame(\n {\n \"a\": [0, 0, 1, 1, 2, 2, 3, 3, 3],\n \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0],\n },\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n with pytest.warns(UserWarning):\n assert_eq(\n pdf.groupby(\"a\")[\"b\"].shift(periods=2),\n ddf.groupby(\"a\")[\"b\"].shift(periods=2),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_lazy_input_test_groupby_shift_lazy_input.with_pytest_warns_UserWar.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_lazy_input_test_groupby_shift_lazy_input.with_pytest_warns_UserWar.assert_eq_", "embedding": null, "metadata": {"file_path": 
"dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2085, "end_line": 2103, "span_ids": ["test_groupby_shift_lazy_input"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_shift_lazy_input():\n pdf = pd.DataFrame(\n {\n \"a\": [0, 0, 1, 1, 2, 2, 3, 3, 3],\n \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0],\n \"c\": [0, 0, 0, 0, 0, 1, 1, 1, 1],\n },\n )\n delayed_periods = dask.delayed(lambda: 1)()\n ddf = dd.from_pandas(pdf, npartitions=3)\n assert_eq(\n pdf.groupby(pdf.c).shift(periods=1),\n ddf.groupby(ddf.c).shift(periods=delayed_periods, meta={\"a\": int, \"b\": int}),\n )\n with pytest.warns(UserWarning):\n assert_eq(\n pdf.groupby(pdf.c).shift(periods=1, fill_value=pdf.b.max()),\n ddf.groupby(ddf.c).shift(periods=1, fill_value=ddf.b.max()),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_with_freq_test_groupby_shift_with_freq.assert_eq_df_result_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_with_freq_test_groupby_shift_with_freq.assert_eq_df_result_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2106, "end_line": 2119, "span_ids": ["test_groupby_shift_with_freq"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_shift_with_freq():\n pdf = pd.DataFrame(\n dict(a=[1, 2, 3, 4, 5, 6], b=[0, 0, 0, 1, 1, 1]),\n index=pd.date_range(start=\"20100101\", periods=6),\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n # just pass the pandas result as meta for convenience\n df_result = pdf.groupby(pdf.index).shift(periods=-2, freq=\"D\")\n assert_eq(\n df_result, ddf.groupby(ddf.index).shift(periods=-2, freq=\"D\", meta=df_result)\n )\n df_result = pdf.groupby(\"b\").shift(periods=-2, freq=\"D\")\n assert_eq(df_result, ddf.groupby(\"b\").shift(periods=-2, freq=\"D\", meta=df_result))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_empty_partitions_with_value_counts_test_empty_partitions_with_value_counts.assert_eq_expected_actua": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_empty_partitions_with_value_counts_test_empty_partitions_with_value_counts.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2475, "end_line": 2495, "span_ids": ["test_empty_partitions_with_value_counts"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_empty_partitions_with_value_counts():\n # https://github.com/dask/dask/issues/7065\n df = pd.DataFrame(\n data=[\n [\"a1\", \"b1\"],\n [\"a1\", None],\n [\"a1\", \"b1\"],\n [None, None],\n [None, None],\n [None, None],\n [\"a3\", \"b3\"],\n [\"a3\", \"b3\"],\n [\"a5\", \"b5\"],\n ],\n columns=[\"A\", \"B\"],\n )\n\n expected = df.groupby(\"A\")[\"B\"].value_counts()\n ddf = dd.from_pandas(df, npartitions=3)\n actual = ddf.groupby(\"A\")[\"B\"].value_counts()\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_with_pd_grouper_test_groupby_with_pd_grouper.None_1.ddf_groupby_key1_pd_G": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_with_pd_grouper_test_groupby_with_pd_grouper.None_1.ddf_groupby_key1_pd_G", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2498, "end_line": 2508, "span_ids": ["test_groupby_with_pd_grouper"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_with_pd_grouper():\n ddf = dd.from_pandas(\n pd.DataFrame(\n {\"key1\": [\"a\", \"b\", \"a\"], \"key2\": [\"c\", \"c\", \"c\"], \"value\": [1, 2, 3]}\n ),\n npartitions=3,\n )\n with pytest.raises(NotImplementedError):\n ddf.groupby(pd.Grouper(key=\"key1\"))\n with pytest.raises(NotImplementedError):\n ddf.groupby([\"key1\", pd.Grouper(key=\"key2\")])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_empty_partitions_with_rows_operation_test_groupby_empty_partitions_with_rows_operation.assert_eq_expected_actua": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_empty_partitions_with_rows_operation_test_groupby_empty_partitions_with_rows_operation.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2511, "end_line": 2533, "span_ids": ["test_groupby_empty_partitions_with_rows_operation"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"operation\", [\"head\", \"tail\"])\ndef test_groupby_empty_partitions_with_rows_operation(operation):\n\n df = pd.DataFrame(\n data=[\n [\"a1\", \"b1\"],\n [\"a1\", None],\n [\"a1\", \"b1\"],\n [None, None],\n [None, None],\n [None, None],\n [\"a3\", \"b3\"],\n [\"a3\", \"b3\"],\n [\"a5\", \"b5\"],\n ],\n columns=[\"A\", \"B\"],\n )\n\n caller = operator.methodcaller(operation, 1)\n expected = caller(df.groupby(\"A\")[\"B\"])\n ddf = dd.from_pandas(df, npartitions=3)\n actual = caller(ddf.groupby(\"A\")[\"B\"])\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_with_row_operations_test_groupby_with_row_operations.assert_eq_expected_actua": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_with_row_operations_test_groupby_with_row_operations.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2536, "end_line": 2557, "span_ids": ["test_groupby_with_row_operations"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"operation\", [\"head\", \"tail\"])\ndef test_groupby_with_row_operations(operation):\n df = pd.DataFrame(\n data=[\n [\"a0\", \"b1\"],\n [\"a0\", \"b2\"],\n [\"a1\", \"b1\"],\n [\"a3\", \"b3\"],\n [\"a3\", \"b3\"],\n [\"a5\", \"b5\"],\n [\"a1\", \"b1\"],\n [\"a1\", \"b1\"],\n [\"a1\", \"b1\"],\n ],\n columns=[\"A\", \"B\"],\n )\n\n caller = operator.methodcaller(operation)\n expected = caller(df.groupby(\"A\")[\"B\"])\n ddf = dd.from_pandas(df, npartitions=3)\n actual = caller(ddf.groupby(\"A\")[\"B\"])\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_test_loc_with_array.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_series_test_loc_with_array.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 99, "span_ids": ["test_loc_with_series", "test_loc_with_array"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_series():\n assert_eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])\n\n assert sorted(d.loc[d.a % 2 == 0].dask) == sorted(d.loc[d.a % 2 == 0].dask)\n assert sorted(d.loc[d.a % 2 == 0].dask) != sorted(d.loc[d.a % 3 == 0].dask)\n\n\ndef test_loc_with_array():\n assert_eq(d.loc[(d.a % 2 == 0).values], full.loc[(full.a % 2 == 0).values])\n\n assert sorted(d.loc[(d.a % 2 == 0).values].dask) == sorted(\n d.loc[(d.a % 2 == 0).values].dask\n )\n assert sorted(d.loc[(d.a % 2 == 0).values].dask) != sorted(\n d.loc[(d.a % 3 == 0).values].dask\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_function_test_loc_with_array_different_partition.with_pytest_raises_ValueE.ddf_loc_ddf_A_0_repar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_function_test_loc_with_array_different_partition.with_pytest_raises_ValueE.ddf_loc_ddf_A_0_repar", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 102, "end_line": 121, "span_ids": ["test_loc_with_array_different_partition", "test_loc_with_function"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_function():\n assert_eq(d.loc[lambda df: df[\"a\"] > 3, :], full.loc[lambda df: df[\"a\"] > 3, :])\n\n def _col_loc_fun(_df):\n return _df.columns.str.contains(\"b\")\n\n assert_eq(d.loc[:, _col_loc_fun], full.loc[:, _col_loc_fun])\n\n\ndef test_loc_with_array_different_partition():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[(ddf.A > 0).values], df.loc[(df.A > 0).values])\n with pytest.raises(ValueError):\n ddf.loc[(ddf.A > 0).repartition([\"a\", \"g\", \"k\", \"o\", \"t\"]).values]", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_non_boolean_series_test_loc_with_non_boolean_series.with_pytest_raises_.ddf_loc_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc_with_non_boolean_series_test_loc_with_non_boolean_series.with_pytest_raises_.ddf_loc_s_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 168, "span_ids": ["test_loc_with_non_boolean_series"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc_with_non_boolean_series():\n df = pd.Series(\n np.random.randn(20),\n index=list(\"abcdefghijklmnopqrst\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n s = pd.Series(list(\"bdmnat\"))\n ds = dd.from_pandas(s, npartitions=3)\n\n msg = (\n \"Cannot index with non-boolean dask Series. Try passing computed values instead\"\n )\n with pytest.raises(KeyError, match=msg):\n ddf.loc[ds]\n\n assert_eq(ddf.loc[s], df.loc[s])\n\n with pytest.raises(KeyError, match=msg):\n ddf.loc[ds.values]\n\n assert_eq(ddf.loc[s.values], df.loc[s])\n\n ddf = ddf.clear_divisions()\n with pytest.raises(KeyError, match=msg):\n ddf.loc[ds]\n\n with pytest.raises(\n KeyError, match=\"Cannot index with list against unknown division\"\n ):\n ddf.loc[s]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_known_divisions_test_loc2d_with_known_divisions.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_loc2d_with_known_divisions_test_loc2d_with_known_divisions.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 202, "end_line": 220, "span_ids": ["test_loc2d_with_known_divisions"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_loc2d_with_known_divisions():\n df = pd.DataFrame(\n np.random.randn(20, 5),\n index=list(\"abcdefghijklmnopqrst\"),\n columns=list(\"ABCDE\"),\n )\n ddf = dd.from_pandas(df, 3)\n\n assert_eq(ddf.loc[\"a\", \"A\"], df.loc[[\"a\"], \"A\"])\n assert_eq(ddf.loc[\"a\", [\"A\"]], df.loc[[\"a\"], [\"A\"]])\n assert_eq(ddf.loc[\"a\":\"o\", \"A\"], df.loc[\"a\":\"o\", 
\"A\"])\n assert_eq(ddf.loc[\"a\":\"o\", [\"A\"]], df.loc[\"a\":\"o\", [\"A\"]])\n assert_eq(ddf.loc[[\"n\"], [\"A\"]], df.loc[[\"n\"], [\"A\"]])\n assert_eq(ddf.loc[[\"a\", \"c\", \"n\"], [\"A\"]], df.loc[[\"a\", \"c\", \"n\"], [\"A\"]])\n assert_eq(ddf.loc[[\"t\", \"b\"], [\"A\"]], df.loc[[\"t\", \"b\"], [\"A\"]])\n assert_eq(\n ddf.loc[[\"r\", \"r\", \"c\", \"g\", \"h\"], [\"A\"]],\n df.loc[[\"r\", \"r\", \"c\", \"g\", \"h\"], [\"A\"]],\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_series_test_to_series.assert_eq_expected_actua": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_series_test_to_series.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 538, "end_line": 553, "span_ids": ["test_to_series"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"index\",\n [\n pd.date_range(\"2011-01-01\", freq=\"H\", periods=100), # time index\n range(100), # numerical index\n ],\n)\ndef test_to_series(index):\n df = pd.DataFrame({\"A\": np.random.randn(100)}, index=index)\n ddf = dd.from_pandas(df, 10)\n\n expected = df.index.to_series()\n actual = ddf.index.to_series()\n\n assert actual.known_divisions\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_test_to_frame.assert_eq_df_index_to_fra": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_to_frame_test_to_frame.assert_eq_df_index_to_fra", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 556, "end_line": 574, "span_ids": ["test_to_frame"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"index\",\n [\n pd.date_range(\"2011-01-01\", freq=\"H\", periods=100), # time index\n range(100), # numerical index\n ],\n)\ndef test_to_frame(index):\n df = pd.DataFrame({\"A\": np.random.randn(100)}, index=index)\n ddf = dd.from_pandas(df, 10)\n\n expected = df.index.to_frame()\n actual = ddf.index.to_frame()\n\n assert 
actual.known_divisions\n assert_eq(expected, actual)\n\n # test name option\n assert_eq(df.index.to_frame(name=\"foo\"), ddf.index.to_frame(name=\"foo\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_out_of_order_selection_test_iloc_out_of_order_selection.assert_c1_name_B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_iloc_out_of_order_selection_test_iloc_out_of_order_selection.assert_c1_name_B_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 657, "end_line": 673, "span_ids": ["test_iloc_out_of_order_selection"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_iloc_out_of_order_selection():\n df = pd.DataFrame({\"A\": [1] * 100, \"B\": [2] * 100, \"C\": [3] * 100, \"D\": [4] * 100})\n ddf = dd.from_pandas(df, 2)\n ddf = ddf[[\"C\", \"A\", \"B\"]]\n a = ddf.iloc[:, 0]\n b = ddf.iloc[:, 1]\n c = ddf.iloc[:, 2]\n\n assert a.name == \"C\"\n assert b.name == \"A\"\n assert c.name == \"B\"\n\n a1, b1, c1 = dask.compute(a, b, c)\n\n assert a1.name == \"C\"\n assert b1.name == \"A\"\n assert c1.name == \"B\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_pandas_nullable_boolean_data_type_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_indexing.py_test_pandas_nullable_boolean_data_type_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_indexing.py", "file_name": "test_indexing.py", "file_type": "text/x-python", "category": "test", "start_line": 676, "end_line": 685, "span_ids": ["test_pandas_nullable_boolean_data_type"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pandas_nullable_boolean_data_type():\n s1 = pd.Series([0, 1, 2])\n s2 = pd.Series([True, False, pd.NA], dtype=\"boolean\")\n\n ddf1 = dd.from_pandas(s1, npartitions=1)\n ddf2 = dd.from_pandas(s2, npartitions=1)\n\n assert_eq(ddf1[ddf2], s1[s2])\n assert_eq(ddf1.loc[ddf2], s1.loc[s2])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_ddf_left_on.return.request_param": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_ddf_left_on.return.request_param", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 86, "span_ids": ["ddf_right", "ddf_right_unknown", "on", "how", "ddf_left_single", "ddf_right_single", "ddf_left_double", "ddf_left_unknown", "ddf_right_double", "ddf_left"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef ddf_left(df_left):\n # Create frame with 10 partitions\n # Skip division on 2 so there is one mismatch with ddf_right\n return dd.repartition(df_left, [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n\n\n@pytest.fixture\ndef ddf_left_unknown(ddf_left):\n return ddf_left.clear_divisions()\n\n\n@pytest.fixture\ndef ddf_left_single(df_left):\n return dd.from_pandas(df_left, npartitions=1, sort=False)\n\n\n@pytest.fixture\ndef ddf_right(df_right):\n # Create frame with 10 partitions\n # Skip division on 3 so there is one mismatch with ddf_left\n return dd.repartition(df_right, [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11])\n\n\n@pytest.fixture\ndef ddf_right_unknown(ddf_right):\n return ddf_right.clear_divisions()\n\n\n@pytest.fixture\ndef ddf_right_single(df_right):\n return dd.from_pandas(df_right, npartitions=1, sort=False)\n\n\n@pytest.fixture\ndef ddf_right_double(df_right):\n return dd.from_pandas(df_right, npartitions=2, sort=False)\n\n\n@pytest.fixture\ndef ddf_left_double(df_left):\n return dd.from_pandas(df_left, npartitions=2, sort=False)\n\n\n@pytest.fixture(params=[\"inner\", \"left\", \"right\", \"outer\"])\ndef how(request):\n return request.param\n\n\n@pytest.fixture(params=[\"idx\", [\"idx\"], [\"idx\", \"k\"], [\"k\", \"idx\"]])\ndef on(request):\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py__Tests_test_merge_known_to_known.assert_len_result___dask_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py__Tests_test_merge_known_to_known.assert_len_result___dask_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 89, "end_line": 103, "span_ids": ["test_merge_known_to_known", "on"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Tests\n# =====\ndef test_merge_known_to_known(\n df_left, df_right, ddf_left, ddf_right, on, how, shuffle_method\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left.merge(ddf_right, on=on, how=how, shuffle=shuffle_method)\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(range(12)))\n assert len(result.__dask_graph__()) < 80", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_unknown_test_merge_unknown_to_known.assert_eq_result_division": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_unknown_test_merge_unknown_to_known.assert_eq_result_division", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 163, "span_ids": ["test_merge_known_to_unknown", "test_merge_unknown_to_known"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_known_to_unknown(\n df_left, df_right, ddf_left, ddf_right_unknown, on, how, shuffle_method\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left.merge(ddf_right_unknown, on=on, how=how, shuffle=shuffle_method)\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(None for _ in range(11)))\n\n\ndef test_merge_unknown_to_known(\n df_left, df_right, ddf_left_unknown, ddf_right, on, how, shuffle_method\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left_unknown.merge(ddf_right, on=on, how=how, shuffle=shuffle_method)\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(None for _ in range(11)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_unknown_test_merge_unknown_to_unknown.assert_eq_result_division": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_unknown_to_unknown_test_merge_unknown_to_unknown.assert_eq_result_division", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 166, "end_line": 
185, "span_ids": ["test_merge_unknown_to_unknown"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_unknown_to_unknown(\n df_left,\n df_right,\n ddf_left_unknown,\n ddf_right_unknown,\n on,\n how,\n shuffle_method,\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Merge unknown to unknown\n result = ddf_left_unknown.merge(\n ddf_right_unknown, on=on, how=how, shuffle=shuffle_method\n )\n\n # Assertions\n assert_eq(result, expected)\n assert_eq(result.divisions, tuple(None for _ in range(11)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_double_bcast_right_test_merge_known_to_double_bcast_right.if_shuffle_method_tas.assert_eq_result_division": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_merge_column_and_index.py_test_merge_known_to_double_bcast_right_test_merge_known_to_double_bcast_right.if_shuffle_method_tas.assert_eq_result_division", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_merge_column_and_index.py", "file_name": "test_merge_column_and_index.py", "file_type": "text/x-python", "category": "test", "start_line": 188, "end_line": 204, "span_ids": ["test_merge_known_to_double_bcast_right"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"how\", [\"inner\", \"left\"])\ndef test_merge_known_to_double_bcast_right(\n df_left, df_right, ddf_left, ddf_right_double, on, how, shuffle_method\n):\n # Compute expected\n expected = df_left.merge(df_right, on=on, how=how)\n\n # Perform merge\n result = ddf_left.merge(\n ddf_right_double, on=on, how=how, shuffle=shuffle_method, broadcast=True\n )\n\n # Assertions\n assert_eq(result, expected)\n # Hash join used in disk-shuffling doesn't preserve divisions.\n if shuffle_method == \"task\":\n assert_eq(result.divisions, ddf_left.divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_warnings_from_dask_utils_test_impo": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_warnings_from_dask_utils_test_impo", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 26, "span_ids": 
["imports"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask.dataframe as dd\nfrom dask.base import compute_as_if_collection\nfrom dask.dataframe._compat import PANDAS_GT_140, tm\nfrom dask.dataframe.core import _Frame\nfrom dask.dataframe.methods import concat\nfrom dask.dataframe.multi import (\n _maybe_align_partitions,\n align_partitions,\n concat_indexed_dataframes,\n hash_join,\n merge_indexed_dataframes,\n)\nfrom dask.dataframe.utils import (\n assert_divisions,\n assert_eq,\n clear_known_categories,\n has_known_categories,\n make_meta,\n)\nfrom dask.utils_test import hlg_layer_topological", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_gives_proper_divisions_test_join_gives_proper_divisions.assert_eq_expected_actua": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_join_gives_proper_divisions_test_join_gives_proper_divisions.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1232, "end_line": 1243, "span_ids": ["test_join_gives_proper_divisions"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_join_gives_proper_divisions():\n # https://github.com/dask/dask/issues/8113\n df = pd.DataFrame({\"a\": [\"a\", \"b\", \"c\"]}, index=[0, 1, 2])\n ddf = dd.from_pandas(df, npartitions=1)\n\n right_df = pd.DataFrame({\"b\": [1.0, 2.0, 3.0]}, index=[\"a\", \"b\", \"c\"])\n\n expected = df.join(right_df, how=\"inner\", on=\"a\")\n actual = ddf.join(right_df, how=\"inner\", on=\"a\")\n assert actual.divisions == ddf.divisions\n\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_test_cheap_single_partition_merge.list_eq_cc_pd_merge_pd_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_cheap_single_partition_merge_test_cheap_single_partition_merge.list_eq_cc_pd_merge_pd_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1425, "end_line": 
1443, "span_ids": ["test_cheap_single_partition_merge"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"flip\", [False, True])\ndef test_cheap_single_partition_merge(flip):\n a = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")}, index=[10, 20, 30, 40, 50, 60]\n )\n aa = dd.from_pandas(a, npartitions=3)\n\n b = pd.DataFrame({\"x\": [1, 2, 3, 4], \"z\": list(\"abda\")})\n bb = dd.from_pandas(b, npartitions=1, sort=False)\n\n pd_inputs = (b, a) if flip else (a, b)\n inputs = (bb, aa) if flip else (aa, bb)\n\n cc = dd.merge(*inputs, on=\"x\", how=\"inner\")\n assert not hlg_layer_topological(cc.dask, -1).is_materialized()\n assert all(\"shuffle\" not in k[0] for k in cc.dask)\n assert len(cc.dask) == len(aa.dask) * 2 + len(bb.dask)\n\n list_eq(cc, pd.merge(*pd_inputs, on=\"x\", how=\"inner\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_join_test_categorical_join.assert_assert_eq_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_join_test_categorical_join.assert_assert_eq_expected", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2297, "end_line": 2319, "span_ids": ["test_categorical_join"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_join():\n # https://github.com/dask/dask/issues/6134\n df = pd.DataFrame(\n {\n \"join_col\": [\"a\", \"a\", \"b\", \"b\"],\n \"a\": [0, 0, 10, 10],\n }\n )\n df2 = pd.DataFrame({\"b\": [1, 2, 1, 2]}, index=[\"a\", \"a\", \"b\", \"b\"])\n\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = dd.from_pandas(df2, npartitions=1)\n ddf[\"join_col\"] = ddf[\"join_col\"].astype(\"category\")\n ddf2.index = ddf2.index.astype(\"category\")\n\n expected = ddf.compute().join(ddf2.compute(), on=\"join_col\", how=\"left\")\n\n actual_dask = ddf.join(ddf2, on=\"join_col\", how=\"left\")\n assert actual_dask.join_col.dtype == \"category\"\n\n actual = actual_dask.compute()\n assert actual.join_col.dtype == \"category\"\n assert assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_retains_category_dtype_test_categorical_merge_retains_category_dtype.assert_actual_A_dtype_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_categorical_merge_retains_category_dtype_test_categorical_merge_retains_category_dtype.assert_actual_A_dtype_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2356, "end_line": 2371, "span_ids": ["test_categorical_merge_retains_category_dtype"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_categorical_merge_retains_category_dtype():\n # https://github.com/dask/dask/issues/6142\n a = pd.DataFrame({\"A\": [0, 1, 2, 3], \"B\": [4, 5, 6, 7]})\n b = pd.DataFrame({\"A\": [0, 1, 2, 4], \"C\": [4, 5, 7, 7]})\n\n df1 = dd.from_pandas(a, 2)\n df1[\"A\"] = df1.A.astype(\"category\")\n\n df2 = dd.from_pandas(b, 2)\n df2[\"A\"] = df2.A.astype(\"category\")\n\n actual_dask = df1.merge(df2, on=\"A\")\n assert actual_dask.A.dtype == \"category\"\n\n actual = actual_dask.compute()\n assert actual.A.dtype == \"category\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_repr_test_time_rolling_constructor.assert_result__win_type_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_rolling_repr_test_time_rolling_constructor.assert_result__win_type_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 242, "end_line": 259, "span_ids": ["test_time_rolling_constructor", "test_rolling_repr", "test_time_rolling_repr"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rolling_repr():\n ddf = dd.from_pandas(pd.DataFrame([10] * 30), npartitions=3)\n res = repr(ddf.rolling(4))\n assert res == \"Rolling [window=4,center=False,axis=0]\"\n\n\ndef test_time_rolling_repr():\n res = repr(dts.rolling(\"4s\"))\n assert res == \"Rolling [window=4s,center=False,win_type=freq,axis=0]\"\n\n\ndef test_time_rolling_constructor():\n result = dts.rolling(\"4s\")\n assert result.window == \"4s\"\n assert result.min_periods is None\n assert result.win_type is None\n\n assert result._win_type == \"freq\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_methods_test_time_rolling_methods.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_methods_test_time_rolling_methods.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 262, "end_line": 295, "span_ids": ["test_time_rolling_methods"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"method,args,check_less_precise\", rolling_method_args_check_less_precise\n)\n@pytest.mark.parametrize(\"window\", [\"1S\", \"2S\", \"3S\", pd.offsets.Second(5)])\ndef test_time_rolling_methods(method, args, window, check_less_precise):\n if dd._compat.PANDAS_GT_110:\n if check_less_precise:\n check_less_precise = {\"atol\": 1e-3, \"rtol\": 1e-3}\n else:\n check_less_precise = {}\n else:\n check_less_precise = {\"check_less_precise\": check_less_precise}\n\n # DataFrame\n if method == \"apply\":\n kwargs = {\"raw\": False}\n else:\n kwargs = {}\n prolling = ts.rolling(window)\n drolling = dts.rolling(window)\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )\n\n # Series\n prolling = ts.a.rolling(window)\n drolling = dts.a.rolling(window)\n assert_eq(\n getattr(prolling, method)(*args, **kwargs),\n getattr(drolling, method)(*args, **kwargs),\n **check_less_precise,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_cov_test_time_rolling_cov.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_cov_test_time_rolling_cov.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 298, "end_line": 308, "span_ids": ["test_time_rolling_cov"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"window\", [\"1S\", \"2S\", \"3S\", pd.offsets.Second(5)])\ndef test_time_rolling_cov(window):\n # DataFrame\n prolling = ts.drop(\"a\", axis=1).rolling(window)\n drolling = dts.drop(\"a\", axis=1).rolling(window)\n assert_eq(prolling.cov(), drolling.cov())\n\n # Series\n prolling = ts.b.rolling(window)\n drolling = dts.b.rolling(window)\n assert_eq(prolling.cov(), drolling.cov())", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_fixed_chunks_test_time_rolling_large_window_fixed_chunks.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_fixed_chunks_test_time_rolling_large_window_fixed_chunks.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 311, "end_line": 326, "span_ids": ["test_time_rolling_large_window_fixed_chunks"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"window,N\",\n [(\"1s\", 10), (\"2s\", 10), (\"10s\", 10), (\"10h\", 10), (\"10s\", 100), (\"10h\", 100)],\n)\ndef test_time_rolling_large_window_fixed_chunks(window, N):\n df = pd.DataFrame(\n {\n \"a\": pd.date_range(\"2016-01-01 00:00:00\", periods=N, freq=\"1s\"),\n \"b\": np.random.randint(100, size=(N,)),\n }\n )\n df = df.set_index(\"a\")\n ddf = dd.from_pandas(df, 5)\n assert_eq(ddf.rolling(window).sum(), df.rolling(window).sum())\n assert_eq(ddf.rolling(window).count(), df.rolling(window).count())\n assert_eq(ddf.rolling(window).mean(), df.rolling(window).mean())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_variable_chunks_test_time_rolling.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_time_rolling_large_window_variable_chunks_test_time_rolling.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 329, "end_line": 353, "span_ids": ["test_time_rolling_large_window_variable_chunks", "test_time_rolling"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"window\", [\"2s\", \"5s\", \"20s\", \"10h\"])\ndef test_time_rolling_large_window_variable_chunks(window):\n df = pd.DataFrame(\n {\n \"a\": pd.date_range(\"2016-01-01 00:00:00\", periods=100, freq=\"1s\"),\n \"b\": np.random.randint(100, size=(100,)),\n }\n )\n ddf = dd.from_pandas(df, 5)\n ddf = ddf.repartition(divisions=[0, 5, 20, 28, 33, 54, 79, 80, 82, 
99])\n df = df.set_index(\"a\")\n ddf = ddf.set_index(\"a\")\n assert_eq(ddf.rolling(window).sum(), df.rolling(window).sum())\n assert_eq(ddf.rolling(window).count(), df.rolling(window).count())\n assert_eq(ddf.rolling(window).mean(), df.rolling(window).mean())\n\n\n@pytest.mark.parametrize(\"before, after\", [(\"6s\", \"6s\"), (\"2s\", \"2s\"), (\"6s\", \"2s\")])\ndef test_time_rolling(before, after):\n window = before\n before = pd.Timedelta(before)\n after = pd.Timedelta(after)\n result = dts.map_overlap(lambda x: x.rolling(window).count(), before, after)\n expected = dts.compute().rolling(window).count()\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_groupby_rolling_test_groupby_rolling.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_groupby_rolling_test_groupby_rolling.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 406, "end_line": 425, "span_ids": ["test_groupby_rolling"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_rolling():\n df = pd.DataFrame(\n {\n \"column1\": range(600),\n \"group1\": 5 * [\"g\" + str(i) for i in range(120)],\n },\n index=pd.date_range(\"20190101\", periods=60).repeat(10),\n )\n\n ddf = dd.from_pandas(df, npartitions=8)\n\n expected = df.groupby(\"group1\").rolling(\"15D\").sum()\n actual = ddf.groupby(\"group1\").rolling(\"15D\").sum()\n\n assert_eq(expected, actual, check_divisions=False)\n\n expected = df.groupby(\"group1\").column1.rolling(\"15D\").mean()\n actual = ddf.groupby(\"group1\").column1.rolling(\"15D\").mean()\n\n assert_eq(expected, actual, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_groupby_rolling_with_integer_window_raises_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_rolling.py_test_groupby_rolling_with_integer_window_raises_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_rolling.py", "file_name": "test_rolling.py", "file_type": "text/x-python", "category": "test", "start_line": 428, "end_line": 436, "span_ids": ["test_groupby_rolling_with_integer_window_raises"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_groupby_rolling_with_integer_window_raises():\n df = pd.DataFrame(\n {\"B\": [0, 1, 2, np.nan, 4, 5, 6], \"C\": [\"a\", \"a\", \"a\", \"b\", \"b\", \"a\", \"b\"]}\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n with pytest.raises(ValueError, match=\"``window`` must be a ``freq``\"):\n ddf.groupby(\"C\").rolling(2).sum()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__conflicts_with_keyword__test_default_partitions.assert_shuffle_d_d_b_np": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py__conflicts_with_keyword__test_default_partitions.assert_shuffle_d_d_b_np", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 67, "span_ids": ["imports", "test_default_partitions", "test_shuffle"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": " # conflicts with keyword argument\n\n\ndef test_shuffle(shuffle_method):\n s = shuffle_func(d, d.b, shuffle=shuffle_method)\n assert isinstance(s, dd.DataFrame)\n assert s.npartitions == d.npartitions\n\n x = dask.get(s.dask, (s._name, 0))\n y = dask.get(s.dask, (s._name, 1))\n\n assert not (set(x.b) & set(y.b)) # disjoint\n assert set(s.dask).issuperset(d.dask)\n\n assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name\n\n\ndef test_default_partitions():\n assert shuffle(d, d.b).npartitions == d.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_npartitions_test_shuffle_npartitions.assert_set_map_tuple_sc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_npartitions_test_shuffle_npartitions.assert_set_map_tuple_sc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 70, "end_line": 80, "span_ids": ["test_shuffle_npartitions"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shuffle_npartitions(shuffle_method):\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=10)\n s = shuffle(ddf, ddf.x, shuffle=shuffle_method, 
npartitions=17, max_branch=4)\n sc = s.compute()\n assert s.npartitions == 17\n assert set(s.dask).issuperset(set(ddf.dask))\n\n assert len(sc) == len(df)\n assert list(s.columns) == list(df.columns)\n assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_npartitions_lt_input_partitions_test_shuffle_npartitions_lt_input_partitions.assert_set_map_tuple_sc_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_npartitions_lt_input_partitions_test_shuffle_npartitions_lt_input_partitions.assert_set_map_tuple_sc_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 83, "end_line": 93, "span_ids": ["test_shuffle_npartitions_lt_input_partitions"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shuffle_npartitions_lt_input_partitions(shuffle_method):\n df = pd.DataFrame({\"x\": np.random.random(100)})\n ddf = dd.from_pandas(df, npartitions=20)\n s = shuffle(ddf, ddf.x, shuffle=shuffle_method, npartitions=5, max_branch=2)\n sc = s.compute()\n assert s.npartitions == 5\n assert set(s.dask).issuperset(set(ddf.dask))\n\n assert len(sc) == len(df)\n assert list(s.columns) == list(df.columns)\n assert set(map(tuple, sc.values.tolist())) == set(map(tuple, df.values.tolist()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_index_with_non_series_df2.pd_DataFrame_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_index_with_non_series_df2.pd_DataFrame_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 96, "end_line": 142, "span_ids": ["test_index_with_non_series", "test_index_with_dataframe", "test_shuffle_empty_partitions", "test_shuffle_from_one_partition_to_one_other", "impl:15"], "tokens": 525}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_index_with_non_series(shuffle_method):\n from dask.dataframe.tests.test_multi import list_eq\n\n list_eq(\n shuffle(d, d.b, shuffle=shuffle_method), shuffle(d, \"b\", 
shuffle=shuffle_method)\n )\n\n\ndef test_index_with_dataframe(shuffle_method):\n res1 = shuffle(d, d[[\"b\"]], shuffle=shuffle_method).compute()\n res2 = shuffle(d, [\"b\"], shuffle=shuffle_method).compute()\n res3 = shuffle(d, \"b\", shuffle=shuffle_method).compute()\n\n assert sorted(res1.values.tolist()) == sorted(res2.values.tolist())\n assert sorted(res1.values.tolist()) == sorted(res3.values.tolist())\n\n\ndef test_shuffle_from_one_partition_to_one_other(shuffle_method):\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n a = dd.from_pandas(df, 1)\n\n for i in [1, 2]:\n b = shuffle(a, \"x\", npartitions=i, shuffle=shuffle_method)\n assert len(a.compute(scheduler=\"sync\")) == len(b.compute(scheduler=\"sync\"))\n\n\ndef test_shuffle_empty_partitions(shuffle_method):\n df = pd.DataFrame({\"x\": [1, 2, 3] * 10})\n ddf = dd.from_pandas(df, npartitions=3)\n s = shuffle(ddf, ddf.x, npartitions=6, shuffle=shuffle_method)\n parts = compute_as_if_collection(dd.DataFrame, s.dask, s.__dask_keys__())\n for p in parts:\n assert s.columns == p.columns\n\n\ndf2 = pd.DataFrame(\n {\n \"i32\": np.array([1, 2, 3] * 3, dtype=\"int32\"),\n \"f32\": np.array([None, 2.5, 3.5] * 3, dtype=\"float32\"),\n \"cat\": pd.Series([\"a\", \"b\", \"c\"] * 3).astype(\"category\"),\n \"obj\": pd.Series([\"d\", \"e\", \"f\"] * 3),\n \"bool\": np.array([True, False, True] * 3),\n \"dt\": pd.Series(pd.date_range(\"20130101\", periods=9)),\n \"dt_tz\": pd.Series(pd.date_range(\"20130101\", periods=9, tz=\"US/Eastern\")),\n \"td\": pd.Series(pd.timedelta_range(\"2000\", periods=9)),\n }\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_general_test_set_index_self_index.assert_eq_b_df_set_index": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_general_test_set_index_self_index.assert_eq_b_df_set_index", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 179, "end_line": 215, "span_ids": ["test_set_index_general", "test_set_index_self_index"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"npartitions\", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]\n)\ndef test_set_index_general(npartitions, shuffle_method):\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n assert_eq(df.set_index(\"x\"), ddf.set_index(\"x\", shuffle=shuffle_method))\n\n assert_eq(df.set_index(\"y\"), ddf.set_index(\"y\", shuffle=shuffle_method))\n\n assert_eq(df.set_index(df.x), ddf.set_index(ddf.x, shuffle=shuffle_method))\n\n assert_eq(\n df.set_index(df.x + df.y), ddf.set_index(ddf.x + ddf.y, shuffle=shuffle_method)\n )\n\n assert_eq(df.set_index(df.x + 1), ddf.set_index(ddf.x + 
1, shuffle=shuffle_method))\n\n assert_eq(df.set_index(df.index), ddf.set_index(ddf.index, shuffle=shuffle_method))\n\n\ndef test_set_index_self_index(shuffle_method):\n df = pd.DataFrame(\n {\"x\": np.random.random(100), \"y\": np.random.random(100) // 0.2},\n index=np.random.random(100),\n )\n\n a = dd.from_pandas(df, npartitions=4)\n b = a.set_index(a.index, shuffle=shuffle_method)\n assert a is b\n\n assert_eq(b, df.set_index(df.index))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_2_test_set_index_3.assert_ddf2_npartitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_2_test_set_index_3.assert_ddf2_npartitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 243, "end_line": 266, "span_ids": ["test_set_index_2", "test_set_index_3"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_2(shuffle_method):\n df = dd.demo.make_timeseries(\n \"2000\",\n \"2004\",\n {\"value\": float, \"name\": str, \"id\": int},\n freq=\"2H\",\n partition_freq=\"1M\",\n seed=1,\n )\n\n df2 = df.set_index(\"name\", shuffle=shuffle_method)\n df2.value.sum().compute(scheduler=\"sync\")\n\n\ndef test_set_index_3(shuffle_method):\n df = pd.DataFrame(np.random.random((10, 2)), columns=[\"x\", \"y\"])\n ddf = dd.from_pandas(df, npartitions=5)\n\n ddf2 = ddf.set_index(\n \"x\", shuffle=shuffle_method, max_branch=2, npartitions=ddf.npartitions\n )\n df2 = df.set_index(\"x\")\n assert_eq(df2, ddf2)\n assert ddf2.npartitions == ddf.npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_dataframe_shuffle_on_arg_test_dataframe_shuffle_on_arg.if_ignore_index_and_shuff.else_.assert_df_out_1_index_dty": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_dataframe_shuffle_on_arg_test_dataframe_shuffle_on_arg.if_ignore_index_and_shuff.else_.assert_df_out_1_index_dty", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1095, "end_line": 1127, "span_ids": ["test_dataframe_shuffle_on_arg"], "tokens": 326}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ignore_index\", [None, True, False])\n@pytest.mark.parametrize(\n \"on\", [\"id\", \"name\", [\"id\", \"name\"], pd.Series([\"id\", \"name\"])]\n)\n@pytest.mark.parametrize(\"max_branch\", [None, 4])\ndef test_dataframe_shuffle_on_arg(on, ignore_index, max_branch, shuffle_method):\n # Make sure DataFrame.shuffle API returns the same result\n # whether the ``on`` argument is a list of column names,\n # or a separate DataFrame with equivalent values...\n df_in = dask.datasets.timeseries(\n \"2000\",\n \"2001\",\n types={\"value\": float, \"name\": str, \"id\": int},\n freq=\"2H\",\n partition_freq=\"1M\",\n seed=1,\n )\n if isinstance(on, str):\n ext_on = df_in[[on]].copy()\n else:\n ext_on = df_in[on].copy()\n df_out_1 = df_in.shuffle(\n on, shuffle=shuffle_method, ignore_index=ignore_index, max_branch=max_branch\n )\n df_out_2 = df_in.shuffle(ext_on, shuffle=shuffle_method, ignore_index=ignore_index)\n\n assert_eq(df_out_1, df_out_2, check_index=(not ignore_index))\n\n # disk shuffling doesn't support ignore_index\n if ignore_index and shuffle_method == \"tasks\":\n assert df_out_1.index.dtype != df_in.index.dtype\n else:\n assert df_out_1.index.dtype == df_in.index.dtype", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_nan_partition_test_sort_values.dd_assert_eq_got_expect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_nan_partition_test_sort_values.dd_assert_eq_got_expect_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1292, "end_line": 1313, "span_ids": ["test_sort_values", "test_set_index_nan_partition"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_nan_partition():\n d[d.a > 3].set_index(\"a\") # Set index with 1 null partition\n d[d.a > 1].set_index(\"a\", sorted=True) # Set sorted index with 0 null partitions\n a = d[d.a > 3].set_index(\"a\", sorted=True) # Set sorted index with 1 null partition\n assert_eq(a, a)\n\n\n@pytest.mark.parametrize(\"ascending\", [True, False])\n@pytest.mark.parametrize(\"by\", [\"a\", \"b\"])\n@pytest.mark.parametrize(\"nelem\", [10, 500])\ndef test_sort_values(nelem, by, ascending):\n np.random.seed(0)\n df = pd.DataFrame()\n df[\"a\"] = np.ascontiguousarray(np.arange(nelem)[::-1])\n df[\"b\"] = np.arange(100, nelem + 100)\n ddf = dd.from_pandas(df, npartitions=10)\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=by, ascending=ascending)\n expect = df.sort_values(by=by, ascending=ascending)\n dd.assert_eq(got, expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_with_nulls_test_sort_values_with_nulls.dd_assert_eq_got_expect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_with_nulls_test_sort_values_with_nulls.dd_assert_eq_got_expect_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1333, "end_line": 1358, "span_ids": ["test_sort_values_with_nulls"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"na_position\", [\"first\", \"last\"])\n@pytest.mark.parametrize(\"ascending\", [True, False])\n@pytest.mark.parametrize(\"by\", [\"a\", \"b\"])\n@pytest.mark.parametrize(\"nparts\", [1, 5])\n@pytest.mark.parametrize(\n \"data\",\n [\n {\n \"a\": list(range(50)) + [None] * 50 + list(range(50, 100)), # type: ignore\n \"b\": [None] * 100 + list(range(100, 150)), # type: ignore\n },\n {\n \"a\": list(range(15)) + [None] * 5, # type: ignore\n \"b\": list(reversed(range(20))),\n },\n ],\n)\ndef test_sort_values_with_nulls(data, nparts, by, ascending, na_position):\n df = pd.DataFrame(data)\n ddf = dd.from_pandas(df, npartitions=nparts)\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=by, ascending=ascending, na_position=na_position)\n expect = df.sort_values(by=by, ascending=ascending, na_position=na_position)\n dd.assert_eq(got, expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_custom_function_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_custom_function_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1384, "end_line": 1411, "span_ids": ["test_sort_values_bool_ascending", "test_sort_values_custom_function"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"by\", [[\"a\", \"b\"], [\"b\", \"a\"]])\n@pytest.mark.parametrize(\"nparts\", [1, 10])\ndef test_sort_values_custom_function(by, nparts):\n df = pd.DataFrame({\"a\": [1, 2, 3] * 20, \"b\": [4, 5, 6, 7] * 15})\n ddf = 
dd.from_pandas(df, npartitions=nparts)\n\n def f(partition, by_columns, ascending, na_position, **kwargs):\n return partition.sort_values(\n by_columns, ascending=ascending, na_position=na_position\n )\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(\n by=by[0], sort_function=f, sort_function_kwargs={\"by_columns\": by}\n )\n expect = df.sort_values(by=by)\n dd.assert_eq(got, expect, check_index=False)\n\n\ndef test_sort_values_bool_ascending():\n df = pd.DataFrame({\"a\": [1, 2, 3] * 20, \"b\": [4, 5, 6, 7] * 15})\n ddf = dd.from_pandas(df, npartitions=10)\n\n # attempt to sort with list of ascending booleans\n with pytest.raises(NotImplementedError):\n ddf.sort_values(by=\"a\", ascending=[True, False])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_re_from_dask_local_import_ge": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_re_from_dask_local_import_ge", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 27, "span_ids": ["imports"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import re\nimport warnings\nfrom typing import Iterable\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.core import apply_and_enforce\nfrom dask.dataframe.utils import (\n PANDAS_GT_120,\n UNKNOWN_CATEGORIES,\n assert_eq,\n check_matching_columns,\n check_meta,\n is_dataframe_like,\n is_index_like,\n is_series_like,\n make_meta,\n meta_nonempty,\n raise_on_meta_error,\n shard_df_on_index,\n)\nfrom dask.local import get_sync", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_index_test_meta_nonempty_index.None_27": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_meta_nonempty_index_test_meta_nonempty_index.None_27", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 209, "end_line": 282, "span_ids": ["test_meta_nonempty_index"], "tokens": 731}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_meta_nonempty_index():\n idx = pd.RangeIndex(1, name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.RangeIndex\n assert res.name == idx.name\n\n idx = pd.Index([1], name=\"foo\", dtype=\"int\")\n res = meta_nonempty(idx)\n assert type(res) is type(idx)\n assert res.dtype == \"int64\"\n assert res.name == idx.name\n\n idx = pd.Index([\"a\"], name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.Index\n assert res.name == idx.name\n\n idx = pd.DatetimeIndex([\"1970-01-01\"], freq=\"d\", tz=\"America/New_York\", name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.DatetimeIndex\n assert res.tz == idx.tz\n assert res.freq == idx.freq\n assert res.name == idx.name\n\n idx = pd.PeriodIndex([\"1970-01-01\"], freq=\"d\", name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.PeriodIndex\n assert res.freq == idx.freq\n assert res.name == idx.name\n\n idx = pd.TimedeltaIndex([np.timedelta64(1, \"D\")], freq=\"d\", name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.TimedeltaIndex\n assert res.freq == idx.freq\n assert res.name == idx.name\n\n idx = pd.CategoricalIndex([\"xyx\"], [\"xyx\", \"zzz\"], ordered=True, name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.CategoricalIndex\n assert (res.categories == idx.categories).all()\n assert res.ordered == idx.ordered\n assert res.name == idx.name\n\n idx = pd.CategoricalIndex([], [UNKNOWN_CATEGORIES], ordered=True, name=\"foo\")\n res = meta_nonempty(idx)\n assert type(res) is pd.CategoricalIndex\n assert res.ordered == idx.ordered\n assert res.name == idx.name\n\n levels = [pd.Index([1], name=\"a\"), pd.Index([1.0], name=\"b\")]\n codes = [[0], [0]]\n idx = pd.MultiIndex(levels=levels, names=[\"a\", \"b\"], codes=codes)\n res = meta_nonempty(idx)\n assert type(res) is pd.MultiIndex\n for idx1, idx2 in zip(idx.levels, res.levels):\n assert type(idx1) is type(idx2)\n assert idx1.name == idx2.name\n assert res.names == idx.names\n\n levels = [\n pd.Index([1], name=\"a\"),\n pd.CategoricalIndex(data=[\"xyx\"], categories=[\"xyx\"], name=\"b\"),\n pd.TimedeltaIndex([np.timedelta64(1, \"D\")], name=\"timedelta\"),\n ]\n\n codes = [[0], [0], [0]]\n\n idx = pd.MultiIndex(levels=levels, names=[\"a\", \"b\", \"timedelta\"], codes=codes)\n res = meta_nonempty(idx)\n assert type(res) is pd.MultiIndex\n for idx1, idx2 in zip(idx.levels, res.levels):\n assert type(idx1) is type(idx2)\n assert idx1.name == idx2.name\n assert res.names == idx.names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_meta_test_check_meta.None_7": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_check_meta_test_check_meta.None_7", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 327, "end_line": 400, "span_ids": ["test_check_meta"], "tokens": 694}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_check_meta():\n df = pd.DataFrame(\n {\n \"a\": [\"x\", \"y\", \"z\"],\n \"b\": [True, False, True],\n \"c\": [1, 2.5, 3.5],\n \"d\": [1, 2, 3],\n \"e\": pd.Categorical([\"x\", \"y\", \"z\"]),\n \"f\": pd.Series([1, 2, 3], dtype=np.uint64),\n }\n )\n meta = df.iloc[:0]\n\n # DataFrame metadata passthrough if correct\n assert check_meta(df, meta) is df\n # Series metadata passthrough if correct\n e = df.e\n assert check_meta(e, meta.e) is e\n # numeric_equal means floats and ints are equivalent\n d = df.d\n f = df.f\n assert check_meta(d, meta.d.astype(\"f8\"), numeric_equal=True) is d\n assert check_meta(f, meta.f.astype(\"f8\"), numeric_equal=True) is f\n assert check_meta(f, meta.f.astype(\"i8\"), numeric_equal=True) is f\n\n # Series metadata error\n with pytest.raises(ValueError) as err:\n check_meta(d, meta.d.astype(\"f8\"), numeric_equal=False)\n assert str(err.value) == (\n \"Metadata mismatch found.\\n\"\n \"\\n\"\n \"Partition type: `pandas.core.series.Series`\\n\"\n \"+----------+---------+\\n\"\n \"| | dtype |\\n\"\n \"+----------+---------+\\n\"\n \"| Found | int64 |\\n\"\n \"| Expected | float64 |\\n\"\n \"+----------+---------+\"\n )\n\n # DataFrame metadata error\n meta2 = meta.astype({\"a\": \"category\", \"d\": \"f8\"})[[\"a\", \"b\", \"c\", \"d\"]]\n df2 = df[[\"a\", \"b\", \"d\", \"e\"]]\n with pytest.raises(ValueError) as err:\n check_meta(df2, meta2, funcname=\"from_delayed\")\n\n exp = (\n \"Metadata mismatch found in `from_delayed`.\\n\"\n \"\\n\"\n \"Partition type: `pandas.core.frame.DataFrame`\\n\"\n \"+--------+----------+----------+\\n\"\n \"| Column | Found | Expected |\\n\"\n \"+--------+----------+----------+\\n\"\n \"| 'a' | object | category |\\n\"\n \"| 'c' | - | float64 |\\n\"\n \"| 'e' | category | - |\\n\"\n \"+--------+----------+----------+\"\n )\n assert str(err.value) == exp\n\n # pandas dtype metadata error\n with pytest.raises(ValueError) as err:\n check_meta(df.a, pd.Series([], dtype=\"string\"), numeric_equal=False)\n assert str(err.value) == (\n \"Metadata mismatch found.\\n\"\n \"\\n\"\n \"Partition type: `pandas.core.series.Series`\\n\"\n \"+----------+--------+\\n\"\n \"| | dtype |\\n\"\n \"+----------+--------+\\n\"\n \"| Found | object |\\n\"\n \"| Expected | string |\\n\"\n \"+----------+--------+\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_nonempty_series_sparse_test_assert_eq_sorts.with_pytest_raises_Assert.assert_eq_df1_df2_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_nonempty_series_sparse_test_assert_eq_sorts.with_pytest_raises_Assert.assert_eq_df1_df2_r_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 530, "end_line": 551, "span_ids": ["test_nonempty_series_sparse", 
"test_nonempty_series_nullable_float", "test_assert_eq_sorts"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nonempty_series_sparse():\n ser = pd.Series(pd.array([0, 1], dtype=\"Sparse\"))\n with warnings.catch_warnings(record=True) as record:\n meta_nonempty(ser)\n assert not record\n\n\n@pytest.mark.skipif(not PANDAS_GT_120, reason=\"Float64 was introduced in pandas>=1.2\")\ndef test_nonempty_series_nullable_float():\n ser = pd.Series([], dtype=\"Float64\")\n non_empty = meta_nonempty(ser)\n assert non_empty.dtype == \"Float64\"\n\n\ndef test_assert_eq_sorts():\n df1 = pd.DataFrame({\"A\": np.linspace(0, 1, 10), \"B\": np.random.random(10)})\n df2 = df1.sort_values(\"B\")\n df2_r = df2.reset_index(drop=True)\n assert_eq(df1, df2)\n assert_eq(df1, df2_r, check_index=False)\n with pytest.raises(AssertionError):\n assert_eq(df1, df2_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_assert_eq_scheduler_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_assert_eq_scheduler_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 528, "end_line": 554, "span_ids": ["test_assert_eq_scheduler"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assert_eq_scheduler():\n using_custom_scheduler = False\n\n def custom_scheduler(*args, **kwargs):\n nonlocal using_custom_scheduler\n try:\n using_custom_scheduler = True\n return get_sync(*args, **kwargs)\n finally:\n using_custom_scheduler = False\n\n def check_custom_scheduler(part: pd.DataFrame) -> pd.DataFrame:\n assert using_custom_scheduler, \"not using custom scheduler\"\n return part + 1\n\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = ddf.map_partitions(check_custom_scheduler, meta=ddf)\n\n with pytest.raises(AssertionError, match=\"not using custom scheduler\"):\n # NOTE: we compare `ddf2` to itself in order to test both sides of the `assert_eq` logic.\n assert_eq(ddf2, ddf2)\n\n assert_eq(ddf2, ddf2, scheduler=custom_scheduler)\n with dask.config.set(scheduler=custom_scheduler):\n assert_eq(ddf2, ddf2, scheduler=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_matching_columns_index_summary.return.f_name_n_entries_sum": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_check_matching_columns_index_summary.return.f_name_n_entries_sum", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 411, "end_line": 439, "span_ids": ["check_matching_columns", "index_summary"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_matching_columns(meta, actual):\n # Need nan_to_num otherwise nan comparison gives False\n if not np.array_equal(np.nan_to_num(meta.columns), np.nan_to_num(actual.columns)):\n extra = methods.tolist(actual.columns.difference(meta.columns))\n missing = methods.tolist(meta.columns.difference(actual.columns))\n if extra or missing:\n extra_info = f\" Extra: {extra}\\n Missing: {missing}\"\n else:\n extra_info = \"Order of columns does not match\"\n raise ValueError(\n \"The columns in the computed data do not match\"\n \" the columns in the provided metadata\\n\"\n f\"{extra_info}\"\n )\n\n\ndef index_summary(idx, name=None):\n \"\"\"Summarized representation of an Index.\"\"\"\n n = len(idx)\n if name is None:\n name = idx.__class__.__name__\n if n:\n head = idx[0]\n tail = idx[-1]\n summary = f\", {head} to {tail}\"\n else:\n summary = \"\"\n\n return f\"{name}: {n} entries{summary}\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__maybe_sort__maybe_sort.return.a_sort_index_if_check_i": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py__maybe_sort__maybe_sort.return.a_sort_index_if_check_i", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 508, "end_line": 521, "span_ids": ["_maybe_sort"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _maybe_sort(a, check_index: bool):\n # sort by value, then index\n try:\n if is_dataframe_like(a):\n if set(a.index.names) & set(a.columns):\n a.index.names = [\n \"-overlapped-index-name-%d\" % i for i in range(len(a.index.names))\n ]\n a = a.sort_values(by=methods.tolist(a.columns))\n else:\n a = a.sort_values()\n except (TypeError, IndexError, ValueError):\n pass\n return a.sort_index() if check_index else a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_eq_assert_eq.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_assert_eq_assert_eq.return.True", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 524, "end_line": 577, "span_ids": ["assert_eq"], "tokens": 444}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_eq(\n a,\n b,\n check_names=True,\n check_dtype=True,\n check_divisions=True,\n check_index=True,\n scheduler=\"sync\",\n **kwargs,\n):\n if check_divisions:\n assert_divisions(a, scheduler=scheduler)\n assert_divisions(b, scheduler=scheduler)\n if hasattr(a, \"divisions\") and hasattr(b, \"divisions\"):\n at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python\n bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion\n assert at == bt, (at, bt)\n assert_sane_keynames(a)\n assert_sane_keynames(b)\n a = _check_dask(\n a, check_names=check_names, check_dtypes=check_dtype, scheduler=scheduler\n )\n b = _check_dask(\n b, check_names=check_names, check_dtypes=check_dtype, scheduler=scheduler\n )\n if hasattr(a, \"to_pandas\"):\n a = a.to_pandas()\n if hasattr(b, \"to_pandas\"):\n b = b.to_pandas()\n if isinstance(a, (pd.DataFrame, pd.Series)):\n a = _maybe_sort(a, check_index)\n b = _maybe_sort(b, check_index)\n if not check_index:\n a = a.reset_index(drop=True)\n b = b.reset_index(drop=True)\n if isinstance(a, pd.DataFrame):\n tm.assert_frame_equal(\n a, b, check_names=check_names, check_dtype=check_dtype, **kwargs\n )\n elif isinstance(a, pd.Series):\n tm.assert_series_equal(\n a, b, check_names=check_names, check_dtype=check_dtype, **kwargs\n )\n elif isinstance(a, pd.Index):\n tm.assert_index_equal(a, b, exact=check_dtype, **kwargs)\n else:\n if a == b:\n return True\n else:\n if np.isnan(a):\n assert np.isnan(b)\n else:\n assert np.allclose(a, b)\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed.if_isinstance_obj_Delaye_delayed.if_task_is_obj_.else_.return.Delayed_name_graph_nout": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_delayed.if_isinstance_obj_Delaye_delayed.if_task_is_obj_.else_.return.Delayed_name_graph_nout", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 435, "end_line": 460, "span_ids": ["delayed"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@curry\ndef delayed(obj, name=None, pure=None, nout=None, traverse=True):\n if isinstance(obj, Delayed):\n return obj\n\n if is_dask_collection(obj) or traverse:\n task, collections = unpack_collections(obj)\n else:\n task = quote(obj)\n collections = set()\n\n if not (nout is None or (type(nout) is int and nout >= 0)):\n raise ValueError(\"nout must be None or a non-negative integer, got %s\" % nout)\n if task is obj:\n if not name:\n try:\n prefix = obj.__name__\n except AttributeError:\n prefix = type(obj).__name__\n token = tokenize(obj, nout, pure=pure)\n name = f\"{prefix}-{token}\"\n return DelayedLeaf(obj, name, pure=pure, nout=nout)\n else:\n if not name:\n name = f\"{type(obj).__name__}-{tokenize(task, pure=pure)}\"\n layer = {name: task}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=collections)\n return Delayed(name, graph, nout)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_right_optimize.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_right_optimize.return.dsk", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 463, "end_line": 478, "span_ids": ["right", "optimize"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def right(method):\n \"\"\"Wrapper to create 'right' version of operator given left version\"\"\"\n\n def _inner(self, other):\n return method(other, self)\n\n return _inner\n\n\ndef optimize(dsk, keys, **kwargs):\n if not isinstance(keys, (list, set)):\n keys = [keys]\n if not isinstance(dsk, HighLevelGraph):\n dsk = HighLevelGraph.from_collections(id(dsk), dsk, dependencies=())\n dsk = dsk.cull(set(flatten(keys)))\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed_Delayed.__dask_postpersist__.return.self__rebuild_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed_Delayed.__dask_postpersist__.return.self__rebuild_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 481, "end_line": 528, "span_ids": ["Delayed:5", "Delayed.__init__", "Delayed.__dask_keys__", "Delayed.__dask_postcompute__", "Delayed.__dask_postpersist__", "Delayed.__dask_tokenize__", "Delayed.__dask_layers__", "Delayed.__dask_graph__", "Delayed.dask", "Delayed", "Delayed.key"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Delayed(DaskMethodsMixin, OperatorMethodMixin):\n \"\"\"Represents a value to be computed by dask.\n\n Equivalent to the output from a single key in a dask graph.\n \"\"\"\n\n __slots__ = (\"_key\", \"_dask\", \"_length\", \"_layer\")\n\n def __init__(self, key, dsk, length=None, layer=None):\n self._key = key\n self._dask = dsk\n self._length = length\n\n # NOTE: Layer is used by `to_delayed` in other collections, but not in normal Delayed use\n self._layer = layer or key\n if isinstance(dsk, HighLevelGraph) and self._layer not in dsk.layers:\n raise ValueError(\n f\"Layer {self._layer} not in the HighLevelGraph's layers: {list(dsk.layers)}\"\n )\n\n @property\n def key(self):\n return self._key\n\n @property\n def dask(self):\n return self._dask\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return [self.key]\n\n def __dask_layers__(self):\n return (self._layer,)\n\n def __dask_tokenize__(self):\n return self.key\n\n __dask_scheduler__ = staticmethod(threaded.get)\n __dask_optimize__ = globalmethod(optimize, key=\"delayed_optimize\")\n\n def __dask_postcompute__(self):\n return single_key, ()\n\n def __dask_postpersist__(self):\n return self._rebuild, ()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed._rebuild_Delayed._rebuild.return.Delayed_key_dsk_self__l": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed._rebuild_Delayed._rebuild.return.Delayed_key_dsk_self__l", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 530, "end_line": 540, "span_ids": ["Delayed._rebuild"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Delayed(DaskMethodsMixin, OperatorMethodMixin):\n\n def _rebuild(self, dsk, *, rename=None):\n key = replace_name_in_key(self.key, rename) if rename else self.key\n if isinstance(dsk, HighLevelGraph) and len(dsk.layers) == 1:\n # FIXME Delayed is currently the only collection type that supports both high- and low-level graphs.\n # The HLG output of `optimize` will have a layer name that doesn't match `key`.\n # Remove this when Delayed is HLG-only (because `optimize` will only be passed HLGs, so it won't have\n # to generate random layer names).\n layer = next(iter(dsk.layers))\n else:\n layer = None\n return Delayed(key, dsk, self._length, layer=layer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__repr___Delayed.__getattr__.return.DelayedAttr_self_attr_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/delayed.py_Delayed.__repr___Delayed.__getattr__.return.DelayedAttr_self_attr_", "embedding": null, "metadata": {"file_path": "dask/delayed.py", "file_name": "delayed.py", "file_type": "text/x-python", "category": "implementation", "start_line": 542, "end_line": 563, "span_ids": ["Delayed.__repr__", "Delayed.__getattr__", "Delayed.__hash__", "Delayed.__dir__"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Delayed(DaskMethodsMixin, OperatorMethodMixin):\n\n def __repr__(self):\n return f\"Delayed({repr(self.key)})\"\n\n def __hash__(self):\n return hash(self.key)\n\n def __dir__(self):\n return dir(type(self))\n\n def __getattr__(self, attr):\n if attr.startswith(\"_\"):\n raise AttributeError(f\"Attribute {attr} not found\")\n\n if attr == \"visualise\":\n # added to warn users in case of spelling error\n # for more details: https://github.com/dask/dask/issues/5721\n warnings.warn(\n \"dask.delayed objects have no `visualise` method. \"\n \"Perhaps you meant `visualize`?\"\n )\n\n return DelayedAttr(self, attr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_contextlib_format_time.if_h_.else_.return.f_s_4_1f_s_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/progress.py_contextlib_format_time.if_h_.else_.return.f_s_4_1f_s_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/progress.py", "file_name": "progress.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["imports", "format_time"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nimport sys\nimport threading\nimport time\nfrom timeit import default_timer\n\nfrom ..callbacks import Callback\n\n\ndef format_time(t):\n \"\"\"Format seconds into a human readable form.\n\n >>> format_time(10.4)\n '10.4s'\n >>> format_time(1000.4)\n '16min 40.4s'\n \"\"\"\n m, s = divmod(t, 60)\n h, m = divmod(m, 60)\n if h:\n return f\"{h:2.0f}hr {m:2.0f}min {s:4.1f}s\"\n elif m:\n return f\"{m:2.0f}min {s:4.1f}s\"\n else:\n return f\"{s:4.1f}s\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_with_invalid_bokeh_kwarg_raises_error_test_plot_multiple.visualize_prof_rprof_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_with_invalid_bokeh_kwarg_raises_error_test_plot_multiple.visualize_prof_rprof_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 317, "end_line": 345, "span_ids": ["test_plot_multiple", "test_cache_profiler_plot_with_invalid_bokeh_kwarg_raises_error"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\ndef test_cache_profiler_plot_with_invalid_bokeh_kwarg_raises_error():\n with CacheProfiler(metric_name=\"non-standard\") as cprof:\n get(dsk, \"e\")\n with pytest.raises(AttributeError, match=\"foo_bar\"):\n cprof.visualize(foo_bar=\"fake\")\n\n\n@pytest.mark.skipif(\"not bokeh\")\n@pytest.mark.skipif(\"not psutil\")\ndef test_plot_multiple():\n from dask.diagnostics.profile_visualize import visualize\n\n with ResourceProfiler(dt=0.01) as rprof:\n with prof:\n get(dsk2, \"c\")\n p = visualize(\n [prof, rprof], label_size=50, title=\"Not the default\", show=False, save=False\n )\n figures = [r[0] for r in p.children[1].children]\n assert len(figures) == 2\n assert figures[0].title.text == \"Not the default\"\n assert figures[0].xaxis[0].axis_label is None\n assert figures[1].title is None\n assert figures[1].xaxis[0].axis_label == \"Time (s)\"\n # Test empty, checking for errors\n prof.clear()\n rprof.clear()\n visualize([prof, rprof], show=False, save=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_saves_file_test_saves_file_path_deprecated.with_tmpfile_html_as_f.with_open_fn_as_f_.assert_html_in_f_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_saves_file_test_saves_file_path_deprecated.with_tmpfile_html_as_f.with_open_fn_as_f_.assert_html_in_f_read_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 348, "end_line": 373, "span_ids": ["test_saves_file_path_deprecated", "test_saves_file"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\ndef test_saves_file():\n with tmpfile(\"html\") as 
fn:\n with prof:\n get(dsk, \"e\")\n # Run just to see that it doesn't error\n prof.visualize(show=False, filename=fn)\n\n assert os.path.exists(fn)\n with open(fn) as f:\n assert \"html\" in f.read().lower()\n\n\n@pytest.mark.skipif(\"not bokeh\")\ndef test_saves_file_path_deprecated():\n with tmpfile(\"html\") as fn:\n with prof:\n get(dsk, \"e\")\n # Run just to see that it warns, but still works.\n with pytest.warns(FutureWarning) as record:\n prof.visualize(show=False, file_path=fn)\n\n assert len(record) == 1\n assert os.path.exists(fn)\n with open(fn) as f:\n assert \"html\" in f.read().lower()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_get_colors_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_get_colors_", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 376, "end_line": 403, "span_ids": ["test_get_colors"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\ndef test_get_colors():\n from bokeh.palettes import Blues5, Blues256, Viridis\n\n from dask.diagnostics.profile_visualize import get_colors\n\n funcs = list(range(11))\n cmap = get_colors(\"Blues\", funcs)\n assert set(cmap) < set(Blues256)\n assert len(set(cmap)) == 11\n\n funcs = list(range(5))\n cmap = get_colors(\"Blues\", funcs)\n lk = dict(zip(funcs, Blues5))\n assert cmap == [lk[i] for i in funcs]\n\n funcs = [0, 1, 0, 1, 0, 1]\n cmap = get_colors(\"BrBG\", funcs)\n assert len(set(cmap)) == 2\n\n funcs = list(range(100))\n cmap = get_colors(\"Viridis\", funcs)\n assert len(set(cmap)) == 100\n\n funcs = list(range(300))\n cmap = get_colors(\"Viridis\", funcs)\n assert len(set(cmap)) == len(set(Viridis[256]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_os_graphviz.import_required_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_os_graphviz.import_required_", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 15, "span_ids": ["imports"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport re\nfrom functools import partial\n\nfrom 
.core import get_dependencies, ishashable, istask\nfrom .utils import apply, funcname, import_required, key_split\n\ngraphviz = import_required(\n \"graphviz\",\n \"Drawing dask graphs requires the `graphviz` python library and the \"\n \"`graphviz` system library.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install python-graphviz # either conda install\\n\"\n \" python -m pip install graphviz # or pip install and follow installation instructions\",\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_task_label_task_label.if_any_has_sub_tasks_i_f.else_.return.head": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dot.py_task_label_task_label.if_any_has_sub_tasks_i_f.else_.return.head", "embedding": null, "metadata": {"file_path": "dask/dot.py", "file_name": "dot.py", "file_type": "text/x-python", "category": "implementation", "start_line": 18, "end_line": 42, "span_ids": ["task_label"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if func is apply:\n func = task[1]\n if hasattr(func, \"funcs\"):\n if len(func.funcs) > 1:\n return f\"{funcname(func.funcs[0])}(...)\"\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(func)\n if any(has_sub_tasks(i) for i in task[1:]):\n return f\"{head}(...)\"\n else:\n return head", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.cull_Layer.cull.return.MaterializedLayer_out_an": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.cull_Layer.cull.return.MaterializedLayer_out_an", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 103, "end_line": 147, "span_ids": ["Layer.cull"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def cull(\n self, keys: set, all_hlg_keys: Iterable\n ) -> tuple[Layer, Mapping[Hashable, set]]:\n \"\"\"Remove unnecessary tasks from the layer\n\n In other words, return a new Layer with only the tasks required to\n calculate `keys` and a map of external key 
dependencies.\n\n Examples\n --------\n >>> d = MaterializedLayer({'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)})\n >>> _, deps = d.cull({'out'}, d.keys())\n >>> deps\n {'out': {'x'}, 'x': set()}\n\n Returns\n -------\n layer: Layer\n Culled layer\n deps: Map\n Map of external key dependencies\n \"\"\"\n\n if len(keys) == len(self):\n # Nothing to cull if preserving all existing keys\n return (\n self,\n {k: self.get_dependencies(k, all_hlg_keys) for k in self.keys()},\n )\n\n ret_deps = {}\n seen = set()\n out = {}\n work = keys.copy()\n while work:\n k = work.pop()\n out[k] = self[k]\n ret_deps[k] = self.get_dependencies(k, all_hlg_keys)\n for d in ret_deps[k]:\n if d not in seen:\n if d in self:\n seen.add(d)\n work.add(d)\n\n return MaterializedLayer(out, annotations=self.annotations), ret_deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_unpack___Layer.__dask_distributed_unpack__.return._dsk_state_dsk_de": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_unpack___Layer.__dask_distributed_unpack__.return._dsk_state_dsk_de", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 428, "end_line": 462, "span_ids": ["Layer.__dask_distributed_unpack__"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n @classmethod\n def __dask_distributed_unpack__(\n cls,\n state: Any,\n dsk: Mapping[str, Any],\n dependencies: Mapping[str, set],\n ) -> dict:\n \"\"\"Unpack the state of a layer previously packed by __dask_distributed_pack__()\n\n This method is called by the scheduler in Distributed in order to unpack\n the state of a layer and merge it into its global task graph. The method\n can use `dsk` and `dependencies`, which are the already materialized\n state of the preceding layers in the high level graph. 
The layers of the\n high level graph are unpacked in topological order.\n\n See Layer.__dask_distributed_pack__() for packing detail.\n\n Parameters\n ----------\n state: Any\n The state returned by Layer.__dask_distributed_pack__()\n dsk: Mapping, read-only\n The materialized low level graph of the already unpacked layers\n dependencies: Mapping, read-only\n The dependencies of each key in `dsk`\n\n Returns\n -------\n unpacked-layer: dict\n layer_dsk: Mapping[str, Any]\n Materialized (stringified) graph of the layer\n layer_deps: Mapping[str, set]\n Dependencies of each key in `layer_dsk`\n \"\"\"\n return {\"dsk\": state[\"dsk\"], \"deps\": state[\"dependencies\"]}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__reduce___Layer._repr_html_.return.get_template_highlevelgr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__reduce___Layer._repr_html_.return.get_template_highlevelgr", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 464, "end_line": 500, "span_ids": ["Layer.__copy__", "Layer._repr_html_", "Layer.__reduce__"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def __reduce__(self):\n \"\"\"Default serialization implementation, which materializes the Layer\"\"\"\n return (MaterializedLayer, (dict(self),))\n\n def __copy__(self):\n \"\"\"Default shallow copy implementation\"\"\"\n obj = type(self).__new__(self.__class__)\n obj.__dict__.update(self.__dict__)\n return obj\n\n def _repr_html_(self, layer_index=\"\", highlevelgraph_key=\"\"):\n if highlevelgraph_key != \"\":\n shortname = key_split(highlevelgraph_key)\n elif hasattr(self, \"name\"):\n shortname = key_split(self.name)\n else:\n shortname = self.__class__.__name__\n\n svg_repr = \"\"\n if (\n self.collection_annotations\n and self.collection_annotations.get(\"type\") == \"dask.array.core.Array\"\n ):\n chunks = self.collection_annotations.get(\"chunks\")\n if chunks:\n from .array.svg import svg\n\n svg_repr = svg(chunks)\n\n return get_template(\"highlevelgraph_layer.html.j2\").render(\n materialized=self.is_materialized(),\n shortname=shortname,\n layer_index=layer_index,\n highlevelgraph_key=highlevelgraph_key,\n info=self.layer_info_dict(),\n svg_repr=svg_repr,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.layer_info_dict_Layer.layer_info_dict.return.info": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.layer_info_dict_Layer.layer_info_dict.return.info", "embedding": 
null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 502, "end_line": 516, "span_ids": ["Layer.layer_info_dict"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def layer_info_dict(self):\n info = {\n \"layer_type\": type(self).__name__,\n \"is_materialized\": self.is_materialized(),\n \"number of outputs\": f\"{len(self.get_output_keys())}\",\n }\n if self.annotations is not None:\n for key, val in self.annotations.items():\n info[key] = html.escape(str(val))\n if self.collection_annotations is not None:\n for key, val in self.collection_annotations.items():\n # Hide verbose chunk details from the HTML table\n if key != \"chunks\":\n info[key] = html.escape(str(val))\n return info", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.dependents_HighLevelGraph.merge.return.cls_layers_dependencies_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.dependents_HighLevelGraph.merge.return.cls_layers_dependencies_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 809, "end_line": 833, "span_ids": ["HighLevelGraph.merge", "HighLevelGraph.copy", "HighLevelGraph.dependents"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n @property\n def dependents(self):\n return reverse_dict(self.dependencies)\n\n def copy(self):\n return HighLevelGraph(\n ensure_dict(self.layers, copy=True),\n ensure_dict(self.dependencies, copy=True),\n self.key_dependencies.copy(),\n )\n\n @classmethod\n def merge(cls, *graphs):\n layers = {}\n dependencies = {}\n for g in graphs:\n if isinstance(g, HighLevelGraph):\n layers.update(g.layers)\n dependencies.update(g.dependencies)\n elif isinstance(g, Mapping):\n layers[id(g)] = g\n dependencies[id(g)] = set()\n else:\n raise TypeError(g)\n return cls(layers, dependencies)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.visualize_HighLevelGraph.visualize.return.g": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.visualize_HighLevelGraph.visualize.return.g", 
"embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 853, "end_line": 895, "span_ids": ["HighLevelGraph.visualize"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def visualize(self, filename=\"dask-hlg.svg\", format=None, **kwargs):\n \"\"\"\n Visualize this dask high level graph.\n\n Requires ``graphviz`` to be installed.\n\n Parameters\n ----------\n filename : str or None, optional\n The name of the file to write to disk. If the provided `filename`\n doesn't include an extension, '.png' will be used by default.\n If `filename` is None, no file will be written, and the graph is\n rendered in the Jupyter notebook only.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'svg'.\n color : {None, 'layer_type'}, optional (default: None)\n Options to color nodes.\n - None, no colors.\n - layer_type, color nodes based on the layer type.\n **kwargs\n Additional keyword arguments to forward to ``to_graphviz``.\n\n Examples\n --------\n >>> x.dask.visualize(filename='dask.svg') # doctest: +SKIP\n >>> x.dask.visualize(filename='dask.svg', color='layer_type') # doctest: +SKIP\n\n Returns\n -------\n result : IPython.diplay.Image, IPython.display.SVG, or None\n See dask.dot.dot_graph for more information.\n\n See Also\n --------\n dask.dot.dot_graph\n dask.base.visualize # low level variant\n \"\"\"\n\n from .dot import graphviz_to_file\n\n g = to_graphviz(self, **kwargs)\n graphviz_to_file(g, filename, format)\n return g", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__dask_distributed_unpack___HighLevelGraph.__dask_distributed_unpack__.return._dsk_dsk_deps_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__dask_distributed_unpack___HighLevelGraph.__dask_distributed_unpack__.return._dsk_dsk_deps_deps", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1068, "end_line": 1117, "span_ids": ["HighLevelGraph.__dask_distributed_unpack__"], "tokens": 429}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n @staticmethod\n def __dask_distributed_unpack__(hlg: dict) -> dict:\n \"\"\"Unpack the high level graph for Scheduler -> Worker communication\n\n The approach is to delegate the unpackaging to each layer in the high level graph\n by calling ..._unpack__() and 
..._annotations_unpack__()\n on each layer.\n\n Parameters\n ----------\n hlg: dict\n Packed high level graph layers\n\n Returns\n -------\n unpacked-graph: dict\n dsk: dict[str, Any]\n Materialized (stringified) graph of all nodes in the high level graph\n deps: dict[str, set]\n Dependencies of each key in `dsk`\n annotations: dict[str, Any]\n Annotations for `dsk`\n \"\"\"\n from distributed.protocol.serialize import import_allowed_module\n\n dsk = {}\n deps = {}\n anno = {}\n\n # Unpack each layer (in topological order)\n for layer in hlg[\"layers\"]:\n # Find the unpack functions\n if layer[\"__module__\"] is None: # Default implementation\n unpack_state = Layer.__dask_distributed_unpack__\n unpack_anno = Layer.__dask_distributed_annotations_unpack__\n else:\n mod = import_allowed_module(layer[\"__module__\"])\n cls = getattr(mod, layer[\"__name__\"])\n unpack_state = cls.__dask_distributed_unpack__\n unpack_anno = cls.__dask_distributed_annotations_unpack__\n\n # Unpack state into a graph and key dependencies\n unpacked_layer = unpack_state(layer[\"state\"], dsk, deps)\n dsk.update(unpacked_layer[\"dsk\"])\n for k, v in unpacked_layer[\"deps\"].items():\n deps[k] = deps.get(k, set()) | v\n\n # Unpack the annotations\n unpack_anno(anno, layer[\"annotations\"], unpacked_layer[\"dsk\"].keys())\n return {\"dsk\": dsk, \"deps\": deps, \"annotations\": anno}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__repr___HighLevelGraph._repr_html_.return.get_template_highlevelgr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__repr___HighLevelGraph._repr_html_.return.get_template_highlevelgr", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1119, "end_line": 1132, "span_ids": ["HighLevelGraph._repr_html_", "HighLevelGraph.__repr__"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def __repr__(self) -> str:\n representation = f\"{type(self).__name__} with {len(self.layers)} layers.\\n\"\n representation += f\"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}>\\n\"\n for i, layerkey in enumerate(self._toposort_layers()):\n representation += f\" {i}. 
{layerkey}\\n\"\n return representation\n\n def _repr_html_(self):\n return get_template(\"highlevelgraph.html.j2\").render(\n type=type(self).__name__,\n layers=self.layers,\n toposort=self._toposort_layers(),\n n_outputs=len(self.get_all_external_keys()),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz_to_graphviz.if_color_layer_type_.layer_colors._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz_to_graphviz.if_color_layer_type_.layer_colors._", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1135, "end_line": 1182, "span_ids": ["to_graphviz"], "tokens": 391}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_graphviz(\n hg,\n data_attributes=None,\n function_attributes=None,\n rankdir=\"BT\",\n graph_attr=None,\n node_attr=None,\n edge_attr=None,\n **kwargs,\n):\n from .dot import graphviz, label, name\n\n data_attributes = data_attributes or {}\n function_attributes = function_attributes or {}\n graph_attr = graph_attr or {}\n node_attr = node_attr or {}\n edge_attr = edge_attr or {}\n\n graph_attr[\"rankdir\"] = rankdir\n node_attr[\"shape\"] = \"box\"\n node_attr[\"fontname\"] = \"helvetica\"\n\n graph_attr.update(kwargs)\n g = graphviz.Digraph(\n graph_attr=graph_attr, node_attr=node_attr, edge_attr=edge_attr\n )\n\n n_tasks = {}\n for layer in hg.dependencies:\n n_tasks[layer] = len(hg.layers[layer])\n\n min_tasks = min(n_tasks.values())\n max_tasks = max(n_tasks.values())\n\n cache = {}\n\n color = kwargs.get(\"color\")\n if color == \"layer_type\":\n layer_colors = {\n \"DataFrameIOLayer\": [\"#CCC7F9\", False], # purple\n \"ShuffleLayer\": [\"#F9CCC7\", False], # rose\n \"SimpleShuffleLayer\": [\"#F9CCC7\", False], # rose\n \"ArrayOverlayLayer\": [\"#FFD9F2\", False], # pink\n \"BroadcastJoinLayer\": [\"#D9F2FF\", False], # blue\n \"Blockwise\": [\"#D9FFE6\", False], # green\n \"BlockwiseLayer\": [\"#D9FFE6\", False], # green\n \"MaterializedLayer\": [\"#DBDEE5\", False], # gray\n }\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz.None_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_to_graphviz.None_2_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1202, "end_line": 1290, "span_ids": ["_get_some_layer_name", "to_graphviz"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_graphviz(\n hg,\n data_attributes=None,\n function_attributes=None,\n rankdir=\"BT\",\n graph_attr=None,\n node_attr=None,\n edge_attr=None,\n **kwargs,\n):\n # ... other code\n\n for layer in hg.dependencies:\n layer_name = name(layer)\n attrs = data_attributes.get(layer, {})\n\n node_label = label(layer, cache=cache)\n node_size = (\n 20\n if max_tasks == min_tasks\n else int(20 + ((n_tasks[layer] - min_tasks) / (max_tasks - min_tasks)) * 20)\n )\n\n layer_type = str(type(hg.layers[layer]).__name__)\n node_tooltips = (\n f\"A {layer_type.replace('Layer', '')} Layer with {n_tasks[layer]} Tasks.\\n\"\n )\n\n layer_ca = hg.layers[layer].collection_annotations\n if layer_ca:\n if layer_ca.get(\"type\") == \"dask.array.core.Array\":\n node_tooltips += (\n f\"Array Shape: {layer_ca.get('shape')}\\n\"\n f\"Data Type: {layer_ca.get('dtype')}\\n\"\n f\"Chunk Size: {layer_ca.get('chunksize')}\\n\"\n f\"Chunk Type: {layer_ca.get('chunk_type')}\\n\"\n )\n\n if layer_ca.get(\"type\") == \"dask.dataframe.core.DataFrame\":\n dftype = {\"pandas.core.frame.DataFrame\": \"pandas\"}\n cols = layer_ca.get(\"columns\")\n\n node_tooltips += (\n f\"Number of Partitions: {layer_ca.get('npartitions')}\\n\"\n f\"DataFrame Type: {dftype.get(layer_ca.get('dataframe_type'))}\\n\"\n f\"{len(cols)} DataFrame Columns: {str(cols) if len(str(cols)) <= 40 else '[...]'}\\n\"\n )\n\n attrs.setdefault(\"label\", str(node_label))\n attrs.setdefault(\"fontsize\", str(node_size))\n attrs.setdefault(\"tooltip\", str(node_tooltips))\n\n if color == \"layer_type\":\n node_color = layer_colors.get(layer_type)[0]\n layer_colors.get(layer_type)[1] = True\n\n attrs.setdefault(\"fillcolor\", str(node_color))\n attrs.setdefault(\"style\", \"filled\")\n\n g.node(layer_name, **attrs)\n\n for layer, deps in hg.dependencies.items():\n layer_name = name(layer)\n for dep in deps:\n dep_name = name(dep)\n g.edge(dep_name, layer_name)\n\n if color == \"layer_type\":\n legend_title = \"Key\"\n\n legend_label = (\n '<'\n \"\"\n )\n\n for layer_type, color in layer_colors.items():\n if color[1]:\n legend_label += f''\n\n legend_label += \"
Legend: Layer types
{layer_type}
>\"\n\n attrs = data_attributes.get(legend_title, {})\n attrs.setdefault(\"label\", str(legend_label))\n attrs.setdefault(\"fontsize\", \"20\")\n attrs.setdefault(\"margin\", \"0\")\n\n g.node(legend_title, **attrs)\n\n return g\n\n\ndef _get_some_layer_name(collection) -> str:\n \"\"\"Somehow get a unique name for a Layer from a non-HighLevelGraph dask mapping\"\"\"\n try:\n (name,) = collection.__dask_layers__()\n return name\n except (AttributeError, ValueError):\n # collection does not define the optional __dask_layers__ method\n # or it spuriously returns more than one layer\n return str(id(collection))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_from_concurrent_futures_i_if_os_name_nt_.else_.queue_get.return.q_get_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/local.py_from_concurrent_futures_i_if_os_name_nt_.else_.queue_get.return.q_get_", "embedding": null, "metadata": {"file_path": "dask/local.py", "file_name": "local.py", "file_type": "text/x-python", "category": "implementation", "start_line": 108, "end_line": 133, "span_ids": ["imports"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from concurrent.futures import Executor, Future\nfrom functools import partial\nfrom queue import Empty, Queue\n\nfrom . import config\nfrom .callbacks import local_callbacks, unpack_callbacks\nfrom .core import _execute_task, flatten, get_dependencies, has_tasks, reverse_dict\nfrom .order import order\nfrom .utils_test import add, inc # noqa: F401\n\nif os.name == \"nt\":\n # Python 3 windows Queue.get doesn't handle interrupts properly. 
To\n # workaround this we poll at a sufficiently large interval that it\n # shouldn't affect performance, but small enough that users trying to kill\n # an application shouldn't care.\n def queue_get(q):\n while True:\n try:\n return q.get(block=True, timeout=0.1)\n except Empty:\n pass\n\nelse:\n\n def queue_get(q):\n return q.get()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/ml.py___getattr___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/ml.py___getattr___", "embedding": null, "metadata": {"file_path": "dask/ml.py", "file_name": "ml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 13, "span_ids": ["__getattr__"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def __getattr__(value):\n try:\n import dask_ml\n except ImportError as e:\n msg = (\n \"Dask-ML is not installed.\\n\\n\"\n \"Please either conda or pip install dask-ml:\\n\\n\"\n \" conda install dask-ml # either conda install\\n\"\n \" python -m pip install dask-ml --upgrade # or pip install\"\n )\n raise ImportError(msg) from e\n return getattr(dask_ml, value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.finish_now_key_order._Although_running_a_task": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.finish_now_key_order._Although_running_a_task", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 213, "end_line": 287, "span_ids": ["order"], "tokens": 841}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n # ... other code\n\n def finish_now_key(x):\n \"\"\"Determine the order of dependents that are ready to run and be released\"\"\"\n return (-len(dependencies[x]), StrComparable(x))\n\n # Computing this for all keys can sometimes be relatively expensive :(\n partition_keys = {\n key: (\n (min_dependencies - total_dependencies[key] + 1)\n * (total_dependents - min_heights)\n )\n for key, (\n total_dependents,\n min_dependencies,\n _,\n min_heights,\n _,\n ) in metrics.items()\n }\n\n result = {}\n i = 0\n\n # `inner_stack` is used to perform a DFS along dependencies. 
Once emptied\n # (when traversing dependencies), this continue down a path along dependents\n # until a root node is reached.\n #\n # Sometimes, a better path along a dependent is discovered (i.e., something\n # that is easier to compute and doesn't requiring holding too much in memory).\n # In this case, the current `inner_stack` is appended to `inner_stacks` and\n # we begin a new DFS from the better node.\n #\n # A \"better path\" is determined by comparing `partition_keys`.\n inner_stack = [min(init_stack, key=initial_stack_key)]\n inner_stack_pop = inner_stack.pop\n inner_stacks = []\n inner_stacks_append = inner_stacks.append\n inner_stacks_extend = inner_stacks.extend\n inner_stacks_pop = inner_stacks.pop\n\n # Okay, now we get to the data structures used for fancy behavior.\n #\n # As we traverse nodes in the DFS along dependencies, we partition the dependents\n # via `partition_key`. A dependent goes to:\n # 1) `inner_stack` if it's better than our current target,\n # 2) `next_nodes` if the partition key is lower than it's parent,\n # 3) `later_nodes` otherwise.\n # When the inner stacks are depleted, we process `next_nodes`. If `next_nodes` is\n # empty (and `outer_stacks` is empty`), then we process `later_nodes` the same way.\n # These dicts use `partition_keys` as keys. We process them by placing the values\n # in `outer_stack` so that the smallest keys will be processed first.\n next_nodes = defaultdict(list)\n later_nodes = defaultdict(list)\n\n # `outer_stack` is used to populate `inner_stacks`. From the time we partition the\n # dependents of a node, we group them: one list per partition key per parent node.\n # This likely results in many small lists. We do this to avoid sorting many larger\n # lists (i.e., to avoid n*log(n) behavior). So, we have many small lists that we\n # partitioned, and we keep them in the order that we saw them (we will process them\n # in a FIFO manner). By delaying sorting for as long as we can, we can first filter\n # out nodes that have already been computed. All this complexity is worth it!\n outer_stack = []\n outer_stack_extend = outer_stack.extend\n outer_stack_pop = outer_stack.pop\n\n # Keep track of nodes that are in `inner_stack` or `inner_stacks` so we don't\n # process them again.\n if skip_root_node:\n seen = set(root_nodes)\n else:\n seen = set() # seen in an inner_stack (and has dependencies)\n seen_update = seen.update\n seen_add = seen.add\n\n # \"singles\" are tasks that are available to run, and when run may free a dependency.\n # Although running a task to free a dependency may seem like a wash (net zero), it\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order._can_be_beneficial_by_pr_order.is_init_sorted.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order._can_be_beneficial_by_pr_order.is_init_sorted.False", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 288, "end_line": 344, "span_ids": ["order"], "tokens": 838}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n # can be beneficial by providing more opportunities for a later task to free even\n # more data. So, it costs us little in the short term to more eagerly compute\n # chains of tasks that keep the same number of data in memory, and the longer term\n # rewards are potentially high. I would expect a dynamic scheduler to have similar\n # behavior, so I think it makes sense to do the same thing here in `dask.order`.\n #\n # When we gather tasks in `singles`, we do so optimistically: running the task *may*\n # free the parent, but it also may not, because other dependents of the parent may\n # be in the inner stacks. When we process `singles`, we run tasks that *will* free\n # the parent, otherwise we move the task to `later_singles`. `later_singles` is run\n # when there are no inner stacks, so it is safe to run all of them (because no other\n # dependents will be hiding in the inner stacks to keep hold of the parent).\n # `singles` is processed when the current item on the stack needs to compute\n # dependencies before it can be run.\n #\n # Processing singles is meant to be a detour. Doing so should not change our\n # tactical goal in most cases. Hence, we set `add_to_inner_stack = False`.\n #\n # In reality, this is a pretty limited strategy for running a task to free a\n # dependency. A thorough strategy would be to check whether running a dependent\n # with `num_needed[dep] == 0` would free *any* of its dependencies. This isn't\n # what we do. This data isn't readily or cheaply available. We only check whether\n # it will free its last dependency that was computed (the current `item`). This is\n # probably okay. In general, our tactics and strategies for ordering try to be\n # memory efficient, so we shouldn't try too hard to work around what we already do.\n # However, sometimes the DFS nature of it leaves \"easy-to-compute\" stragglers behind.\n # The current approach is very fast to compute, can be beneficial, and is generally\n # low-risk. There could be more we could do here though. Our static scheduling\n # here primarily looks at \"what dependent should we run next?\" instead of \"what\n # dependency should we try to free?\" Two sides to the same question, but a dynamic\n # scheduler is much better able to answer the latter one, because it knows the size\n # of data and can react to current state. 
Does adding a little more dynamic-like\n # behavior to `dask.order` add any tension to running with an actual dynamic\n # scheduler? Should we defer to dynamic schedulers and let them behave like this\n # if they so choose? Maybe. However, I'm sensitive to the multithreaded scheduler,\n # which is heavily dependent on the ordering obtained here.\n singles = {}\n singles_items = singles.items()\n singles_clear = singles.clear\n later_singles = []\n later_singles_append = later_singles.append\n later_singles_clear = later_singles.clear\n\n # Priority of being processed\n # 1. inner_stack\n # 2. singles (may be moved to later_singles)\n # 3. inner_stacks\n # 4. later_singles\n # 5. next_nodes\n # 6. outer_stack\n # 7. later_nodes\n # 8. init_stack\n\n # alias for speed\n set_difference = set.difference\n\n is_init_sorted = False\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True__order.while_True_.while_True_.if_already_seen_.deps.deps_already_seen": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True__order.while_True_.while_True_.if_already_seen_.deps.deps_already_seen", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 345, "end_line": 551, "span_ids": ["order"], "tokens": 1746}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n # ... other code\n while True:\n while True:\n # Perform a DFS along dependencies until we complete our tactical goal\n if inner_stack:\n item = inner_stack_pop()\n if item in result:\n continue\n if num_needed[item]:\n if not skip_root_node or item not in root_nodes:\n inner_stack.append(item)\n deps = set_difference(dependencies[item], result)\n if 1 < len(deps) < 1000:\n inner_stack.extend(\n sorted(deps, key=dependencies_key, reverse=True)\n )\n else:\n inner_stack.extend(deps)\n seen_update(deps)\n if not singles:\n continue\n process_singles = True\n else:\n result[item] = i\n i += 1\n deps = dependents[item]\n add_to_inner_stack = True\n\n if metrics[item][3] == 1: # min_height\n # Don't leave any dangling single nodes! 
Finish all dependents that are\n # ready and are also root nodes.\n finish_now = {\n dep\n for dep in deps\n if not dependents[dep] and num_needed[dep] == 1\n }\n if finish_now:\n deps -= finish_now # Safe to mutate\n if len(finish_now) > 1:\n finish_now = sorted(finish_now, key=finish_now_key)\n for dep in finish_now:\n result[dep] = i\n i += 1\n add_to_inner_stack = False\n elif skip_root_node:\n for dep in root_nodes:\n num_needed[dep] -= 1\n # Use remove here to complain loudly if our assumptions change\n deps.remove(dep) # Safe to mutate\n\n if deps:\n for dep in deps:\n num_needed[dep] -= 1\n process_singles = False\n else:\n continue\n elif inner_stacks:\n inner_stack = inner_stacks_pop()\n inner_stack_pop = inner_stack.pop\n continue\n elif singles:\n process_singles = True\n elif later_singles:\n # No need to be optimistic: all nodes in `later_singles` will free a dependency\n # when run, so no need to check whether dependents are in `seen`.\n deps = set()\n for single in later_singles:\n if single in result:\n continue\n while True:\n dep2 = dependents[single]\n result[single] = i\n i += 1\n if metrics[single][3] == 1: # min_height\n # Don't leave any dangling single nodes! Finish all dependents that are\n # ready and are also root nodes.\n finish_now = {\n dep\n for dep in dep2\n if not dependents[dep] and num_needed[dep] == 1\n }\n if finish_now:\n dep2 -= finish_now # Safe to mutate\n if len(finish_now) > 1:\n finish_now = sorted(finish_now, key=finish_now_key)\n for dep in finish_now:\n result[dep] = i\n i += 1\n elif skip_root_node:\n for dep in root_nodes:\n num_needed[dep] -= 1\n # Use remove here to complain loudly if our assumptions change\n dep2.remove(dep) # Safe to mutate\n if dep2:\n for dep in dep2:\n num_needed[dep] -= 1\n if len(dep2) == 1:\n # Fast path! We trim down `dep2` above hoping to reach here.\n (single,) = dep2\n if not num_needed[single]:\n # Keep it going!\n dep2 = dependents[single]\n continue\n deps |= dep2\n break\n later_singles_clear()\n deps = set_difference(deps, result)\n if not deps:\n continue\n add_to_inner_stack = False\n process_singles = True\n else:\n break\n\n if process_singles and singles:\n # We gather all dependents of all singles into `deps`, which we then process below.\n # A lingering question is: what should we use for `item`? `item_key` is used to\n # determine whether each dependent goes to `next_nodes` or `later_nodes`. Currently,\n # we use the last value of `item` (i.e., we don't do anything).\n deps = set()\n add_to_inner_stack = True if inner_stack or inner_stacks else False\n for single, parent in singles_items:\n if single in result:\n continue\n if (\n add_to_inner_stack\n and len(set_difference(dependents[parent], result)) > 1\n ):\n later_singles_append(single)\n continue\n\n while True:\n dep2 = dependents[single]\n result[single] = i\n i += 1\n if metrics[single][3] == 1: # min_height\n # Don't leave any dangling single nodes! 
Finish all dependents that are\n # ready and are also root nodes.\n finish_now = {\n dep\n for dep in dep2\n if not dependents[dep] and num_needed[dep] == 1\n }\n if finish_now:\n dep2 -= finish_now # Safe to mutate\n if len(finish_now) > 1:\n finish_now = sorted(finish_now, key=finish_now_key)\n for dep in finish_now:\n result[dep] = i\n i += 1\n elif skip_root_node:\n for dep in root_nodes:\n num_needed[dep] -= 1\n # Use remove here to complain loudly if our assumptions change\n dep2.remove(dep) # Safe to mutate\n if dep2:\n for dep in dep2:\n num_needed[dep] -= 1\n if add_to_inner_stack:\n already_seen = dep2 & seen\n if already_seen:\n if len(dep2) == len(already_seen):\n if len(already_seen) == 1:\n (single,) = already_seen\n if not num_needed[single]:\n dep2 = dependents[single]\n continue\n break\n dep2 = dep2 - already_seen\n else:\n already_seen = False\n if len(dep2) == 1:\n # Fast path! We trim down `dep2` above hoping to reach here.\n (single,) = dep2\n if not num_needed[single]:\n if not already_seen:\n # Keep it going!\n dep2 = dependents[single]\n continue\n later_singles_append(single)\n break\n deps |= dep2\n break\n\n deps = set_difference(deps, result)\n singles_clear()\n if not deps:\n continue\n add_to_inner_stack = False\n\n # If inner_stack is empty, then we typically add the best dependent to it.\n # However, we don't add to it if we complete a node early via \"finish_now\" above\n # or if a dependent is already on an inner_stack. In this case, we add the\n # dependents (not in an inner_stack) to next_nodes or later_nodes to handle later.\n # This serves three purposes:\n # 1. shrink `deps` so that it can be processed faster,\n # 2. make sure we don't process the same dependency repeatedly, and\n # 3. make sure we don't accidentally continue down an expensive-to-compute path.\n already_seen = deps & seen\n if already_seen:\n if len(deps) == len(already_seen):\n if len(already_seen) == 1:\n (dep,) = already_seen\n if not num_needed[dep]:\n singles[dep] = item\n continue\n add_to_inner_stack = False\n deps = deps - already_seen\n # ... other code\n # ... other code\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.while_True_.if_len_deps_1__order.while_True_.while_True_.if_len_deps_1_.elif_len_deps_2_.if_inner_stack_.else_.if_add_to_inner_stack_.else_.if_key2_item_key_.else_.later_nodes_key2_append_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.while_True_.if_len_deps_1__order.while_True_.while_True_.if_len_deps_1_.elif_len_deps_2_.if_inner_stack_.else_.if_add_to_inner_stack_.else_.if_key2_item_key_.else_.later_nodes_key2_append_", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 553, "end_line": 668, "span_ids": ["order"], "tokens": 1038}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n while True:\n while True:\n # Perform a DFS along dependencies until we complete our tactical goal\n # ... other code\n\n if len(deps) == 1:\n # Fast path! We trim down `deps` above hoping to reach here.\n (dep,) = deps\n if not inner_stack:\n if add_to_inner_stack:\n inner_stack = [dep]\n inner_stack_pop = inner_stack.pop\n seen_add(dep)\n continue\n key = partition_keys[dep]\n else:\n key = partition_keys[dep]\n if key < partition_keys[inner_stack[0]]:\n # Run before `inner_stack` (change tactical goal!)\n inner_stacks_append(inner_stack)\n inner_stack = [dep]\n inner_stack_pop = inner_stack.pop\n seen_add(dep)\n continue\n if not num_needed[dep]:\n # We didn't put the single dependency on the stack, but we should still\n # run it soon, because doing so may free its parent.\n singles[dep] = item\n elif key < partition_keys[item]:\n next_nodes[key].append(deps)\n else:\n later_nodes[key].append(deps)\n elif len(deps) == 2:\n # We special-case when len(deps) == 2 so that we may place a dep on singles.\n # Otherwise, the logic here is the same as when `len(deps) > 2` below.\n #\n # Let me explain why this is a special case. If we put the better dependent\n # onto the inner stack, then it's guaranteed to run next. 
After it's run,\n # then running the other dependent *may* allow their parent to be freed.\n dep, dep2 = deps\n key = partition_keys[dep]\n key2 = partition_keys[dep2]\n if (\n key2 < key\n or key == key2\n and dependents_key(dep2) < dependents_key(dep)\n ):\n dep, dep2 = dep2, dep\n key, key2 = key2, key\n if inner_stack:\n prev_key = partition_keys[inner_stack[0]]\n if key2 < prev_key:\n inner_stacks_append(inner_stack)\n inner_stacks_append([dep2])\n inner_stack = [dep]\n inner_stack_pop = inner_stack.pop\n seen_update(deps)\n if not num_needed[dep2]:\n if process_singles:\n later_singles_append(dep2)\n else:\n singles[dep2] = item\n elif key < prev_key:\n inner_stacks_append(inner_stack)\n inner_stack = [dep]\n inner_stack_pop = inner_stack.pop\n seen_add(dep)\n if not num_needed[dep2]:\n if process_singles:\n later_singles_append(dep2)\n else:\n singles[dep2] = item\n elif key2 < partition_keys[item]:\n next_nodes[key2].append([dep2])\n else:\n later_nodes[key2].append([dep2])\n else:\n item_key = partition_keys[item]\n if key2 < item_key:\n next_nodes[key].append([dep])\n next_nodes[key2].append([dep2])\n elif key < item_key:\n next_nodes[key].append([dep])\n later_nodes[key2].append([dep2])\n else:\n later_nodes[key].append([dep])\n later_nodes[key2].append([dep2])\n else:\n if add_to_inner_stack:\n if not num_needed[dep2]:\n inner_stacks_append(inner_stack)\n inner_stack = [dep]\n inner_stack_pop = inner_stack.pop\n seen_add(dep)\n singles[dep2] = item\n elif key == key2 and 5 * partition_keys[item] > 22 * key:\n inner_stacks_append(inner_stack)\n inner_stacks_append([dep2])\n inner_stack = [dep]\n inner_stack_pop = inner_stack.pop\n seen_update(deps)\n else:\n inner_stacks_append(inner_stack)\n inner_stack = [dep]\n inner_stack_pop = inner_stack.pop\n seen_add(dep)\n if key2 < partition_keys[item]:\n next_nodes[key2].append([dep2])\n else:\n later_nodes[key2].append([dep2])\n else:\n item_key = partition_keys[item]\n if key2 < item_key:\n next_nodes[key].append([dep])\n next_nodes[key2].append([dep2])\n elif key < item_key:\n next_nodes[key].append([dep])\n later_nodes[key2].append([dep2])\n else:\n later_nodes[key].append([dep])\n later_nodes[key2].append([dep2])\n # ... other code\n # ... other code\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.while_True_.if_len_deps_1_.else__order.while_True_.while_True_.if_len_deps_1_.else_.if_inner_stack_.else_.for_key_vals_in_dep_pool.if_key_item_key_.else_.later_nodes_key_append_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.while_True_.if_len_deps_1_.else__order.while_True_.while_True_.if_len_deps_1_.else_.if_inner_stack_.else_.for_key_vals_in_dep_pool.if_key_item_key_.else_.later_nodes_key_append_v", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 669, "end_line": 739, "span_ids": ["order"], "tokens": 789}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n while True:\n while True:\n # Perform a DFS along dependencies until we complete our tactical goal\n\n if len(deps) == 1:\n # Fast path! We trim down `deps` above hoping to reach here.\n # ... other code\n else:\n # Slow path :(. This requires grouping by partition_key.\n dep_pools = defaultdict(list)\n for dep in deps:\n dep_pools[partition_keys[dep]].append(dep)\n item_key = partition_keys[item]\n if inner_stack:\n # If we have an inner_stack, we need to look for a \"better\" path\n prev_key = partition_keys[inner_stack[0]]\n now_keys = [] # < inner_stack[0]\n for key, vals in dep_pools.items():\n if key < prev_key:\n now_keys.append(key)\n elif key < item_key:\n next_nodes[key].append(vals)\n else:\n later_nodes[key].append(vals)\n if now_keys:\n # Run before `inner_stack` (change tactical goal!)\n inner_stacks_append(inner_stack)\n if 1 < len(now_keys):\n now_keys.sort(reverse=True)\n for key in now_keys:\n pool = dep_pools[key]\n if 1 < len(pool) < 100:\n pool.sort(key=dependents_key, reverse=True)\n inner_stacks_extend([dep] for dep in pool)\n seen_update(pool)\n inner_stack = inner_stacks_pop()\n inner_stack_pop = inner_stack.pop\n else:\n # If we don't have an inner_stack, then we don't need to look\n # for a \"better\" path, but we do need traverse along dependents.\n if add_to_inner_stack:\n min_key = min(dep_pools)\n min_pool = dep_pools.pop(min_key)\n if len(min_pool) == 1:\n inner_stack = min_pool\n seen_update(inner_stack)\n elif (\n 10 * item_key > 11 * len(min_pool) * len(min_pool) * min_key\n ):\n # Put all items in min_pool onto inner_stacks.\n # I know this is a weird comparison. Hear me out.\n # Although it is often beneficial to put all of the items in `min_pool`\n # onto `inner_stacks` to process next, it is very easy to be overzealous.\n # Sometimes it is actually better to defer until `next_nodes` is handled.\n # We should only put items onto `inner_stacks` that we're reasonably\n # confident about. The above formula is a best effort heuristic given\n # what we have easily available. It is obviously very specific to our\n # choice of partition_key. 
Dask tests take this route about 40% of the time.\n if len(min_pool) < 100:\n min_pool.sort(key=dependents_key, reverse=True)\n inner_stacks_extend([dep] for dep in min_pool)\n inner_stack = inner_stacks_pop()\n seen_update(min_pool)\n else:\n # Put one item in min_pool onto inner_stack and the rest into next_nodes.\n if len(min_pool) < 100:\n inner_stack = [min(min_pool, key=dependents_key)]\n else:\n inner_stack = [min_pool.pop()]\n next_nodes[min_key].append(min_pool)\n seen_update(inner_stack)\n\n inner_stack_pop = inner_stack.pop\n for key, vals in dep_pools.items():\n if key < item_key:\n next_nodes[key].append(vals)\n else:\n later_nodes[key].append(vals)\n # ... other code\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.if_len_dependencies_l_order.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_order.while_True_.if_len_dependencies_l_order.return.result", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 741, "end_line": 802, "span_ids": ["order"], "tokens": 568}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def order(dsk, dependencies=None):\n while True:\n # ... 
other code\n\n if len(dependencies) == len(result):\n break # all done!\n\n if next_nodes:\n for key in sorted(next_nodes, reverse=True):\n # `outer_stack` may not be empty here--it has data from previous `next_nodes`.\n # Since we pop things off of it (onto `inner_stacks`), this means we handle\n # multiple `next_nodes` in a LIFO manner.\n outer_stack_extend(reversed(next_nodes[key]))\n next_nodes = defaultdict(list)\n\n while outer_stack:\n # Try to add a few items to `inner_stacks`\n deps = [x for x in outer_stack_pop() if x not in result]\n if deps:\n if 1 < len(deps) < 100:\n deps.sort(key=dependents_key, reverse=True)\n inner_stacks_extend([dep] for dep in deps)\n seen_update(deps)\n break\n\n if inner_stacks:\n continue\n\n if later_nodes:\n # You know all those dependents with large keys we've been hanging onto to run \"later\"?\n # Well, \"later\" has finally come.\n next_nodes, later_nodes = later_nodes, next_nodes\n continue\n\n # We just finished computing a connected group.\n # Let's choose the first `item` in the next group to compute.\n # If we have few large groups left, then it's best to find `item` by taking a minimum.\n # If we have many small groups left, then it's best to sort.\n # If we have many tiny groups left, then it's best to simply iterate.\n if not is_init_sorted:\n prev_len = len(init_stack)\n if type(init_stack) is dict:\n init_stack = set(init_stack)\n init_stack = set_difference(init_stack, result)\n N = len(init_stack)\n m = prev_len - N\n # is `min` likely better than `sort`?\n if m >= N or N + (N - m) * log(N - m) < N * log(N):\n item = min(init_stack, key=initial_stack_key)\n continue\n\n if len(init_stack) < 10000:\n init_stack = sorted(init_stack, key=initial_stack_key, reverse=True)\n else:\n init_stack = list(init_stack)\n init_stack_pop = init_stack.pop\n is_init_sorted = True\n\n if skip_root_node and item in root_nodes:\n item = init_stack_pop()\n\n while item in result:\n item = init_stack_pop()\n inner_stack.append(item)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_StrComparable_OrderInfo.namedtuple_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_StrComparable_OrderInfo.namedtuple_", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 980, "end_line": 1018, "span_ids": ["impl", "StrComparable", "StrComparable.__lt__", "StrComparable.__init__"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StrComparable:\n \"\"\"Wrap object so that it defaults to string comparison\n\n When comparing two objects of different types, Python fails\n\n >>> 'a' < 1\n Traceback (most recent call last):\n ...\n TypeError: '<' not supported between instances of 'str' and 'int'\n\n This class wraps the object so that, when this would occur, it instead\n compares the string representation\n\n >>> StrComparable('a') < 
StrComparable(1)\n False\n \"\"\"\n\n __slots__ = (\"obj\",)\n\n def __init__(self, obj):\n self.obj = obj\n\n def __lt__(self, other):\n try:\n return self.obj < other.obj\n except Exception:\n return str(self.obj) < str(other.obj)\n\n\nOrderInfo = namedtuple(\n \"OrderInfo\",\n (\n \"order\",\n \"age\",\n \"num_data_when_run\",\n \"num_data_when_released\",\n \"num_dependencies_freed\",\n ),\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_diagnostics_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/order.py_diagnostics_", "embedding": null, "metadata": {"file_path": "dask/order.py", "file_name": "order.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1021, "end_line": 1074, "span_ids": ["diagnostics"], "tokens": 448}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def diagnostics(dsk, o=None, dependencies=None):\n \"\"\"Simulate runtime metrics as though running tasks one at a time in order.\n\n These diagnostics can help reveal behaviors of and issues with ``order``.\n\n Returns a dict of `namedtuple(\"OrderInfo\")` and a list of the number of outputs held over time.\n\n OrderInfo fields:\n - order : the order in which the node is run.\n - age : how long the output of a node is held.\n - num_data_when_run : the number of outputs held in memory when a node is run.\n - num_data_when_released : the number of outputs held in memory when the output is released.\n - num_dependencies_freed : the number of dependencies freed by running the node.\n \"\"\"\n if dependencies is None:\n dependencies, dependents = get_deps(dsk)\n else:\n dependents = reverse_dict(dependencies)\n if o is None:\n o = order(dsk, dependencies=dependencies)\n\n pressure = []\n num_in_memory = 0\n age = {}\n runpressure = {}\n releasepressure = {}\n freed = {}\n num_needed = {key: len(val) for key, val in dependents.items()}\n for i, key in enumerate(sorted(dsk, key=o.__getitem__)):\n pressure.append(num_in_memory)\n runpressure[key] = num_in_memory\n released = 0\n for dep in dependencies[key]:\n num_needed[dep] -= 1\n if num_needed[dep] == 0:\n age[dep] = i - o[dep]\n releasepressure[dep] = num_in_memory\n released += 1\n freed[key] = released\n if dependents[key]:\n num_in_memory -= released - 1\n else:\n age[key] = 0\n releasepressure[key] = num_in_memory\n num_in_memory -= released\n\n rv = {\n key: OrderInfo(\n val, age[key], runpressure[key], releasepressure[key], freed[key]\n )\n for key, val in o.items()\n }\n return rv, pressure", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_array_on_object_dtype_test_tokenize_numpy_array_on_object_dtype.with_dask_config_set_to.with_pytest_raises_Runtim.tokenize_x_": 
{"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_numpy_array_on_object_dtype_test_tokenize_numpy_array_on_object_dtype.with_dask_config_set_to.with_pytest_raises_Runtim.tokenize_x_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 130, "end_line": 150, "span_ids": ["test_tokenize_numpy_array_on_object_dtype"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_numpy_array_on_object_dtype():\n a = np.array([\"a\", \"aa\", \"aaa\"], dtype=object)\n assert tokenize(a) == tokenize(a)\n assert tokenize(np.array([\"a\", None, \"aaa\"], dtype=object)) == tokenize(\n np.array([\"a\", None, \"aaa\"], dtype=object)\n )\n assert tokenize(\n np.array([(1, \"a\"), (1, None), (1, \"aaa\")], dtype=object)\n ) == tokenize(np.array([(1, \"a\"), (1, None), (1, \"aaa\")], dtype=object))\n\n # Trigger non-deterministic hashing for object dtype\n class NoPickle:\n pass\n\n x = np.array([\"a\", None, NoPickle], dtype=object)\n assert tokenize(x) != tokenize(x)\n\n with dask.config.set({\"tokenize.ensure-deterministic\": True}):\n with pytest.raises(RuntimeError, match=\"cannot be deterministically hashed\"):\n tokenize(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_index_test_tokenize_same_repr.assert_tokenize_Foo_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_pandas_index_test_tokenize_same_repr.assert_tokenize_Foo_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 336, "end_line": 361, "span_ids": ["test_tokenize_kwargs", "test_tokenize_same_repr", "test_tokenize_pandas_index"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not pd\")\ndef test_tokenize_pandas_index():\n idx = pd.Index([\"a\", \"b\"])\n assert tokenize(idx) == tokenize(idx)\n\n idx = pd.MultiIndex.from_product([[\"a\", \"b\"], [0, 1]])\n assert tokenize(idx) == tokenize(idx)\n\n\ndef test_tokenize_kwargs():\n assert tokenize(5, x=1) == tokenize(5, x=1)\n assert tokenize(5) != tokenize(5, x=1)\n assert tokenize(5, x=1) != tokenize(5, x=2)\n assert tokenize(5, x=1) != tokenize(5, y=1)\n assert tokenize(5, foo=\"bar\") != tokenize(5, {\"foo\": \"bar\"})\n\n\ndef test_tokenize_same_repr():\n class Foo:\n def __init__(self, x):\n self.x = x\n\n def __repr__(self):\n return \"a foo\"\n\n 
assert tokenize(Foo(1)) != tokenize(Foo(2))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_method_test_tokenize_method.assert_before_after": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_method_test_tokenize_method.assert_before_after", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 354, "end_line": 374, "span_ids": ["test_tokenize_method"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize_method():\n class Foo:\n def __init__(self, x):\n self.x = x\n\n def __dask_tokenize__(self):\n return self.x\n\n a, b = Foo(1), Foo(2)\n assert tokenize(a) == tokenize(a)\n assert tokenize(a) != tokenize(b)\n\n for ensure in [True, False]:\n with dask.config.set({\"tokenize.ensure-deterministic\": ensure}):\n assert tokenize(a) == tokenize(a)\n\n # dispatch takes precedence\n before = tokenize(a)\n normalize_token.register(Foo, lambda self: self.x + 1)\n after = tokenize(a)\n assert before != after", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_with_recursion_error_test_tokenize_datetime_date.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_object_with_recursion_error_test_tokenize_datetime_date.None_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 474, "end_line": 493, "span_ids": ["test_tokenize_object_with_recursion_error", "test_tokenize_datetime_date"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize_object_with_recursion_error():\n cycle = dict(a=None)\n cycle[\"a\"] = cycle\n\n assert len(tokenize(cycle)) == 32\n\n with dask.config.set({\"tokenize.ensure-deterministic\": True}):\n with pytest.raises(RuntimeError, match=\"cannot be deterministically hashed\"):\n tokenize(cycle)\n\n\ndef test_tokenize_datetime_date():\n # Same date\n assert tokenize(datetime.date(2021, 6, 25)) == tokenize(datetime.date(2021, 6, 25))\n # Different year\n assert tokenize(datetime.date(2021, 6, 25)) != tokenize(datetime.date(2022, 6, 25))\n # Different month\n assert tokenize(datetime.date(2021, 6, 25)) != 
tokenize(datetime.date(2021, 7, 25))\n # Different day\n assert tokenize(datetime.date(2021, 6, 25)) != tokenize(datetime.date(2021, 6, 26))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_highlevelgraph_test_visualize_highlevelgraph.with_tmpdir_as_d_.assert_isinstance_viz_gr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_visualize_highlevelgraph_test_visualize_highlevelgraph.with_tmpdir_as_d_.assert_isinstance_viz_gr", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1011, "end_line": 1021, "span_ids": ["test_visualize_highlevelgraph"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not da\")\n@pytest.mark.skipif(\n sys.flags.optimize, reason=\"graphviz exception with Python -OO flag\"\n)\ndef test_visualize_highlevelgraph():\n graphviz = pytest.importorskip(\"graphviz\")\n with tmpdir() as d:\n x = da.arange(5, chunks=2)\n viz = x.dask.visualize(filename=os.path.join(d, \"mydask.png\"))\n # check visualization will automatically render in the jupyter notebook\n assert isinstance(viz, graphviz.Digraph)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_raise_get_keyword_test_get_scheduler.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_raise_get_keyword_test_get_scheduler.None_6", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1362, "end_line": 1384, "span_ids": ["test_get_scheduler", "MyExecutor", "test_raise_get_keyword"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_raise_get_keyword():\n x = delayed(inc)(1)\n\n with pytest.raises(TypeError) as info:\n x.compute(get=dask.get)\n\n assert \"scheduler=\" in str(info.value)\n\n\nclass MyExecutor(Executor):\n _max_workers = None\n\n\ndef test_get_scheduler():\n assert get_scheduler() is None\n assert get_scheduler(scheduler=dask.local.get_sync) is dask.local.get_sync\n assert get_scheduler(scheduler=\"threads\") is dask.threaded.get\n assert get_scheduler(scheduler=\"sync\") is dask.local.get_sync\n assert callable(get_scheduler(scheduler=dask.local.synchronous_executor))\n 
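# Editorial note (not in the original test): an Executor instance is\n # likewise wrapped so that get_scheduler always returns a callable.\n 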
assert callable(get_scheduler(scheduler=MyExecutor()))\n with dask.config.set(scheduler=\"threads\"):\n assert get_scheduler() is dask.threaded.get\n assert get_scheduler() is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_os_test_canonical_name.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_os_test_canonical_name.None_5", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 40, "span_ids": ["imports", "test_canonical_name"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport site\nimport stat\nimport sys\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\n\nimport pytest\nimport yaml\n\nimport dask.config\nfrom dask.config import (\n _get_paths,\n canonical_name,\n collect,\n collect_env,\n collect_yaml,\n config,\n deserialize,\n ensure_file,\n expand_environment_variables,\n get,\n merge,\n refresh,\n rename,\n serialize,\n update,\n update_defaults,\n)\nfrom dask.utils import tmpfile\n\n\ndef test_canonical_name():\n c = {\"foo-bar\": 1, \"fizz_buzz\": 2}\n assert canonical_name(\"foo-bar\", c) == \"foo-bar\"\n assert canonical_name(\"foo_bar\", c) == \"foo-bar\"\n assert canonical_name(\"fizz-buzz\", c) == \"fizz_buzz\"\n assert canonical_name(\"fizz_buzz\", c) == \"fizz_buzz\"\n assert canonical_name(\"new-key\", c) == \"new-key\"\n assert canonical_name(\"new_key\", c) == \"new_key\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_dask_layers_test_dask_layers.explicit_dask_validate_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_dask_layers_test_dask_layers.explicit_dask_validate_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 698, "end_line": 714, "span_ids": ["test_dask_layers"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dask_layers():\n d1 = delayed(1)\n assert d1.dask.layers.keys() == {d1.key}\n assert d1.dask.dependencies == {d1.key: set()}\n assert d1.__dask_layers__() == (d1.key,)\n d2 = modlevel_delayed1(d1)\n assert d2.dask.layers.keys() == {d1.key, d2.key}\n assert 
d2.dask.dependencies == {d1.key: set(), d2.key: {d1.key}}\n assert d2.__dask_layers__() == (d2.key,)\n\n hlg = HighLevelGraph.from_collections(\"foo\", {\"alias\": d2.key}, dependencies=[d2])\n with pytest.raises(ValueError, match=\"not in\"):\n Delayed(\"alias\", hlg)\n\n explicit = Delayed(\"alias\", hlg, layer=\"foo\")\n assert explicit.__dask_layers__() == (\"foo\",)\n explicit.dask.validate()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_annotations_survive_optimization_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_delayed.py_test_annotations_survive_optimization_", "embedding": null, "metadata": {"file_path": "dask/tests/test_delayed.py", "file_name": "test_delayed.py", "file_type": "text/x-python", "category": "test", "start_line": 717, "end_line": 738, "span_ids": ["test_annotations_survive_optimization"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_annotations_survive_optimization():\n with dask.annotate(foo=\"bar\"):\n graph = HighLevelGraph.from_collections(\n \"b\",\n {\"a\": 1, \"b\": (inc, \"a\"), \"c\": (inc, \"b\")},\n [],\n )\n d = Delayed(\"b\", graph)\n\n assert type(d.dask) is HighLevelGraph\n assert len(d.dask.layers) == 1\n assert len(d.dask.layers[\"b\"]) == 3\n assert d.dask.layers[\"b\"].annotations == {\"foo\": \"bar\"}\n\n # Ensure optimizing a Delayed object returns a HighLevelGraph\n # and doesn't lose annotations\n (d_opt,) = dask.optimize(d)\n assert type(d_opt.dask) is HighLevelGraph\n assert len(d_opt.dask.layers) == 1\n assert len(d_opt.dask.layers[\"b\"]) == 2 # c is culled\n assert d_opt.dask.layers[\"b\"].annotations == {\"foo\": \"bar\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_annotation_pack_unpack_test_annotation_pack_unpack.assert_annotations_w": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_annotation_pack_unpack_test_annotation_pack_unpack.assert_annotations_w", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 629, "end_line": 638, "span_ids": ["test_annotation_pack_unpack"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_annotation_pack_unpack(c, s, a, b):\n hlg 
= HighLevelGraph({\"l1\": MaterializedLayer({\"n\": 42})}, {\"l1\": set()})\n\n annotations = {\"workers\": (\"alice\",)}\n packed_hlg = hlg.__dask_distributed_pack__(c, [\"n\"], annotations)\n\n unpacked_hlg = HighLevelGraph.__dask_distributed_unpack__(packed_hlg)\n annotations = unpacked_hlg[\"annotations\"]\n assert annotations == {\"workers\": {\"n\": (\"alice\",)}}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_docs.py_from_pathlib_import_Path_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_docs.py_from_pathlib_import_Path_", "embedding": null, "metadata": {"file_path": "dask/tests/test_docs.py", "file_name": "test_docs.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 18, "span_ids": ["imports", "test_development_guidelines_matches_ci"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from pathlib import Path\n\n\ndef test_development_guidelines_matches_ci():\n \"\"\"When the environment.yaml changes in CI, make sure to change it in the docs as well\"\"\"\n root_dir = Path(__file__).parent.parent.parent\n\n development_doc_file = root_dir / \"docs\" / \"source\" / \"develop.rst\"\n additional_ci_file = root_dir / \".github\" / \"workflows\" / \"additional.yml\"\n upstream_ci_file = root_dir / \".github\" / \"workflows\" / \"upstream.yml\"\n latest_env = \"environment-3.9.yaml\"\n\n for filename in [development_doc_file, additional_ci_file, upstream_ci_file]:\n with open(filename, encoding=\"utf8\") as f:\n assert any(\n latest_env in line for line in f\n ), f\"{latest_env} not found in {filename}\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_test_to_graphviz_custom.assert_set_shapes_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_test_to_graphviz_custom.assert_set_shapes_b", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 108, "span_ids": ["test_to_graphviz", "test_to_graphviz_custom"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_graphviz():\n g = to_graphviz(dsk)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 10 # 10 nodes total\n assert set(labels) == {\"c\", \"d\", \"e\", \"f\", 
'\"\"'}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == {\"box\", \"circle\"}\n\n\ndef test_to_graphviz_custom():\n g = to_graphviz(\n dsk,\n data_attributes={\"a\": {\"shape\": \"square\"}},\n function_attributes={\"c\": {\"label\": \"neg_c\", \"shape\": \"ellipse\"}},\n )\n labels = set(filter(None, map(get_label, g.body)))\n assert labels == {\"neg_c\", \"d\", \"e\", \"f\", '\"\"'}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == {\"box\", \"circle\", \"square\", \"ellipse\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_attributes_test_to_graphviz_collapse_outputs_and_verbose.assert_set_shapes_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_to_graphviz_attributes_test_to_graphviz_collapse_outputs_and_verbose.assert_set_shapes_b", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 149, "span_ids": ["test_to_graphviz_attributes", "test_aliases", "test_to_graphviz_collapse_outputs_and_verbose", "test_to_graphviz_collapse_outputs", "test_to_graphviz_verbose"], "tokens": 470}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_graphviz_attributes():\n assert to_graphviz(dsk).graph_attr[\"rankdir\"] == \"BT\"\n assert to_graphviz(dsk, rankdir=\"LR\").graph_attr[\"rankdir\"] == \"LR\"\n assert to_graphviz(dsk, node_attr={\"color\": \"white\"}).node_attr[\"color\"] == \"white\"\n assert to_graphviz(dsk, edge_attr={\"color\": \"white\"}).edge_attr[\"color\"] == \"white\"\n\n\ndef test_aliases():\n g = to_graphviz({\"x\": 1, \"y\": \"x\"})\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 2\n assert len(g.body) - len(labels) == 1 # Single edge\n\n\ndef test_to_graphviz_verbose():\n g = to_graphviz(dsk, verbose=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 10 # 10 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == {\"box\", \"circle\"}\n\n\ndef test_to_graphviz_collapse_outputs():\n g = to_graphviz(dsk, collapse_outputs=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 6 # 6 nodes total\n assert set(labels) == {\"c\", \"d\", \"e\", \"f\", '\"\"'}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == {\"box\", \"circle\"}\n\n\ndef test_to_graphviz_collapse_outputs_and_verbose():\n g = to_graphviz(dsk, collapse_outputs=True, verbose=True)\n labels = list(filter(None, map(get_label, g.body)))\n assert len(labels) == 6 # 6 nodes total\n assert set(labels) == {\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}\n shapes = list(filter(None, map(get_shape, g.body)))\n assert set(shapes) == {\"box\", \"circle\"}", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_multiple_annotations_test_multiple_annotations.assert_clayer_annotations": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_multiple_annotations_test_multiple_annotations.assert_clayer_annotations", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 177, "end_line": 194, "span_ids": ["test_multiple_annotations"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_multiple_annotations():\n da = pytest.importorskip(\"dask.array\")\n with dask.annotate(block_id=annot_map_fn):\n with dask.annotate(resources={\"GPU\": 1}):\n A = da.ones((10, 10), chunks=(5, 5))\n\n B = A + 1\n\n C = B + 1\n\n assert dask.config.get(\"annotations\", None) is None\n\n alayer = A.__dask_graph__().layers[A.name]\n blayer = B.__dask_graph__().layers[B.name]\n clayer = C.__dask_graph__().layers[C.name]\n assert alayer.annotations == {\"resources\": {\"GPU\": 1}, \"block_id\": annot_map_fn}\n assert blayer.annotations == {\"block_id\": annot_map_fn}\n assert clayer.annotations is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_annotation_pack_unpack_test_materializedlayer_cull_preserves_annotations.assert_culled_layer_annot": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_annotation_pack_unpack_test_materializedlayer_cull_preserves_annotations.assert_culled_layer_annot", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 215, "span_ids": ["test_materializedlayer_cull_preserves_annotations", "test_annotation_pack_unpack"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_annotation_pack_unpack():\n layer = MaterializedLayer({\"n\": 42}, annotations={\"workers\": (\"alice\",)})\n packed_anno = layer.__dask_distributed_annotations_pack__()\n annotations = {}\n Layer.__dask_distributed_annotations_unpack__(\n annotations, packed_anno, layer.keys()\n )\n assert annotations == {\"workers\": {\"n\": (\"alice\",)}}\n\n\ndef test_materializedlayer_cull_preserves_annotations():\n layer = MaterializedLayer(\n {\"a\": 42, \"b\": 3.14},\n 
annotations={\"foo\": \"bar\"},\n )\n\n culled_layer, _ = layer.cull({\"a\"}, [])\n assert len(culled_layer) == 1\n assert culled_layer.annotations == {\"foo\": \"bar\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_node_tooltips_exist_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_node_tooltips_exist_", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 282, "end_line": 300, "span_ids": ["test_node_tooltips_exist"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_node_tooltips_exist():\n da = pytest.importorskip(\"dask.array\")\n pytest.importorskip(\"graphviz\")\n\n a = da.ones((1000, 1000), chunks=(100, 100))\n b = a + a.T\n c = b.sum(axis=1)\n\n hg = c.dask\n g = to_graphviz(hg)\n\n for layer in g.body:\n if \"label\" in layer:\n assert \"tooltip\" in layer\n start = layer.find('tooltip=\"') + len('tooltip=\"')\n end = layer.find('\"', start)\n tooltip = layer[start:end]\n assert len(tooltip) > 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_pytest_test_start_state.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_pytest_test_start_state.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 31, "span_ids": ["test_start_state", "imports"], "tokens": 304}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nimport dask\nfrom dask.local import finish_task, get_sync, sortkey, start_state_from_dask\nfrom dask.order import order\nfrom dask.utils_test import GetFunctionTestMixin, add, inc\n\nfib_dask = {\"f0\": 0, \"f1\": 1, \"f2\": 1, \"f3\": 2, \"f4\": 3, \"f5\": 5, \"f6\": 8}\n\n\ndef test_start_state():\n dsk = {\"x\": 1, \"y\": 2, \"z\": (inc, \"x\"), \"w\": (add, \"z\", \"y\")}\n result = start_state_from_dask(dsk)\n\n expected = {\n \"cache\": {\"x\": 1, \"y\": 2},\n \"dependencies\": {\n \"w\": {\"y\", \"z\"},\n \"x\": set(),\n \"y\": set(),\n \"z\": {\"x\"},\n },\n \"dependents\": {\"w\": set(), \"x\": {\"z\"}, \"y\": {\"w\"}, \"z\": {\"w\"}},\n \"finished\": set(),\n \"released\": set(),\n 
\"running\": set(),\n \"ready\": [\"z\"],\n \"waiting\": {\"w\": {\"z\"}},\n \"waiting_data\": {\"x\": {\"z\"}, \"y\": {\"w\"}, \"z\": {\"w\"}},\n }\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_exceptions_propagate_test_ordering.assert_L_sorted_L_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_exceptions_propagate_test_ordering.assert_L_sorted_L_", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 134, "end_line": 170, "span_ids": ["test_exceptions_propagate", "test_ordering"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exceptions_propagate():\n class MyException(Exception):\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def __str__(self):\n return \"My Exception!\"\n\n def f():\n raise MyException(1, 2)\n\n from dask.threaded import get\n\n try:\n get({\"x\": (f,)}, \"x\")\n assert False\n except MyException as e:\n assert \"My Exception!\" in str(e)\n assert \"a\" in dir(e)\n assert e.a == 1\n assert e.b == 2\n\n\ndef test_ordering():\n L = []\n\n def append(i):\n L.append(i)\n\n dsk = {(\"x\", i): (append, i) for i in range(10)}\n x_keys = sorted(dsk)\n dsk[\"y\"] = (lambda *args: None, list(x_keys))\n\n get_sync(dsk, \"y\")\n\n assert L == sorted(L)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_complex_ordering_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_local.py_test_complex_ordering_", "embedding": null, "metadata": {"file_path": "dask/tests/test_local.py", "file_name": "test_local.py", "file_type": "text/x-python", "category": "test", "start_line": 173, "end_line": 190, "span_ids": ["test_complex_ordering"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_complex_ordering():\n da = pytest.importorskip(\"dask.array\")\n from dask.diagnostics import Callback\n\n actual_order = []\n\n def track_order(key, dask, state):\n actual_order.append(key)\n\n x = da.random.normal(size=(20, 20), chunks=(-1, -1))\n res = (x.dot(x.T) - x.mean(axis=0)).std()\n dsk = dict(res.__dask_graph__())\n exp_order_dict = order(dsk)\n exp_order = sorted(exp_order_dict.keys(), key=exp_order_dict.get)\n with Callback(pretask=track_order):\n 
get_sync(dsk, exp_order[-1])\n assert actual_order == exp_order", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_ml.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_ml.py__", "embedding": null, "metadata": {"file_path": "dask/tests/test_ml.py", "file_name": "test_ml.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 13, "span_ids": ["test_basic"], "tokens": 91}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_basic():\n try:\n import dask_ml # noqa: F401\n except ImportError:\n try:\n from dask.ml.model_selection import GridSearchCV # noqa: F401\n except ImportError as e:\n assert \"conda install dask-ml\" in str(e)\n else:\n assert False\n else:\n from dask.ml.model_selection import GridSearchCV # noqa: F401", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_base_of_reduce_preferred_test_base_of_reduce_preferred.assert_o_b_1_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_base_of_reduce_preferred_test_base_of_reduce_preferred.assert_o_b_1_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 107, "span_ids": ["test_base_of_reduce_preferred"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_base_of_reduce_preferred(abcde):\n r\"\"\"\n a3\n /|\n a2 |\n /| |\n a1 | |\n /| | |\n a0 | | |\n | | | |\n b0 b1 b2 b3\n \\ \\ / /\n c\n\n We really want to run b0 quickly\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {(a, i): (f, (a, i - 1), (b, i)) for i in [1, 2, 3]}\n dsk[(a, 0)] = (f, (b, 0))\n dsk.update({(b, i): (f, c, 1) for i in [0, 1, 2, 3]})\n dsk[c] = 1\n\n o = order(dsk)\n\n assert o[(b, 0)] <= 1\n assert o[(b, 1)] <= 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_array_store_final_order_test_array_store_final_order.assert_connected_max_di": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_array_store_final_order_test_array_store_final_order.assert_connected_max_di", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 853, "end_line": 878, "span_ids": ["test_array_store_final_order"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_store_final_order(tmpdir):\n # https://github.com/dask/dask/issues/6745\n # This essentially tests the same thing as test_terminal_node_backtrack,\n # but with the graph actually generated by da.store.\n da = pytest.importorskip(\"dask.array\")\n zarr = pytest.importorskip(\"zarr\")\n import numpy as np\n\n arrays = [da.from_array(np.ones((110, 4)), chunks=(100, 2)) for i in range(4)]\n x = da.concatenate(arrays, axis=0).rechunk((100, 2))\n\n store = zarr.DirectoryStore(tmpdir)\n root = zarr.group(store, overwrite=True)\n dest = root.empty_like(name=\"dest\", data=x, chunks=x.chunksize, overwrite=True)\n d = x.store(dest, lock=False, compute=False)\n o = order(d.dask)\n\n # Find the lowest store. Dask starts here.\n stores = [k for k in o if isinstance(k, tuple) and k[0].startswith(\"store-map-\")]\n first_store = min(stores, key=lambda k: o[k])\n connected_stores = [k for k in stores if k[-1] == first_store[-1]]\n disconnected_stores = [k for k in stores if k[-1] != first_store[-1]]\n\n connected_max = max(v for k, v in o.items() if k in connected_stores)\n disconnected_min = min(v for k, v in o.items() if k in disconnected_stores)\n assert connected_max < disconnected_min", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent_test_eager_to_compute_dependent_to_free_parent.r_https_github_com_da": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent_test_eager_to_compute_dependent_to_free_parent.r_https_github_com_da", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 881, "end_line": 896, "span_ids": ["test_eager_to_compute_dependent_to_free_parent"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_eager_to_compute_dependent_to_free_parent():\n r\"\"\"https://github.com/dask/dask/pull/7929\n\n This graph begins with many motifs like the following:\n\n | |\n c1 c2\n \\ /\n b\n |\n a\n\n We want to compute c2 and c3 pretty close together, because if we choose to\n compute c1, then 
we should also compute c2 so we can release b. Being\n greedy here allows us to release memory sooner and be more globally optimal.\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent.dsk_test_eager_to_compute_dependent_to_free_parent.dsk._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent.dsk_test_eager_to_compute_dependent_to_free_parent.dsk._", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 897, "end_line": 970, "span_ids": ["test_eager_to_compute_dependent_to_free_parent"], "tokens": 944}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_eager_to_compute_dependent_to_free_parent():\n dsk = {\n \"a00\": (f, \"a06\", \"a08\"),\n \"a01\": (f, \"a28\", \"a26\"),\n \"a02\": (f, \"a24\", \"a21\"),\n \"a03\": (f, \"a22\", \"a25\"),\n \"a04\": (f, \"a29\", \"a20\"),\n \"a05\": (f, \"a23\", \"a27\"),\n \"a06\": (f, \"a04\", \"a02\"),\n \"a07\": (f, \"a00\", \"a01\"),\n \"a08\": (f, \"a05\", \"a03\"),\n \"a09\": (f, \"a43\"),\n \"a10\": (f, \"a36\"),\n \"a11\": (f, \"a33\"),\n \"a12\": (f, \"a47\"),\n \"a13\": (f, \"a44\"),\n \"a14\": (f, \"a42\"),\n \"a15\": (f, \"a37\"),\n \"a16\": (f, \"a48\"),\n \"a17\": (f, \"a49\"),\n \"a18\": (f, \"a35\"),\n \"a19\": (f, \"a46\"),\n \"a20\": (f, \"a55\"),\n \"a21\": (f, \"a53\"),\n \"a22\": (f, \"a60\"),\n \"a23\": (f, \"a54\"),\n \"a24\": (f, \"a59\"),\n \"a25\": (f, \"a56\"),\n \"a26\": (f, \"a61\"),\n \"a27\": (f, \"a52\"),\n \"a28\": (f, \"a57\"),\n \"a29\": (f, \"a58\"),\n \"a30\": (f, \"a19\"),\n \"a31\": (f, \"a07\"),\n \"a32\": (f, \"a30\", \"a31\"),\n \"a33\": (f, \"a58\"),\n \"a34\": (f, \"a11\", \"a09\"),\n \"a35\": (f, \"a60\"),\n \"a36\": (f, \"a52\"),\n \"a37\": (f, \"a61\"),\n \"a38\": (f, \"a14\", \"a10\"),\n \"a39\": (f, \"a38\", \"a40\"),\n \"a40\": (f, \"a18\", \"a17\"),\n \"a41\": (f, \"a34\", \"a50\"),\n \"a42\": (f, \"a54\"),\n \"a43\": (f, \"a55\"),\n \"a44\": (f, \"a53\"),\n \"a45\": (f, \"a16\", \"a15\"),\n \"a46\": (f, \"a51\", \"a45\"),\n \"a47\": (f, \"a59\"),\n \"a48\": (f, \"a57\"),\n \"a49\": (f, \"a56\"),\n \"a50\": (f, \"a12\", \"a13\"),\n \"a51\": (f, \"a41\", \"a39\"),\n \"a52\": (f, \"a62\"),\n \"a53\": (f, \"a68\"),\n \"a54\": (f, \"a70\"),\n \"a55\": (f, \"a67\"),\n \"a56\": (f, \"a71\"),\n \"a57\": (f, \"a64\"),\n \"a58\": (f, \"a65\"),\n \"a59\": (f, \"a63\"),\n \"a60\": (f, \"a69\"),\n \"a61\": (f, \"a66\"),\n \"a62\": (f, f),\n \"a63\": (f, f),\n \"a64\": (f, f),\n \"a65\": (f, f),\n \"a66\": (f, f),\n \"a67\": (f, f),\n \"a68\": (f, f),\n \"a69\": (f, f),\n \"a70\": (f, f),\n \"a71\": (f, f),\n }\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent.dependencies_dependents__test_eager_to_compute_dependent_to_free_parent.assert_sum_costs_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_eager_to_compute_dependent_to_free_parent.dependencies_dependents__test_eager_to_compute_dependent_to_free_parent.assert_sum_costs_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 971, "end_line": 982, "span_ids": ["test_eager_to_compute_dependent_to_free_parent"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_eager_to_compute_dependent_to_free_parent():\n # ... other code\n dependencies, dependents = get_deps(dsk)\n o = order(dsk)\n parents = {deps.pop() for key, deps in dependents.items() if not dependencies[key]}\n\n def cost(deps):\n a, b = deps\n return abs(o[a] - o[b])\n\n cost_of_pairs = {key: cost(dependents[key]) for key in parents}\n # Allow one to be bad, b/c this is hard!\n costs = sorted(cost_of_pairs.values())\n assert sum(costs[:-1]) <= 25 or sum(costs) <= 31", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_diagnostics_test_diagnostics.assert_key_val_num_depe": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_diagnostics_test_diagnostics.assert_key_val_num_depe", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 985, "end_line": 1043, "span_ids": ["test_diagnostics"], "tokens": 681}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diagnostics(abcde):\n r\"\"\"\n a1 b1 c2 d1 e1\n /|\\ /|\\ /|\\ /| /\n / | X | X | X | /\n / |/ \\|/ \\|/ \\|/\n a0 b0 c0 d0 e0\n \"\"\"\n a, b, c, d, e = abcde\n dsk = {\n (a, 0): (f,),\n (b, 0): (f,),\n (c, 0): (f,),\n (d, 0): (f,),\n (e, 0): (f,),\n (a, 1): (f, (a, 0), (b, 0), (c, 0)),\n (b, 1): (f, (b, 0), (c, 0), (d, 0)),\n (c, 1): (f, (c, 0), (d, 0), (e, 0)),\n (d, 1): (f, (d, 0), (e, 0)),\n (e, 1): (f, (e, 0)),\n }\n info, memory_over_time = diagnostics(dsk)\n assert memory_over_time == [0, 1, 2, 3, 2, 3, 2, 3, 2, 1]\n assert {key: val.order for key, val in info.items()} == {\n (a, 
0): 0,\n (b, 0): 1,\n (c, 0): 2,\n (d, 0): 4,\n (e, 0): 6,\n (a, 1): 3,\n (b, 1): 5,\n (c, 1): 7,\n (d, 1): 8,\n (e, 1): 9,\n }\n assert {key: val.age for key, val in info.items()} == {\n (a, 0): 3,\n (b, 0): 4,\n (c, 0): 5,\n (d, 0): 4,\n (e, 0): 3,\n (a, 1): 0,\n (b, 1): 0,\n (c, 1): 0,\n (d, 1): 0,\n (e, 1): 0,\n }\n assert {key: val.num_dependencies_freed for key, val in info.items()} == {\n (a, 0): 0,\n (b, 0): 0,\n (c, 0): 0,\n (d, 0): 0,\n (e, 0): 0,\n (a, 1): 1,\n (b, 1): 1,\n (c, 1): 1,\n (d, 1): 1,\n (e, 1): 1,\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_diagnostics.assert_key_val_num_data_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_order.py_test_diagnostics.assert_key_val_num_data_", "embedding": null, "metadata": {"file_path": "dask/tests/test_order.py", "file_name": "test_order.py", "file_type": "text/x-python", "category": "test", "start_line": 1044, "end_line": 1068, "span_ids": ["test_diagnostics"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diagnostics(abcde):\n # ... other code\n assert {key: val.num_data_when_run for key, val in info.items()} == {\n (a, 0): 0,\n (b, 0): 1,\n (c, 0): 2,\n (d, 0): 2,\n (e, 0): 2,\n (a, 1): 3,\n (b, 1): 3,\n (c, 1): 3,\n (d, 1): 2,\n (e, 1): 1,\n }\n assert {key: val.num_data_when_released for key, val in info.items()} == {\n (a, 0): 3,\n (b, 0): 3,\n (c, 0): 3,\n (d, 0): 2,\n (e, 0): 1,\n (a, 1): 3,\n (b, 1): 3,\n (c, 1): 3,\n (d, 1): 2,\n (e, 1): 1,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_walks_mro_test_dispatch_lazy_walks_mro.assert_foo_Eager_1_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_dispatch_lazy_walks_mro_test_dispatch_lazy_walks_mro.assert_foo_Eager_1_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 164, "end_line": 193, "span_ids": ["test_dispatch_lazy_walks_mro"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dispatch_lazy_walks_mro():\n \"\"\"Check that subclasses of classes with lazily registered handlers still\n use their parent class's handler by default\"\"\"\n import decimal\n\n class Lazy(decimal.Decimal):\n pass\n\n class Eager(Lazy):\n 
pass\n\n foo = Dispatch()\n\n @foo.register(Eager)\n def eager_handler(x):\n return \"eager\"\n\n def lazy_handler(a):\n return \"lazy\"\n\n @foo.register_lazy(\"decimal\")\n def register_decimal():\n foo.register(decimal.Decimal, lazy_handler)\n\n assert foo.dispatch(Lazy) == lazy_handler\n assert foo(Lazy(1)) == \"lazy\"\n assert foo.dispatch(decimal.Decimal) == lazy_handler\n assert foo(decimal.Decimal(1)) == \"lazy\"\n assert foo.dispatch(Eager) == eager_handler\n assert foo(Eager(1)) == \"eager\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils_test.py_test_hlg_layer_topological_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils_test.py_test_hlg_layer_topological_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils_test.py", "file_name": "test_utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 23, "end_line": 49, "span_ids": ["test_hlg_layer_topological", "test__check_warning"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hlg_layer_topological():\n a = {\"x\": 1}\n b = {\"y\": (utils_test.inc, \"x\")}\n c = {\"z\": (utils_test.inc, \"x\")}\n d = {\"r\": (sum, [\"y\", \"z\"])}\n layers = {\"a\": a, \"b\": b, \"c\": c, \"d\": d}\n dependencies = {\"a\": set(), \"b\": {\"a\"}, \"c\": {\"a\"}, \"d\": {\"b\", \"c\"}}\n hg = HighLevelGraph(layers, dependencies)\n\n assert utils_test.hlg_layer_topological(hg, -1) is hg.layers[\"d\"]\n assert utils_test.hlg_layer_topological(hg, 0) is hg.layers[\"a\"]\n assert utils_test.hlg_layer_topological(hg, 1) in (hg.layers[\"b\"], hg.layers[\"c\"])\n\n\ndef test__check_warning():\n class MyWarning(Warning):\n pass\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\")\n with _check_warning(True, MyWarning, \"foo\"):\n warnings.warn(\"foo\", MyWarning)\n\n with pytest.warns(MyWarning, match=\"foo\"):\n with _check_warning(False, MyWarning, \"foo\"):\n warnings.warn(\"foo\", MyWarning)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_from___future___import_an_apply.if_kwargs_.else_.return.func_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_from___future___import_an_apply.if_kwargs_.else_.return.func_args_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 41, "span_ids": ["apply", "imports"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport functools\nimport inspect\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport uuid\nimport warnings\nfrom collections.abc import Hashable, Iterable, Iterator, Mapping\nfrom contextlib import contextmanager, nullcontext, suppress\nfrom datetime import datetime, timedelta\nfrom errno import ENOENT\nfrom functools import lru_cache\nfrom importlib import import_module\nfrom numbers import Integral, Number\nfrom operator import add\nfrom threading import Lock\nfrom typing import ClassVar, TypeVar\nfrom weakref import WeakValueDictionary\n\nimport tlz as toolz\n\nfrom .core import get_deps\n\nK = TypeVar(\"K\")\nV = TypeVar(\"V\")\n\n\nsystem_encoding = sys.getdefaultencoding()\nif system_encoding == \"ascii\":\n system_encoding = \"utf-8\"\n\n\ndef apply(func, args, kwargs=None):\n if kwargs:\n return func(*args, **kwargs)\n else:\n return func(*args)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_homogeneous_deepmap_import_required.try_.except_ImportError_as_e_.raise_RuntimeError_error_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_homogeneous_deepmap_import_required.try_.except_ImportError_as_e_.raise_RuntimeError_error_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 164, "span_ids": ["import_required", "homogeneous_deepmap", "ndeepmap"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@_deprecated()\ndef homogeneous_deepmap(func, seq):\n if not seq:\n return seq\n n = 0\n tmp = seq\n while isinstance(tmp, list):\n n += 1\n tmp = tmp[0]\n\n return ndeepmap(n, func, seq)\n\n\ndef ndeepmap(n, func, seq):\n \"\"\"Call a function on every element within a nested container\n\n >>> def inc(x):\n ... 
return x + 1\n >>> L = [[1, 2], [3, 4, 5]]\n >>> ndeepmap(2, inc, L)\n [[2, 3], [4, 5, 6]]\n \"\"\"\n if n == 1:\n return [func(item) for item in seq]\n elif n > 1:\n return [ndeepmap(n - 1, func, item) for item in seq]\n elif isinstance(seq, list):\n return func(seq[0])\n else:\n return func(seq)\n\n\ndef import_required(mod_name, error_msg):\n \"\"\"Attempt to import a required dependency.\n\n Raises a RuntimeError if the requested module is not available.\n \"\"\"\n try:\n return import_module(mod_name)\n except ImportError as e:\n raise RuntimeError(error_msg) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_tmpfile_tmpfile.try_.finally_.if_os_path_exists_filenam.with_suppress_OSError_.if_os_path_isdir_filename.else_.os_remove_filename_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_tmpfile_tmpfile.try_.finally_.if_os_path_exists_filenam.with_suppress_OSError_.if_os_path_isdir_filename.else_.os_remove_filename_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 176, "end_line": 216, "span_ids": ["tmpfile"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef tmpfile(extension=\"\", dir=None):\n \"\"\"\n Function to create and return a unique temporary file with the given extension, if provided.\n\n Parameters\n ----------\n extension : str\n The extension of the temporary file to be created\n dir : str\n If ``dir`` is not None, the file will be created in that directory; otherwise,\n Python's default temporary directory is used.\n\n Returns\n -------\n out : str\n Path to the temporary file\n\n See Also\n --------\n NamedTemporaryFile : Built-in alternative for creating temporary files\n tmp_path : pytest fixture for creating a temporary directory unique to the test invocation\n\n Notes\n -----\n This context manager is particularly useful on Windows for opening temporary files multiple times.\n \"\"\"\n extension = \".\" + extension.lstrip(\".\")\n handle, filename = tempfile.mkstemp(extension, dir=dir)\n os.close(handle)\n os.remove(filename)\n\n try:\n yield filename\n finally:\n if os.path.exists(filename):\n with suppress(OSError): # sometimes we can't remove a generated temp file\n if os.path.isdir(filename):\n shutil.rmtree(filename)\n else:\n os.remove(filename)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_tmpdir_tmpdir.try_.finally_.if_os_path_exists_dirname.if_os_path_isdir_dirname_.else_.with_suppress_OSError_.os_remove_dirname_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_tmpdir_tmpdir.try_.finally_.if_os_path_exists_dirname.if_os_path_isdir_dirname_.else_.with_suppress_OSError_.os_remove_dirname_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 219, "end_line": 250, "span_ids": ["tmpdir"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef tmpdir(dir=None):\n \"\"\"\n Function to create and return a unique temporary directory.\n\n Parameters\n ----------\n dir : str\n If ``dir`` is not None, the directory will be created in that directory; otherwise,\n Python's default temporary directory is used.\n\n Returns\n -------\n out : str\n Path to the temporary directory\n\n Notes\n -----\n This context manager is particularly useful on Windows for opening temporary directories multiple times.\n \"\"\"\n dirname = tempfile.mkdtemp(dir=dir)\n\n try:\n yield dirname\n finally:\n if os.path.exists(dirname):\n if os.path.isdir(dirname):\n with suppress(OSError):\n shutil.rmtree(dirname)\n else:\n with suppress(OSError):\n os.remove(dirname)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetext_IndexCallable.__getitem__.return.self_fn_key_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_filetext_IndexCallable.__getitem__.return.self_fn_key_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 244, "end_line": 293, "span_ids": ["IndexCallable", "filetext", "IndexCallable.__init__", "IndexCallable.__getitem__", "changed_cwd", "tmp_cwd"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef filetext(text, extension=\"\", open=open, mode=\"w\"):\n with tmpfile(extension=extension) as filename:\n f = open(filename, mode=mode)\n try:\n f.write(text)\n finally:\n try:\n f.close()\n except AttributeError:\n pass\n\n yield filename\n\n\n@contextmanager\ndef changed_cwd(new_cwd):\n old_cwd = os.getcwd()\n os.chdir(new_cwd)\n try:\n yield\n finally:\n os.chdir(old_cwd)\n\n\n@contextmanager\ndef tmp_cwd(dir=None):\n with tmpdir(dir) as dirname:\n with changed_cwd(dirname):\n yield dirname\n\n\nclass IndexCallable:\n \"\"\"Provide getitem syntax for functions\n\n >>> def inc(x):\n ... 
return x + 1\n\n >>> I = IndexCallable(inc)\n >>> I[3]\n 4\n \"\"\"\n\n __slots__ = (\"fn\",)\n\n def __init__(self, fn):\n self.fn = fn\n\n def __getitem__(self, key):\n return self.fn(key)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from_derived_from.ua_args.ua_args_or_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_derived_from_derived_from.ua_args.ua_args_or_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 779, "end_line": 800, "span_ids": ["derived_from"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def derived_from(original_klass, version=None, ua_args=None, skipblocks=0):\n \"\"\"Decorator to attach original class's docstring to the wrapped method.\n\n The output structure will be: top line of docstring, disclaimer about this\n being auto-derived, any extra text associated with the method being patched,\n the body of the docstring and finally, the list of keywords that exist in\n the original method but not in the dask version.\n\n Parameters\n ----------\n original_klass: type\n Original class which the method is derived from\n version : str\n Original package version which supports the wrapped method\n ua_args : list\n List of keywords which Dask doesn't support. Keywords existing in\n original but not in Dask will automatically be added.\n skipblocks : int\n How many text blocks (paragraphs) to skip from the start of the\n docstring. Useful for cases where the target has extra front-matter.\n \"\"\"\n ua_args = ua_args or []\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_typename_typename.try_.except_AttributeError_.return.str_typ_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_typename_typename.try_.except_AttributeError_.return.str_typ_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 862, "end_line": 889, "span_ids": ["typename"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def typename(typ, short=False):\n \"\"\"\n Return the name of a type\n\n Examples\n --------\n >>> typename(int)\n 'int'\n\n >>> from dask.core import literal\n >>> typename(literal)\n 'dask.core.literal'\n >>> typename(literal, short=True)\n 'dask.literal'\n \"\"\"\n if not isinstance(typ, type):\n return typename(type(typ))\n try:\n if not typ.__module__ or typ.__module__ == \"builtins\":\n return typ.__name__\n else:\n if short:\n module, *_ = typ.__module__.split(\".\")\n else:\n module = typ.__module__\n return module + \".\" + typ.__name__\n except AttributeError:\n return str(typ)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_bytes_memory_repr.for_x_in_bytes_KB_.num_1024_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_ensure_bytes_memory_repr.for_x_in_bytes_KB_.num_1024_0", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 882, "end_line": 959, "span_ids": ["memory_repr", "ensure_bytes", "ensure_unicode", "insert", "digit", "dependency_depth"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ensure_bytes(s):\n \"\"\"Turn string or bytes to bytes\n\n >>> ensure_bytes('123')\n b'123'\n >>> ensure_bytes(b'123')\n b'123'\n \"\"\"\n if isinstance(s, bytes):\n return s\n if hasattr(s, \"encode\"):\n return s.encode()\n msg = \"Object %s is neither a bytes object nor has an encode method\"\n raise TypeError(msg % s)\n\n\ndef ensure_unicode(s):\n \"\"\"Turn string or bytes to str\n\n >>> ensure_unicode('123')\n '123'\n >>> ensure_unicode(b'123')\n '123'\n \"\"\"\n if isinstance(s, str):\n return s\n if hasattr(s, \"decode\"):\n return s.decode()\n raise TypeError(f\"Object {s} is neither a str object nor has a decode 
method\")\n\n\ndef digit(n, k, base):\n \"\"\"\n\n >>> digit(1234, 0, 10)\n 4\n >>> digit(1234, 1, 10)\n 3\n >>> digit(1234, 2, 10)\n 2\n >>> digit(1234, 3, 10)\n 1\n \"\"\"\n return n // base**k % base\n\n\ndef insert(tup, loc, val):\n \"\"\"\n\n >>> insert(('a', 'b', 'c'), 0, 'x')\n ('x', 'b', 'c')\n \"\"\"\n L = list(tup)\n L[loc] = val\n return tuple(L)\n\n\ndef dependency_depth(dsk):\n deps, _ = get_deps(dsk)\n\n @lru_cache(maxsize=None)\n def max_depth_by_deps(key):\n if not deps[key]:\n return 1\n\n d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])\n return d\n\n return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())\n\n\ndef memory_repr(num):\n for x in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n if num < 1024.0:\n return f\"{num:3.1f} {x}\"\n num /= 1024.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_dataframe_like_is_dataframe_like.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_dataframe_like_is_dataframe_like.return._", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1270, "end_line": 1283, "span_ids": ["is_dataframe_like"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_dataframe_like(df):\n \"\"\"Looks like a Pandas DataFrame\"\"\"\n if (df.__class__.__module__, df.__class__.__name__) == (\n \"pandas.core.frame\",\n \"DataFrame\",\n ):\n # fast exec for most likely input\n return True\n typ = df.__class__\n return (\n all(hasattr(typ, name) for name in (\"groupby\", \"head\", \"merge\", \"mean\"))\n and all(hasattr(df, name) for name in (\"dtypes\", \"columns\"))\n and not any(hasattr(typ, name) for name in (\"name\", \"dtype\"))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_series_like_is_cupy_type.return._cupy_in_str_type_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_is_series_like_is_cupy_type.return._cupy_in_str_type_x_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1286, "end_line": 1307, "span_ids": ["is_series_like", "is_index_like", "is_cupy_type"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def is_series_like(s):\n \"\"\"Looks like a Pandas 
Series\"\"\"\n typ = s.__class__\n return (\n all(hasattr(typ, name) for name in (\"groupby\", \"head\", \"mean\"))\n and all(hasattr(s, name) for name in (\"dtype\", \"name\"))\n and \"index\" not in typ.__name__.lower()\n )\n\n\ndef is_index_like(s):\n \"\"\"Looks like a Pandas Index\"\"\"\n typ = s.__class__\n return (\n all(hasattr(s, name) for name in (\"name\", \"dtype\"))\n and \"index\" in typ.__name__.lower()\n )\n\n\ndef is_cupy_type(x):\n # TODO: avoid explicit reference to CuPy\n return \"cupy\" in str(type(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_from___future___import_an_GetFunctionTestMixin.test_get_with_list.assert_self_get_d_z_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_from___future___import_an_GetFunctionTestMixin.test_get_with_list.assert_self_get_d_z_", "embedding": null, "metadata": {"file_path": "dask/utils_test.py", "file_name": "utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 84, "span_ids": ["GetFunctionTestMixin.test_get", "slowadd", "inc", "imports", "GetFunctionTestMixin.test_badkey", "GetFunctionTestMixin.test_data_not_in_dict_is_ok", "GetFunctionTestMixin.test_nested_badkey", "dec", "add", "GetFunctionTestMixin.test_get_with_list", "GetFunctionTestMixin"], "tokens": 621}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport contextlib\nimport importlib\nimport time\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from .highlevelgraph import HighLevelGraph, Layer\n\n\ndef inc(x):\n return x + 1\n\n\ndef dec(x):\n return x - 1\n\n\ndef add(x, y):\n return x + y\n\n\ndef slowadd(a, b, delay=0.1):\n time.sleep(delay)\n return a + b\n\n\nclass GetFunctionTestMixin:\n \"\"\"\n The GetFunctionTestMixin class can be imported and used to test foreign\n implementations of the `get` function specification. It aims to enforce all\n known expectations of `get` functions.\n\n To use the class, inherit from it and override the `get` function. 
For\n example:\n\n > from dask.utils_test import GetFunctionTestMixin\n > class TestCustomGet(GetFunctionTestMixin):\n get = staticmethod(myget)\n\n Note that the foreign `myget` function has to be explicitly decorated as a\n staticmethod.\n \"\"\"\n\n def test_get(self):\n d = {\":x\": 1, \":y\": (inc, \":x\"), \":z\": (add, \":x\", \":y\")}\n\n assert self.get(d, \":x\") == 1\n assert self.get(d, \":y\") == 2\n assert self.get(d, \":z\") == 3\n\n def test_badkey(self):\n d = {\":x\": 1, \":y\": (inc, \":x\"), \":z\": (add, \":x\", \":y\")}\n try:\n result = self.get(d, \"badkey\")\n except KeyError:\n pass\n else:\n msg = \"Expected `{}` with badkey to raise KeyError.\\n\"\n msg += f\"Obtained '{result}' instead.\"\n assert False, msg.format(self.get.__name__)\n\n def test_nested_badkey(self):\n d = {\"x\": 1, \"y\": 2, \"z\": (sum, [\"x\", \"y\"])}\n\n try:\n result = self.get(d, [[\"badkey\"], \"y\"])\n except KeyError:\n pass\n else:\n msg = \"Expected `{}` with badkey to raise KeyError.\\n\"\n msg += f\"Obtained '{result}' instead.\"\n assert False, msg.format(self.get.__name__)\n\n def test_data_not_in_dict_is_ok(self):\n d = {\"x\": 1, \"y\": (add, \"x\", 10)}\n assert self.get(d, \"y\") == 11\n\n def test_get_with_list(self):\n d = {\"x\": 1, \"y\": 2, \"z\": (sum, [\"x\", \"y\"])}\n\n assert self.get(d, [\"x\", \"y\"]) == (1, 2)\n assert self.get(d, \"z\") == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_nested_list_GetFunctionTestMixin.test_with_HighLevelGraph.assert_self_get_graph_z": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_GetFunctionTestMixin.test_get_with_nested_list_GetFunctionTestMixin.test_with_HighLevelGraph.assert_self_get_graph_z", "embedding": null, "metadata": {"file_path": "dask/utils_test.py", "file_name": "utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 95, "end_line": 123, "span_ids": ["GetFunctionTestMixin.test_get_with_nested_list", "GetFunctionTestMixin.test_with_HighLevelGraph", "GetFunctionTestMixin.test_get_works_with_unhashables_in_values", "GetFunctionTestMixin.test_nested_tasks", "GetFunctionTestMixin.test_get_stack_limit"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class GetFunctionTestMixin:\n\n def test_get_with_nested_list(self):\n d = {\"x\": 1, \"y\": 2, \"z\": (sum, [\"x\", \"y\"])}\n\n assert self.get(d, [[\"x\"], \"y\"]) == ((1,), 2)\n assert self.get(d, \"z\") == 3\n\n def test_get_works_with_unhashables_in_values(self):\n f = lambda x, y: x + len(y)\n d = {\"x\": 1, \"y\": (f, \"x\", {1})}\n\n assert self.get(d, \"y\") == 2\n\n def test_nested_tasks(self):\n d = {\"x\": 1, \"y\": (inc, \"x\"), \"z\": (add, (inc, \"x\"), \"y\")}\n\n assert self.get(d, \"z\") == 4\n\n def test_get_stack_limit(self):\n d = {\"x%d\" % (i + 1): (inc, \"x%d\" % i) for i in range(10000)}\n d[\"x0\"] = 0\n assert self.get(d, \"x10000\") == 10000\n\n def 
test_with_HighLevelGraph(self):\n from .highlevelgraph import HighLevelGraph\n\n layers = {\"a\": {\"x\": 1, \"y\": (inc, \"x\")}, \"b\": {\"z\": (add, (inc, \"x\"), \"y\")}}\n dependencies = {\"a\": (), \"b\": {\"a\"}}\n graph = HighLevelGraph(layers, dependencies)\n assert self.get(graph, \"z\") == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_import_or_none_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils_test.py_import_or_none_", "embedding": null, "metadata": {"file_path": "dask/utils_test.py", "file_name": "utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 133, "end_line": 165, "span_ids": ["hlg_layer", "hlg_layer_topological", "import_or_none", "_check_warning"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def import_or_none(name):\n \"\"\"Import a module and return it; in case of failure; return None\"\"\"\n try:\n return importlib.import_module(name)\n except (ImportError, AttributeError):\n return None\n\n\ndef hlg_layer(hlg: HighLevelGraph, prefix: str) -> Layer:\n \"Get the first layer from a HighLevelGraph whose name starts with a prefix\"\n for key, lyr in hlg.layers.items():\n if key.startswith(prefix):\n return lyr\n raise KeyError(f\"No layer starts with {prefix!r}: {list(hlg.layers)}\")\n\n\ndef hlg_layer_topological(hlg: HighLevelGraph, i: int) -> Layer:\n \"Get the layer from a HighLevelGraph at position ``i``, topologically\"\n return hlg.layers[hlg._toposort_layers()[i]]\n\n\n@contextlib.contextmanager\ndef _check_warning(condition: bool, category: type[Warning], message: str):\n \"\"\"Conditionally check if a warning is raised\"\"\"\n if condition:\n import pytest\n\n with pytest.warns(category, match=message) as ctx:\n yield ctx\n else:\n with contextlib.nullcontext() as ctx:\n yield ctx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/__init__.py_try__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/__init__.py_try__", "embedding": null, "metadata": {"file_path": "dask/widgets/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["impl"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "try:\n from .widgets import FILTERS, TEMPLATE_PATHS, get_environment, get_template\n\nexcept ImportError as e:\n msg = (\n 
\"Dask diagnostics requirements are not installed.\\n\\n\"\n \"Please either conda or pip install as follows:\\n\\n\"\n \" conda install dask # either conda install\\n\"\n ' python -m pip install \"dask[diagnostics]\" --upgrade # or python -m pip install'\n )\n exception = e # Explicit reference for e as it will be lost outside the try block\n FILTERS = {}\n TEMPLATE_PATHS = []\n\n def get_environment():\n raise ImportError(msg) from exception\n\n def get_template(name: str):\n raise ImportError(msg) from exception", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/tests/test_widgets.py_os.path_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/tests/test_widgets.py_os.path_", "embedding": null, "metadata": {"file_path": "dask/widgets/tests/test_widgets.py", "file_name": "test_widgets.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 47, "span_ids": ["test_filters", "imports", "setup_testing", "test_unknown_template", "test_widgets", "test_environment"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os.path\n\nimport pytest\n\njinja2 = pytest.importorskip(\"jinja2\")\n\nfrom dask.utils import format_bytes\nfrom dask.widgets import FILTERS, TEMPLATE_PATHS, get_environment, get_template\n\n\n@pytest.fixture(autouse=True)\ndef setup_testing():\n TEMPLATE_PATHS.append(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\")\n )\n FILTERS[\"custom_filter\"] = lambda x: \"baz\"\n\n\ndef test_widgets():\n template = get_template(\"example.html.j2\")\n assert isinstance(template, jinja2.Template)\n rendered = template.render(foo=\"bar\")\n assert \"Hello bar\" in rendered\n\n\ndef test_environment():\n environment = get_environment()\n assert isinstance(environment, jinja2.Environment)\n\n\ndef test_unknown_template():\n with pytest.raises(jinja2.TemplateNotFound) as e:\n get_template(\"does_not_exist.html.j2\")\n\n # The error should contain all the registered template directories to help the user\n # understand where jinja2 is looking. 
Including the one we registered in the fixture.\n assert os.path.dirname(os.path.abspath(__file__)) in str(e)\n\n\ndef test_filters():\n template = get_template(\"bytes.html.j2\")\n assert format_bytes in FILTERS.values()\n assert format_bytes(2e9) in template.render(foo=2e9)\n\n template = get_template(\"custom_filter.html.j2\")\n assert \"baz\" in template.render(foo=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/widgets.py_datetime_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/widgets/widgets.py_datetime_", "embedding": null, "metadata": {"file_path": "dask/widgets/widgets.py", "file_name": "widgets.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 39, "span_ids": ["imports", "get_template", "get_environment"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import datetime\nimport html\nimport os.path\n\nfrom jinja2 import Environment, FileSystemLoader, Template\nfrom jinja2.exceptions import TemplateNotFound\n\nfrom ..utils import format_bytes, format_time, format_time_ago, key_split, typename\n\nFILTERS = {\n \"datetime_from_timestamp\": datetime.datetime.fromtimestamp,\n \"format_bytes\": format_bytes,\n \"format_time\": format_time,\n \"format_time_ago\": format_time_ago,\n \"html_escape\": html.escape,\n \"key_split\": key_split,\n \"type\": type,\n \"typename\": typename,\n}\n\nTEMPLATE_PATHS = [os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\")]\n\n\ndef get_environment() -> Environment:\n loader = FileSystemLoader(TEMPLATE_PATHS)\n environment = Environment(loader=loader)\n environment.filters.update(FILTERS)\n\n return environment\n\n\ndef get_template(name: str) -> Template:\n try:\n return get_environment().get_template(name)\n except TemplateNotFound as e:\n raise TemplateNotFound(\n f\"Unable to find {name} in dask.widgets.TEMPLATE_PATHS {TEMPLATE_PATHS}\"\n ) from e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_texinfo_documents__https_tech_signavio_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_texinfo_documents__https_tech_signavio_c", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 243, "end_line": 337, "span_ids": ["docstring:137", "docstring:103"], "tokens": 691}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "texinfo_documents = [\n (\n master_doc,\n \"Dask\",\n \"dask Documentation\",\n \"Dask Development Team\",\n \"Dask\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = \"Dask\"\nepub_author = \"Dask Development Team\"\nepub_publisher = \"Anaconda Inc\"\nepub_copyright = \"2014-2018, Anaconda, Inc. and contributors\"\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n# epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n# epub_scheme = ''\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n# epub_identifier = ''\n\n# A unique identification for the text.\n# epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n# epub_cover = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_pre_files = []\n\n# HTML files that should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n# epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n# epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n# epub_tocdup = True\n\nextlinks = {\n \"issue\": (\"https://github.com/dask/dask/issues/%s\", \"GH#\"),\n \"pr\": (\"https://github.com/dask/dask/pull/%s\", \"GH#\"),\n}\n\n# --Options for sphinx extensions -----------------------------------------------\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"pandas\": (\n \"https://pandas.pydata.org/pandas-docs/stable/\",\n \"https://pandas.pydata.org/pandas-docs/stable/objects.inv\",\n ),\n \"numpy\": (\n \"https://numpy.org/doc/stable/\",\n \"https://numpy.org/doc/stable/objects.inv\",\n ),\n \"asyncssh\": (\n \"https://asyncssh.readthedocs.io/en/latest/\",\n \"https://asyncssh.readthedocs.io/en/latest/objects.inv\",\n ),\n \"pyarrow\": (\"https://arrow.apache.org/docs/\", None),\n \"zarr\": (\n \"https://zarr.readthedocs.io/en/latest/\",\n \"https://zarr.readthedocs.io/en/latest/objects.inv\",\n ),\n \"skimage\": (\"https://scikit-image.org/docs/dev/\", None),\n}\n\n# Redirects\n# https://tech.signavio.com/2017/managing-sphinx-redirects", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_argtopk.return.np_take_along_axis_a_idx": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/chunk.py_argtopk_argtopk.return.np_take_along_axis_a_idx", "embedding": null, "metadata": {"file_path": "dask/array/chunk.py", "file_name": "chunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 205, 
"end_line": 231, "span_ids": ["argtopk"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def argtopk(a_plus_idx, k, axis, keepdims):\n \"\"\"Chunk and combine function of argtopk\n\n Extract the indices of the k largest elements from a on the given axis.\n If k is negative, extract the indices of the -k smallest elements instead.\n Note that, unlike in the parent function, the returned elements\n are not sorted internally.\n \"\"\"\n assert keepdims is True\n axis = axis[0]\n\n if isinstance(a_plus_idx, list):\n a_plus_idx = list(flatten(a_plus_idx))\n a = np.concatenate([ai for ai, _ in a_plus_idx], axis)\n idx = np.concatenate(\n [np.broadcast_to(idxi, ai.shape) for ai, idxi in a_plus_idx], axis\n )\n else:\n a, idx = a_plus_idx\n\n if abs(k) >= a.shape[axis]:\n return a_plus_idx\n\n idx2 = np.argpartition(a, -k, axis=axis)\n k_slice = slice(-k, None) if k > 0 else slice(-k)\n idx2 = idx2[tuple(k_slice if i == axis else slice(None) for i in range(a.ndim))]\n return np.take_along_axis(a, idx2, axis), np.take_along_axis(idx, idx2, axis)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_graph_from_arraylike_graph_from_arraylike.if_inline_array_.else_.return.HighLevelGraph_layers_de": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_graph_from_arraylike_graph_from_arraylike.if_inline_array_.else_.return.HighLevelGraph_layers_de", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 238, "end_line": 319, "span_ids": ["graph_from_arraylike"], "tokens": 749}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def graph_from_arraylike(\n arr, # Any array-like which supports slicing\n chunks,\n shape,\n name,\n getitem=getter,\n lock=False,\n asarray=True,\n dtype=None,\n inline_array=False,\n) -> HighLevelGraph:\n \"\"\"\n HighLevelGraph for slicing chunks from an array-like according to a chunk pattern.\n\n If ``inline_array`` is True, this make a Blockwise layer of slicing tasks where the\n array-like is embedded into every task.,\n\n If ``inline_array`` is False, this inserts the array-like as a standalone value in\n a MaterializedLayer, then generates a Blockwise layer of slicing tasks that refer\n to it.\n\n >>> dict(graph_from_arraylike(arr, chunks=(2, 3), shape=(4, 6), name=\"X\", inline_array=True)) # doctest: +SKIP\n {(arr, 0, 0): (getter, arr, (slice(0, 2), slice(0, 3))),\n (arr, 1, 0): (getter, arr, (slice(2, 4), slice(0, 3))),\n (arr, 1, 1): (getter, arr, (slice(2, 4), slice(3, 6))),\n (arr, 0, 1): (getter, arr, (slice(0, 2), slice(3, 6)))}\n\n >>> dict( # doctest: +SKIP\n 
graph_from_arraylike(arr, chunks=((2, 2), (3, 3)), shape=(4,6), name=\"X\", inline_array=False)\n )\n {\"original-X\": arr,\n ('X', 0, 0): (getter, 'original-X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'original-X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'original-X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'original-X', (slice(0, 2), slice(3, 6)))}\n \"\"\"\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n out_ind = tuple(range(len(shape)))\n\n if (\n has_keyword(getitem, \"asarray\")\n and has_keyword(getitem, \"lock\")\n and (not asarray or lock)\n ):\n getter = partial(getitem, asarray=asarray, lock=lock)\n else:\n # Common case, drop extra parameters\n getter = getitem\n\n if inline_array:\n layer = core_blockwise(\n getter,\n name,\n out_ind,\n arr,\n None,\n ArraySliceDep(chunks),\n out_ind,\n numblocks={},\n )\n return HighLevelGraph.from_collections(name, layer)\n else:\n original_name = \"original-\" + name\n\n layers = {}\n layers[original_name] = MaterializedLayer({original_name: arr})\n layers[name] = core_blockwise(\n getter,\n name,\n out_ind,\n original_name,\n None,\n ArraySliceDep(chunks),\n out_ind,\n numblocks={},\n )\n\n deps = {\n original_name: set(),\n name: {original_name},\n }\n return HighLevelGraph(layers, deps)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_concatenate.if_n_0_.elif_n_1_.return.seq2_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate_concatenate.if_n_0_.elif_n_1_.return.seq2_0_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3970, "end_line": 4061, "span_ids": ["concatenate"], "tokens": 663}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Concatenate arrays along an existing axis\n\n Given a sequence of dask Arrays form a new dask Array by stacking them\n along an existing dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays. If axis is None,\n arrays are flattened before use.\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.concatenate(data, axis=0)\n >>> x.shape\n (12, 4)\n\n >>> da.concatenate(data, axis=1).shape\n (4, 12)\n\n Result is a new dask Array\n\n See Also\n --------\n stack\n \"\"\"\n from . 
import wrap\n\n seq = [asarray(a, allow_unknown_chunksizes=allow_unknown_chunksizes) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to concatenate\")\n\n if axis is None:\n seq = [a.flatten() for a in seq]\n axis = 0\n\n seq_metas = [meta_from_array(s) for s in seq]\n _concatenate = concatenate_lookup.dispatch(\n type(max(seq_metas, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n meta = _concatenate(seq_metas, axis=axis)\n\n # Promote types to match meta\n seq = [a.astype(meta.dtype) for a in seq]\n\n # Find output array shape\n ndim = len(seq[0].shape)\n shape = tuple(\n sum(a.shape[i] for a in seq) if i == axis else seq[0].shape[i]\n for i in range(ndim)\n )\n\n # Drop empty arrays\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n if axis < 0:\n axis = ndim + axis\n if axis >= ndim:\n msg = (\n \"Axis must be less than the number of dimensions\"\n \"\\nData has %d dimensions, but got axis=%d\"\n )\n raise ValueError(msg % (ndim, axis))\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n elif n == 1:\n return seq2[0]\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate.if_not_allow_unknown_chun_concatenate.return.Array_graph_name_chunks": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_concatenate.if_not_allow_unknown_chun_concatenate.return.Array_graph_name_chunks", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4063, "end_line": 4111, "span_ids": ["concatenate"], "tokens": 497}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n # ... other code\n\n if not allow_unknown_chunksizes and not all(\n i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)\n for i in range(ndim)\n ):\n if any(map(np.isnan, seq2[0].shape)):\n raise ValueError(\n \"Tried to concatenate arrays with unknown\"\n \" shape %s.\\n\\nTwo solutions:\\n\"\n \" 1. Force concatenation pass\"\n \" allow_unknown_chunksizes=True.\\n\"\n \" 2. 
Compute shapes with \"\n \"[x.compute_chunk_sizes() for x in seq]\" % str(seq2[0].shape)\n )\n raise ValueError(\"Shapes do not align: %s\" % str([x.shape for x in seq2]))\n\n inds = [list(range(ndim)) for i in range(n)]\n for i, ind in enumerate(inds):\n ind[axis] = -(i + 1)\n\n uc_args = list(concat(zip(seq2, inds)))\n _, seq2 = unify_chunks(*uc_args, warn=False)\n\n bds = [a.chunks for a in seq2]\n\n chunks = (\n seq2[0].chunks[:axis]\n + (sum((bd[axis] for bd in bds), ()),)\n + seq2[0].chunks[axis + 1 :]\n )\n\n cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))\n\n names = [a.name for a in seq2]\n\n name = \"concatenate-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n values = [\n (names[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[1 : axis + 1]\n + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[axis + 2 :]\n for key in keys\n ]\n\n dsk = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asanyarray_asanyarray.return.from_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/core.py_asanyarray_asanyarray.return.from_array_", "embedding": null, "metadata": {"file_path": "dask/array/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4346, "end_line": 4420, "span_ids": ["asanyarray"], "tokens": 799}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def asanyarray(a, dtype=None, order=None, *, like=None, inline_array=False):\n \"\"\"Convert the input to a dask array.\n\n Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array. This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples of\n lists and ndarrays.\n dtype : data-type, optional\n By default, the data-type is inferred from the input data.\n order : {\u2018C\u2019, \u2018F\u2019, \u2018A\u2019, \u2018K\u2019}, optional\n Memory layout. \u2018A\u2019 and \u2018K\u2019 depend on the order of input array a.\n \u2018C\u2019 row-major (C-style), \u2018F\u2019 column-major (Fortran-style) memory\n representation. \u2018A\u2019 (any) means \u2018F\u2019 if a is Fortran contiguous, \u2018C\u2019\n otherwise. \u2018K\u2019 (keep) preserves input order. Defaults to \u2018C\u2019.\n like: array-like\n Reference object to allow the creation of Dask arrays with chunks\n that are not NumPy arrays. If an array-like passed in as ``like``\n supports the ``__array_function__`` protocol, the chunk type of the\n resulting array will be defined by it. In this case, it ensures the\n creation of a Dask array compatible with that passed in via this\n argument. 
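A short sketch of the chunk bookkeeping above: along the concatenation axis the output chunking is the inputs' chunkings glued together, while the other axes keep the first input's chunking:

import numpy as np
import dask.array as da

data = [da.from_array(np.ones((4, 4)), chunks=(2, 2)) for _ in range(3)]
x = da.concatenate(data, axis=0)
assert x.shape == (12, 4)
# axis 0: (2, 2) + (2, 2) + (2, 2) concatenated; axis 1: unchanged
assert x.chunks == ((2, 2, 2, 2, 2, 2), (2, 2))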
If ``like`` is a Dask array, the chunk type of the\n resulting array will be defined by the chunk type of ``like``.\n Requires NumPy 1.20.0 or higher.\n inline_array:\n Whether to inline the array in the resulting dask graph. For more information,\n see the documentation for ``dask.array.from_array()``.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asanyarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asanyarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if like is None:\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.split(\".\")[0] == \"xarray\" and hasattr(a, \"data\"):\n return asanyarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asanyarray(a, dtype=dtype, order=order)\n else:\n if not _numpy_120:\n raise RuntimeError(\"The use of ``like`` requires NumPy >= 1.20\")\n\n like_meta = meta_from_array(like)\n if isinstance(a, Array):\n return a.map_blocks(np.asanyarray, like=like_meta, dtype=dtype, order=order)\n else:\n a = np.asanyarray(a, like=like_meta, dtype=dtype, order=order)\n return from_array(\n a,\n chunks=a.shape,\n getitem=getter_inline,\n asarray=False,\n inline_array=inline_array,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diag_diag.if_k_0_.None_1.return.pad_diag_v_k_0_0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diag_diag.if_k_0_.None_1.return.pad_diag_v_k_0_0", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 571, "end_line": 628, "span_ids": ["diag"], "tokens": 651}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef diag(v, k=0):\n if not isinstance(v, np.ndarray) and not isinstance(v, Array):\n raise TypeError(f\"v must be a dask array or numpy array, got {type(v)}\")\n\n name = \"diag-\" + tokenize(v, k)\n\n meta = meta_from_array(v, 2 if v.ndim == 1 else 1)\n\n if isinstance(v, np.ndarray) or (\n hasattr(v, \"__array_function__\") and not isinstance(v, Array)\n ):\n if v.ndim == 1:\n m = abs(k)\n chunks = ((v.shape[0] + m,), (v.shape[0] + m,))\n dsk = {(name, 0, 0): (np.diag, v, k)}\n elif v.ndim == 2:\n kdiag_row_start = max(0, -k)\n kdiag_row_stop = min(v.shape[0], v.shape[1] - k)\n len_kdiag = kdiag_row_stop - kdiag_row_start\n chunks = ((0,),) if len_kdiag <= 0 else ((len_kdiag,),)\n dsk = {(name, 0): (np.diag, v, k)}\n else:\n raise ValueError(\"Array must be 1d or 2d only\")\n return Array(dsk, name, chunks, meta=meta)\n\n if v.ndim != 1:\n if v.ndim != 2:\n raise ValueError(\"Array must be 1d or 2d only\")\n if k == 0 and v.chunks[0] == 
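A brief sketch of the dispatch order in the body above: an existing dask Array is returned as-is, while a plain Python container is converted with np.asanyarray and wrapped as a single-chunk dask array:

import dask.array as da

x = da.ones((4, 4), chunks=2)
assert da.asanyarray(x) is x  # already a dask Array: passed straight through

y = da.asanyarray([[1, 2, 3], [4, 5, 6]])
assert y.shape == (2, 3)
assert y.npartitions == 1  # wrapped with chunks=a.shape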
v.chunks[1]:\n dsk = {\n (name, i): (np.diag, row[i]) for i, row in enumerate(v.__dask_keys__())\n }\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[v])\n return Array(graph, name, (v.chunks[0],), meta=meta)\n else:\n return diagonal(v, k)\n\n if k == 0:\n chunks_1d = v.chunks[0]\n blocks = v.__dask_keys__()\n dsk = {}\n for i, m in enumerate(chunks_1d):\n for j, n in enumerate(chunks_1d):\n key = (name, i, j)\n if i == j:\n dsk[key] = (np.diag, blocks[i])\n else:\n dsk[key] = (partial(np.zeros_like, shape=(m, n)), meta)\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[v])\n return Array(graph, name, (chunks_1d, chunks_1d), meta=meta)\n\n elif k > 0:\n return pad(diag(v), [[0, k], [k, 0]], mode=\"constant\")\n elif k < 0:\n return pad(diag(v), [[-k, 0], [0, -k]], mode=\"constant\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal_diagonal.kdiag_chunks._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal_diagonal.kdiag_chunks._", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 631, "end_line": 720, "span_ids": ["diagonal"], "tokens": 804}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n name = \"diagonal-\" + tokenize(a, offset, axis1, axis2)\n\n if a.ndim < 2:\n # NumPy uses `diag` as we do here.\n raise ValueError(\"diag requires an array of at least two dimensions\")\n\n def _axis_fmt(axis, name, ndim):\n if axis < 0:\n t = ndim + axis\n if t < 0:\n msg = \"{}: axis {} is out of bounds for array of dimension {}\"\n raise np.AxisError(msg.format(name, axis, ndim))\n axis = t\n return axis\n\n def pop_axes(chunks, axis1, axis2):\n chunks = list(chunks)\n chunks.pop(axis2)\n chunks.pop(axis1)\n return tuple(chunks)\n\n axis1 = _axis_fmt(axis1, \"axis1\", a.ndim)\n axis2 = _axis_fmt(axis2, \"axis2\", a.ndim)\n\n if axis1 == axis2:\n raise ValueError(\"axis1 and axis2 cannot be the same\")\n\n a = asarray(a)\n k = offset\n if axis1 > axis2:\n axis1, axis2 = axis2, axis1\n k = -offset\n\n free_axes = set(range(a.ndim)) - {axis1, axis2}\n free_indices = list(product(*(range(a.numblocks[i]) for i in free_axes)))\n ndims_free = len(free_axes)\n\n # equation of diagonal: i = j - k\n kdiag_row_start = max(0, -k)\n kdiag_col_start = max(0, k)\n kdiag_row_stop = min(a.shape[axis1], a.shape[axis2] - k)\n len_kdiag = kdiag_row_stop - kdiag_row_start\n\n if len_kdiag <= 0:\n xp = np\n\n if is_cupy_type(a._meta):\n import cupy\n\n xp = cupy\n\n out_chunks = pop_axes(a.chunks, axis1, axis2) + ((0,),)\n dsk = dict()\n for free_idx in free_indices:\n shape = tuple(\n out_chunks[axis][free_idx[axis]] for axis in range(ndims_free)\n )\n dsk[(name,) + free_idx + (0,)] = (\n partial(xp.empty, dtype=a.dtype),\n shape + (0,),\n 
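A usage sketch of the chunked k == 0 path above (off-diagonal blocks are zero-filled) and of the padding used for k != 0:

import numpy as np
import dask.array as da

v = da.arange(8, chunks=4)
d = da.diag(v)
assert d.chunks == ((4, 4), (4, 4))  # block (i, j) holds np.diag(v_i) only when i == j
np.testing.assert_array_equal(d.compute(), np.diag(np.arange(8)))

# k != 0 pads the k == 0 result, growing each axis by |k|:
assert da.diag(v, k=2).shape == (10, 10)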
)\n\n meta = meta_from_array(a, ndims_free + 1)\n return Array(dsk, name, out_chunks, meta=meta)\n\n # compute row index ranges for chunks along axis1:\n row_stops_ = np.cumsum(a.chunks[axis1])\n row_starts = np.roll(row_stops_, 1)\n row_starts[0] = 0\n\n # compute column index ranges for chunks along axis2:\n col_stops_ = np.cumsum(a.chunks[axis2])\n col_starts = np.roll(col_stops_, 1)\n col_starts[0] = 0\n\n # locate first chunk containing diagonal:\n row_blockid = np.arange(a.numblocks[axis1])\n col_blockid = np.arange(a.numblocks[axis2])\n\n row_filter = (row_starts <= kdiag_row_start) & (kdiag_row_start < row_stops_)\n col_filter = (col_starts <= kdiag_col_start) & (kdiag_col_start < col_stops_)\n (I,) = row_blockid[row_filter]\n (J,) = col_blockid[col_filter]\n\n # follow k-diagonal through chunks while constructing dask graph:\n dsk = dict()\n i = 0\n kdiag_chunks = ()\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal.while_kdiag_row_start_a_diagonal.return.Array_graph_name_out_ch": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/creation.py_diagonal.while_kdiag_row_start_a_diagonal.return.Array_graph_name_out_ch", "embedding": null, "metadata": {"file_path": "dask/array/creation.py", "file_name": "creation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 721, "end_line": 759, "span_ids": ["diagonal"], "tokens": 430}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n # ... 
other code\n while kdiag_row_start < a.shape[axis1] and kdiag_col_start < a.shape[axis2]:\n # localize block info:\n nrows, ncols = a.chunks[axis1][I], a.chunks[axis2][J]\n kdiag_row_start -= row_starts[I]\n kdiag_col_start -= col_starts[J]\n k = -kdiag_row_start if kdiag_row_start > 0 else kdiag_col_start\n kdiag_row_end = min(nrows, ncols - k)\n kdiag_len = kdiag_row_end - kdiag_row_start\n\n # increment dask graph:\n for free_idx in free_indices:\n input_idx = (\n free_idx[:axis1]\n + (I,)\n + free_idx[axis1 : axis2 - 1]\n + (J,)\n + free_idx[axis2 - 1 :]\n )\n output_idx = free_idx + (i,)\n dsk[(name,) + output_idx] = (\n np.diagonal,\n (a.name,) + input_idx,\n k,\n axis1,\n axis2,\n )\n\n kdiag_chunks += (kdiag_len,)\n # prepare for next iteration:\n i += 1\n kdiag_row_start = kdiag_row_end + row_starts[I]\n kdiag_col_start = min(ncols, nrows + k) + col_starts[J]\n I = I + 1 if kdiag_row_start == row_stops_[I] else I\n J = J + 1 if kdiag_col_start == col_stops_[J] else J\n\n out_chunks = pop_axes(a.chunks, axis1, axis2) + (kdiag_chunks,)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[a])\n meta = meta_from_array(a, ndims_free + 1)\n return Array(graph, name, out_chunks, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Look_for_invocation_usi_map_overlap.assert_int_chunksize_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap._Look_for_invocation_usi_map_overlap.assert_int_chunksize_x_", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 624, "end_line": 692, "span_ids": ["map_overlap"], "tokens": 760}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n # Look for invocation using deprecated single-array signature\n # map_overlap(x, func, depth, boundary=None, trim=True, **kwargs)\n if isinstance(func, Array) and callable(args[0]):\n warnings.warn(\n \"The use of map_overlap(array, func, **kwargs) is deprecated since dask 2.17.0 \"\n \"and will be an error in a future release. 
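A usage sketch of the chunk-walking loop above; the chunking of the new trailing axis records where the k-diagonal crosses block boundaries:

import numpy as np
import dask.array as da

a_np = np.arange(36).reshape(6, 6)
a = da.from_array(a_np, chunks=2)
d = da.diagonal(a, offset=1)
np.testing.assert_array_equal(d.compute(), np.diagonal(a_np, offset=1))
assert sum(d.chunks[0]) == 5  # the offset-1 diagonal of a 6x6 array has 5 elements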
To silence this warning, use the syntax \"\n \"map_overlap(func, array0,[ array1, ...,] **kwargs) instead.\",\n FutureWarning,\n )\n sig = [\"func\", \"depth\", \"boundary\", \"trim\"]\n depth = get(sig.index(\"depth\"), args, depth)\n boundary = get(sig.index(\"boundary\"), args, boundary)\n trim = get(sig.index(\"trim\"), args, trim)\n func, args = args[0], [func]\n\n if not callable(func):\n raise TypeError(\n \"First argument must be a callable function, not {}\\n\"\n \"Usage: da.map_overlap(function, x)\\n\"\n \" or: da.map_overlap(function, x, y, z)\".format(type(func).__name__)\n )\n if not all(isinstance(x, Array) for x in args):\n raise TypeError(\n \"All variadic arguments must be arrays, not {}\\n\"\n \"Usage: da.map_overlap(function, x)\\n\"\n \" or: da.map_overlap(function, x, y, z)\".format(\n [type(x).__name__ for x in args]\n )\n )\n\n # Coerce depth and boundary arguments to lists of individual\n # specifications for each array argument\n def coerce(xs, arg, fn):\n if not isinstance(arg, list):\n arg = [arg] * len(xs)\n return [fn(x.ndim, a) for x, a in zip(xs, arg)]\n\n depth = coerce(args, depth, coerce_depth)\n boundary = coerce(args, boundary, coerce_boundary)\n\n # Align chunks in each array to a common size\n if align_arrays:\n # Reverse unification order to allow block broadcasting\n inds = [list(reversed(range(x.ndim))) for x in args]\n _, args = unify_chunks(*list(concat(zip(args, inds))), warn=False)\n\n # Escape to map_blocks if depth is zero (a more efficient computation)\n if all([all(depth_val == 0 for depth_val in d.values()) for d in depth]):\n return map_blocks(func, *args, **kwargs)\n\n for i, x in enumerate(args):\n for j in range(x.ndim):\n if isinstance(depth[i][j], tuple) and boundary[i][j] != \"none\":\n raise NotImplementedError(\n \"Asymmetric overlap is currently only implemented \"\n \"for boundary='none', however boundary for dimension \"\n \"{} in array argument {} is {}\".format(j, i, boundary[i][j])\n )\n\n def assert_int_chunksize(xs):\n assert all(type(c) is int for x in xs for cc in x.chunks for c in cc)\n\n assert_int_chunksize(args)\n if not trim and \"chunks\" not in kwargs:\n kwargs[\"chunks\"] = args[0].chunks\n args = [overlap(x, depth=d, boundary=b) for x, d, b in zip(args, depth, boundary)]\n assert_int_chunksize(args)\n x = map_blocks(func, *args, **kwargs)\n assert_int_chunksize([x])\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap.if_trim__map_overlap.if_trim_.else_.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/overlap.py_map_overlap.if_trim__map_overlap.if_trim_.else_.return.x", "embedding": null, "metadata": {"file_path": "dask/array/overlap.py", "file_name": "overlap.py", "file_type": "text/x-python", "category": "implementation", "start_line": 693, "end_line": 715, "span_ids": ["map_overlap"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def map_overlap(\n func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs\n):\n # ... other code\n if trim:\n # Find index of array argument with maximum rank and break ties by choosing first provided\n i = sorted(enumerate(args), key=lambda v: (v[1].ndim, -v[0]))[-1][0]\n # Trim using depth/boundary setting for array of highest rank\n depth = depth[i]\n boundary = boundary[i]\n # remove any dropped axes from depth and boundary variables\n drop_axis = kwargs.pop(\"drop_axis\", None)\n if drop_axis is not None:\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n\n # convert negative drop_axis to equivalent positive value\n ndim_out = max(a.ndim for a in args if isinstance(a, Array))\n drop_axis = [d % ndim_out for d in drop_axis]\n\n kept_axes = tuple(ax for ax in range(args[i].ndim) if ax not in drop_axis)\n # note that keys are relabeled to match values in range(x.ndim)\n depth = {n: depth[ax] for n, ax in enumerate(kept_axes)}\n boundary = {n: boundary[ax] for n, ax in enumerate(kept_axes)}\n return trim_internal(x, depth, boundary)\n else:\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_contextlib_RandomState.seed.self__numpy_state_seed_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/random.py_contextlib_RandomState.seed.self__numpy_state_seed_se", "embedding": null, "metadata": {"file_path": "dask/array/random.py", "file_name": "random.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 61, "span_ids": ["imports", "RandomState.seed", "RandomState", "RandomState.__init__"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nimport numbers\nfrom itertools import chain, product\nfrom numbers import Integral\nfrom operator import getitem\n\nimport numpy 
as np\n\nfrom ..base import tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import derived_from, random_state_data\nfrom .core import (\n Array,\n asarray,\n broadcast_shapes,\n broadcast_to,\n normalize_chunks,\n slices_from_chunks,\n)\nfrom .creation import arange\n\n\nclass RandomState:\n \"\"\"\n Mersenne Twister pseudo-random number generator\n\n This object contains state to deterministically generate pseudo-random\n numbers from a variety of probability distributions. It is identical to\n ``np.random.RandomState`` except that all functions also take a ``chunks=``\n keyword argument.\n\n Parameters\n ----------\n seed: Number\n Object to pass to RandomState to serve as deterministic seed\n RandomState: Callable[seed] -> RandomState\n A callable that, when provided with a ``seed`` keyword provides an\n object that operates identically to ``np.random.RandomState`` (the\n default). This might also be a function that returns a\n ``randomgen.RandomState``, ``mkl_random``, or\n ``cupy.random.RandomState`` object.\n\n Examples\n --------\n >>> import dask.array as da\n >>> state = da.random.RandomState(1234) # a seed\n >>> x = state.normal(10, 0.1, size=3, chunks=(2,))\n >>> x.compute()\n array([10.01867852, 10.04812289, 9.89649746])\n\n See Also\n --------\n np.random.RandomState\n \"\"\"\n\n def __init__(self, seed=None, RandomState=None):\n self._numpy_state = np.random.RandomState(seed)\n self._RandomState = RandomState\n\n def seed(self, seed=None):\n self._numpy_state.seed(seed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d__intersect_1d._will_hold_the_list_of_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d__intersect_1d._will_hold_the_list_of_s", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 110, "span_ids": ["_intersect_1d"], "tokens": 667}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _intersect_1d(breaks):\n \"\"\"\n Internal utility to intersect chunks for 1d after preprocessing.\n\n >>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')\n >>> old = cumdims_label(((2, 2, 1), (5,)), 'o')\n\n >>> _intersect_1d(_breakpoints(old[0], new[0])) # doctest: +NORMALIZE_WHITESPACE\n [[(0, slice(0, 2, None))],\n [(1, slice(0, 2, None)), (2, slice(0, 1, None))]]\n >>> _intersect_1d(_breakpoints(old[1], new[1])) # doctest: +NORMALIZE_WHITESPACE\n [[(0, slice(0, 2, None))],\n [(0, slice(2, 4, None))],\n [(0, slice(4, 5, None))]]\n\n Parameters\n ----------\n\n breaks: list of tuples\n Each tuple is ('o', 8) or ('n', 8)\n These are pairs of 'o' old or new 'n'\n indicator with a corresponding cumulative sum,\n or breakpoint (a position along the chunking axis).\n The list of pairs is already ordered by breakpoint.\n Note that an 'o' pair always occurs BEFORE\n an 'n' pair if both share the 
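A small determinism sketch: per-chunk seeds are derived from the top-level seed (via random_state_data), so two identically seeded states produce identical chunked samples:

import numpy as np
import dask.array as da

s1 = da.random.RandomState(1234)
s2 = da.random.RandomState(1234)
x = s1.normal(10, 0.1, size=6, chunks=3)
y = s2.normal(10, 0.1, size=6, chunks=3)
np.testing.assert_array_equal(x.compute(), y.compute())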
same breakpoint.\n Uses 'o' and 'n' to make new tuples of slices for\n the new block crosswalk to old blocks.\n \"\"\"\n # EXPLANATION:\n # We know each new chunk is obtained from the old chunks, but\n # from which ones and how? This function provides the answer.\n # On return, each new chunk is represented as a list of slices\n # of the old chunks. Therefore, paired with each slice is the\n # index of the old chunk to which that slice refers.\n # NOTE: if any nonzero-size new chunks extend beyond the total\n # span of the old chunks, then those new chunks are assumed\n # to be obtained from an imaginary old chunk that extends\n # from the end of that total span to infinity. The chunk-\n # index of this imaginary chunk follows in consecutive order\n # from the chunk-indices of the actual old chunks.\n\n # First, let us determine the index of the last old_chunk:\n o_pairs = [pair for pair in breaks if pair[0] == \"o\"]\n last_old_chunk_idx = len(o_pairs) - 2\n last_o_br = o_pairs[-1][1] # end of range spanning all old chunks\n\n start = 0 # start of a slice of an old chunk\n last_end = 0\n old_idx = 0 # index of old chunk\n last_o_end = 0\n ret = [] # will hold the list of new chunks\n ret_next = [] # will hold the list of slices comprising one new chunk\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d.for_idx_in_range_1_len_b__intersect_1d.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/rechunk.py__intersect_1d.for_idx_in_range_1_len_b__intersect_1d.return.ret", "embedding": null, "metadata": {"file_path": "dask/array/rechunk.py", "file_name": "rechunk.py", "file_type": "text/x-python", "category": "implementation", "start_line": 111, "end_line": 154, "span_ids": ["_intersect_1d"], "tokens": 398}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _intersect_1d(breaks):\n # ... other code\n for idx in range(1, len(breaks)): # Note start from the 2nd pair\n # the interval between any two consecutive breakpoints is a potential\n # new chunk:\n label, br = breaks[idx]\n last_label, last_br = breaks[idx - 1]\n if last_label == \"n\":\n # This always denotes the end of a new chunk or the start\n # of the next new chunk or both\n start = last_end\n if ret_next:\n ret.append(ret_next)\n ret_next = []\n else:\n start = 0\n end = br - last_br + start # end of a slice of an old chunk\n last_end = end\n if br == last_br:\n # Here we have a zero-size interval between the previous and\n # current breakpoints. 
This should not result in a slice unless\n # this interval's end-points (`last_label` and `label`) are both\n # equal to 'n'\n if label == \"o\":\n old_idx += 1\n last_o_end = end\n if label == \"n\" and last_label == \"n\":\n if br == last_o_br:\n # zero-size new chunks located at the edge of the range\n # spanning all the old chunks are assumed to come from the\n # end of the last old chunk:\n slc = slice(last_o_end, last_o_end)\n ret_next.append((last_old_chunk_idx, slc))\n continue\n else:\n continue\n ret_next.append((old_idx, slice(start, end)))\n if label == \"o\":\n old_idx += 1\n start = 0\n last_o_end = end\n\n if ret_next:\n ret.append(ret_next)\n\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_from___future___import_an_result_type.return.np_result_type_args_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_from___future___import_an_result_type.return.np_result_type_args_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 61, "span_ids": ["result_type", "imports", "array"], "tokens": 430}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport math\nimport warnings\nfrom collections.abc import Iterable\nfrom functools import partial, reduce, wraps\nfrom numbers import Integral, Real\n\nimport numpy as np\nfrom tlz import concat, interleave, sliding_window\n\nfrom ..base import is_dask_collection, tokenize\nfrom ..core import flatten\nfrom ..delayed import Delayed, unpack_collections\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..utils import apply, derived_from, funcname, is_arraylike, is_cupy_type\nfrom . 
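The doctest at the top of _intersect_1d can be run directly to see the crosswalk it builds: new chunk 1 of the first axis (length 3) is stitched from all of old chunk 1 plus a one-element slice of old chunk 2:

from dask.array.rechunk import _breakpoints, _intersect_1d, cumdims_label

new = cumdims_label(((2, 3), (2, 2, 1)), "n")
old = cumdims_label(((2, 2, 1), (5,)), "o")
assert _intersect_1d(_breakpoints(old[0], new[0])) == [
    [(0, slice(0, 2, None))],
    [(1, slice(0, 2, None)), (2, slice(0, 1, None))],
]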
import chunk\nfrom .core import (\n Array,\n asanyarray,\n asarray,\n blockwise,\n broadcast_arrays,\n broadcast_shapes,\n broadcast_to,\n concatenate,\n elemwise,\n implements,\n is_scalar_for_elemwise,\n map_blocks,\n stack,\n tensordot_lookup,\n)\nfrom .creation import arange, diag, empty, indices, tri\nfrom .einsumfuncs import einsum # noqa\nfrom .numpy_compat import _numpy_120\nfrom .reductions import reduction\nfrom .ufunc import multiply, sqrt\nfrom .utils import array_safe, asarray_safe, meta_from_array, safe_wraps, validate_axis\nfrom .wrap import ones\n\n# save built-in for histogram functions which use range as a kwarg.\n_range = range\n\n\n@derived_from(np)\ndef array(x, dtype=None, ndmin=None, *, like=None):\n if not _numpy_120 and like is not None:\n raise RuntimeError(\"The use of ``like`` requires NumPy >= 1.20\")\n x = asarray(x, like=like)\n while ndmin is not None and x.ndim < ndmin:\n x = x[None, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\n@derived_from(np)\ndef result_type(*args):\n args = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]\n return np.result_type(*args)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_shape_expand_dims.return.a_reshape_shape_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_shape_expand_dims.return.a_reshape_shape_", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1874, "end_line": 1900, "span_ids": ["ravel", "shape", "expand_dims", "union1d"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef shape(array):\n return array.shape\n\n\n@derived_from(np)\ndef union1d(ar1, ar2):\n return unique(concatenate((ar1.ravel(), ar2.ravel())))\n\n\n@derived_from(np)\ndef ravel(array_like):\n return asanyarray(array_like).reshape((-1,))\n\n\n@derived_from(np)\ndef expand_dims(a, axis):\n if type(axis) not in (tuple, list):\n axis = (axis,)\n\n out_ndim = len(axis) + a.ndim\n axis = validate_axis(axis, out_ndim)\n\n shape_it = iter(a.shape)\n shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]\n\n return a.reshape(shape)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_squeeze_squeeze.return.a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py_squeeze_squeeze.return.a", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1903, "end_line": 1919, "span_ids": 
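Two quick checks of the helpers above: ndmin left-pads the shape with length-1 axes, and result_type swaps each dask array for its dtype before deferring to np.result_type:

import numpy as np
import dask.array as da

assert da.array([1, 2, 3], ndmin=3).shape == (1, 1, 3)
assert da.result_type(da.ones(4, dtype="i4"), 1.0) == np.float64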
["squeeze"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@derived_from(np)\ndef squeeze(a, axis=None):\n if axis is None:\n axis = tuple(i for i, d in enumerate(a.shape) if d == 1)\n elif not isinstance(axis, tuple):\n axis = (axis,)\n\n if any(a.shape[i] != 1 for i in axis):\n raise ValueError(\"cannot squeeze axis with size other than one\")\n\n axis = validate_axis(axis, a.ndim)\n\n sl = tuple(0 if i in axis else slice(None) for i, s in enumerate(a.shape))\n\n a = a[sl]\n\n return a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__partition__partition.return.multiples_remainder": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/routines.py__partition__partition.return.multiples_remainder", "embedding": null, "metadata": {"file_path": "dask/array/routines.py", "file_name": "routines.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2225, "end_line": 2233, "span_ids": ["_partition"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _partition(total: int, divisor: int) -> tuple[tuple[int, ...], tuple[int, ...]]:\n \"\"\"Given a total and a divisor, return two tuples: A tuple containing `divisor`\n repeated the number of times it divides `total`, and length-1 or empty tuple\n containing the remainder when `total` is divided by `divisor`. If `divisor` factors\n `total`, i.e. 
if the remainder is 0, then `remainder` is empty.\n \"\"\"\n multiples = (divisor,) * (total // divisor)\n remainder = (total % divisor,) if total % divisor else ()\n return multiples, remainder", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices_parse_assignment_indices._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices_parse_assignment_indices._", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1291, "end_line": 1358, "span_ids": ["parse_assignment_indices"], "tokens": 753}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_assignment_indices(indices, shape):\n \"\"\"Reformat the indices for assignment.\n\n The aim of this is to convert the indices to a standardised form\n so that it is easier to ascertain which chunks are touched by the\n indices.\n\n This function is intended to be called by `setitem_array`.\n\n A slice object that is decreasing (i.e. with a negative step) is\n recast as an increasing slice (i.e. with a positive step). For\n example ``slice(7,3,-1)`` would be cast as ``slice(4,8,1)``. This\n is to facilitate finding which blocks are touched by the\n index. The dimensions for which this has occurred are returned by\n the function.\n\n Parameters\n ----------\n indices : numpy-style indices\n Indices to array defining the elements to be assigned.\n shape : sequence of `int`\n The shape of the array.\n\n Returns\n -------\n parsed_indices : `list`\n The reformatted indices that are equivalent to the input\n indices.\n implied_shape : `list`\n The shape implied by the parsed indices. For instance, indices\n of ``(slice(0,2), 5, [4,1,-1])`` will have implied shape\n ``[2,3]``.\n reverse : `list`\n The positions of the dimensions whose indices in the\n parsed_indices output are reversed slices.\n implied_shape_positions: `list`\n The positions of the dimensions whose indices contribute to\n the implied_shape. 
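The private helper is simple enough to check directly (a sketch; _partition lives in dask/array/routines.py per the metadata above):

from dask.array.routines import _partition

assert _partition(10, 3) == ((3, 3, 3), (1,))  # remainder kept as a 1-tuple
assert _partition(9, 3) == ((3, 3, 3), ())  # empty when divisor divides total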
For instance, indices of ``(slice(0,2), 5,\n [4,1,-1])`` will have implied_shape ``[2,3]`` and\n implied_shape_positions ``[0,2]``.\n\n Examples\n --------\n >>> parse_assignment_indices((slice(1, -1),), (8,))\n ([slice(1, 7, 1)], [6], [], [0])\n\n >>> parse_assignment_indices(([1, 2, 6, 5],), (8,))\n ([array([1, 2, 6, 5])], [4], [], [0])\n\n >>> parse_assignment_indices((3, slice(-1, 2, -1)), (7, 8))\n ([3, slice(3, 8, 1)], [5], [1], [1])\n\n >>> parse_assignment_indices((slice(-1, 2, -1), 3, [1, 2]), (7, 8, 9))\n ([slice(3, 7, 1), 3, array([1, 2])], [4, 2], [0], [0, 2])\n\n >>> parse_assignment_indices((slice(0, 5), slice(3, None, 2)), (5, 4))\n ([slice(0, 5, 1), slice(3, 4, 2)], [5, 1], [], [0, 1])\n\n >>> parse_assignment_indices((slice(0, 5), slice(3, 3, 2)), (5, 4))\n ([slice(0, 5, 1), slice(3, 3, 2)], [5, 0], [], [0])\n\n \"\"\"\n if not isinstance(indices, tuple):\n indices = (indices,)\n\n # Disallow scalar boolean indexing, and also indexing by scalar\n # numpy or dask array.\n #\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices._numpy_allows_these_but_parse_assignment_indices.n_lists.0": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices._numpy_allows_these_but_parse_assignment_indices.n_lists.0", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1359, "end_line": 1380, "span_ids": ["parse_assignment_indices"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_assignment_indices(indices, shape):\n # numpy allows these, but Array.__getitem__ does not yet implement\n # them properly, so disallow it for now in __setitem__\n for index in indices:\n if index is True or index is False:\n raise NotImplementedError(\n \"dask does not yet implement assignment to a scalar \"\n f\"boolean index: {index!r}\"\n )\n\n if (is_arraylike(index) or is_dask_collection(index)) and not index.ndim:\n raise NotImplementedError(\n \"dask does not yet implement assignment to a scalar \"\n f\"numpy or dask array index: {index!r}\"\n )\n\n # Initialize output variables\n implied_shape = []\n implied_shape_positions = []\n reverse = []\n parsed_indices = list(normalize_index(indices, shape))\n\n n_lists = 0\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices.for_i_index_size_in_e_parse_assignment_indices.return.parsed_indices_implied_s": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/slicing.py_parse_assignment_indices.for_i_index_size_in_e_parse_assignment_indices.return.parsed_indices_implied_s", "embedding": null, "metadata": {"file_path": "dask/array/slicing.py", "file_name": "slicing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1382, "end_line": 1480, "span_ids": ["parse_assignment_indices"], "tokens": 803}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def parse_assignment_indices(indices, shape):\n # ... other code\n\n for i, (index, size) in enumerate(zip(parsed_indices, shape)):\n is_slice = isinstance(index, slice)\n if is_slice:\n # Index is a slice\n start, stop, step = index.indices(size)\n if step < 0 and stop == -1:\n stop = None\n\n index = slice(start, stop, step)\n\n if step < 0:\n # When the slice step is negative, transform the\n # original slice to a new slice with a positive step\n # such that the result of the new slice is the reverse\n # of the result of the original slice.\n #\n # For example, if the original slice is slice(6,0,-2)\n # then the new slice will be slice(2,7,2).\n #\n # >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n # >>> a[slice(6, 0, -2)]\n # [6, 4, 2]\n # >>> a[slice(2, 7, 2)]\n # [2, 4, 6]\n # >>> a[slice(6, 0, -2)] == list(reversed(a[slice(2, 7, 2)]))\n # True\n start, stop, step = index.indices(size)\n step *= -1\n div, mod = divmod(start - stop - 1, step)\n div_step = div * step\n start -= div_step\n stop = start + div_step + 1\n\n index = slice(start, stop, step)\n reverse.append(i)\n\n start, stop, step = index.indices(size)\n\n # Note: We now have stop >= start and step >= 0\n\n div, mod = divmod(stop - start, step)\n if not div and not mod:\n # stop equals start => zero-sized slice for this\n # dimension\n implied_shape.append(0)\n else:\n if mod != 0:\n div += 1\n\n implied_shape.append(div)\n implied_shape_positions.append(i)\n\n elif isinstance(index, (int, np.integer)):\n # Index is an integer\n index = int(index)\n\n elif isinstance(index, np.ndarray) or is_dask_collection(index):\n # Index is 1-d array\n n_lists += 1\n if n_lists > 1:\n raise NotImplementedError(\n \"dask is currently limited to at most one \"\n \"dimension's assignment index being a \"\n \"1-d array of integers or booleans. 
\"\n f\"Got: {indices}\"\n )\n\n if index.ndim != 1:\n raise IndexError(\n f\"Incorrect shape ({index.shape}) of integer \"\n f\"indices for dimension with size {size}\"\n )\n\n index_size = index.size\n if (\n index.dtype == bool\n and not math.isnan(index_size)\n and index_size != size\n ):\n raise IndexError(\n \"boolean index did not match indexed array along \"\n f\"dimension {i}; dimension is {size} but \"\n f\"corresponding boolean dimension is {index_size}\"\n )\n\n # Posify an integer dask array (integer numpy arrays were\n # posified in `normalize_index`)\n if is_dask_collection(index):\n if index.dtype == bool:\n index_size = np.nan\n else:\n index = np.where(index < 0, index + size, index)\n\n implied_shape.append(index_size)\n implied_shape_positions.append(i)\n\n parsed_indices[i] = index\n\n return parsed_indices, implied_shape, reverse, implied_shape_positions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_None_24_ttest_ind.return.delayed_Ttest_indResult_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/stats.py_None_24_ttest_ind.return.delayed_Ttest_indResult_", "embedding": null, "metadata": {"file_path": "dask/array/stats.py", "file_name": "stats.py", "file_type": "text/x-python", "category": "implementation", "start_line": 83, "end_line": 102, "span_ids": ["ttest_ind", "impl:16"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -----------------\n# Statistical Tests\n# -----------------\n\n\n@derived_from(scipy.stats)\ndef ttest_ind(a, b, axis=0, equal_var=True):\n v1 = da.var(a, axis, ddof=1) # XXX: np -> da\n v2 = da.var(b, axis, ddof=1) # XXX: np -> da\n n1 = a.shape[axis]\n n2 = b.shape[axis]\n\n if equal_var:\n df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)\n else:\n df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)\n\n res = _ttest_ind_from_stats(da.mean(a, axis), da.mean(b, axis), denom, df)\n\n return delayed(Ttest_indResult, nout=2)(*res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_contextlib_test_graph_from_arraylike.assert_any_arr_is_v_for_v": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_contextlib_test_graph_from_arraylike.assert_any_arr_is_v_for_v", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 86, "span_ids": ["imports", "test_graph_from_arraylike"], "tokens": 570}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nimport copy\nimport pathlib\nimport xml.etree.ElementTree\nfrom unittest import mock\n\nimport pytest\n\nnp = pytest.importorskip(\"numpy\")\n\nimport operator\nimport os\nimport time\nimport warnings\nfrom functools import reduce\nfrom io import StringIO\nfrom operator import add, sub\nfrom threading import Lock\n\nfrom numpy import nancumprod, nancumsum\nfrom tlz import concat, countby, merge\nfrom tlz.curried import identity\n\nimport dask\nimport dask.array as da\nfrom dask.array.core import (\n Array,\n BlockView,\n PerformanceWarning,\n blockdims_from_blockshape,\n broadcast_chunks,\n broadcast_shapes,\n broadcast_to,\n common_blockdim,\n concatenate,\n concatenate3,\n concatenate_axes,\n dotmany,\n from_array,\n from_delayed,\n from_func,\n getter,\n graph_from_arraylike,\n normalize_chunks,\n optimize,\n stack,\n store,\n)\nfrom dask.array.utils import assert_eq, same_keys\nfrom dask.base import compute_as_if_collection, tokenize\nfrom dask.blockwise import broadcast_dimensions\nfrom dask.blockwise import make_blockwise_graph as top\nfrom dask.blockwise import optimize_blockwise\nfrom dask.delayed import Delayed, delayed\nfrom dask.highlevelgraph import HighLevelGraph, MaterializedLayer\nfrom dask.layers import Blockwise\nfrom dask.utils import SerializableLock, apply, key_split, parse_bytes, tmpdir, tmpfile\nfrom dask.utils_test import dec, hlg_layer_topological, inc\n\nfrom ..chunk import getitem\nfrom .test_dispatch import EncapsulateNDArray\n\n\n@pytest.mark.parametrize(\"inline_array\", [True, False])\ndef test_graph_from_arraylike(inline_array):\n d = 2\n chunk = (2, 3)\n shape = tuple(d * n for n in chunk)\n arr = np.ones(shape)\n\n dsk = graph_from_arraylike(\n arr, chunk, shape=shape, name=\"X\", inline_array=inline_array\n )\n\n assert isinstance(dsk, HighLevelGraph)\n if inline_array:\n assert len(dsk.layers) == 1\n assert isinstance(hlg_layer_topological(dsk, 0), Blockwise)\n else:\n assert len(dsk.layers) == 2\n assert isinstance(hlg_layer_topological(dsk, 0), MaterializedLayer)\n assert isinstance(hlg_layer_topological(dsk, 1), Blockwise)\n dsk = dict(dsk)\n\n # Somewhat odd membership check to avoid numpy elemwise __in__ overload\n assert any(arr is v for v in dsk.values()) is not inline_array", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_zarr_unique_name_test_tiledb_roundtrip.None_2.assert_a_chunks_tdb_ch": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_zarr_unique_name_test_tiledb_roundtrip.None_2.assert_a_chunks_tdb_ch", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4480, "end_line": 4686, "span_ids": ["test_tiledb_roundtrip", "test_zarr_existing_array", "test_from_zarr_name", "test_zarr_nocompute", "test_zarr_roundtrip_with_path_like", "test_zarr_regions", "test_zarr_group", "test_regular_chunks", "test_zarr_return_stored", "test_zarr_roundtrip", 
"test_read_zarr_chunks", "test_from_zarr_unique_name", "test_to_zarr_unknown_chunks_raises", "test_zarr_pass_mapper", "test_zarr_inline_array"], "tokens": 1922}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_zarr_unique_name():\n zarr = pytest.importorskip(\"zarr\")\n a = zarr.array([1, 2, 3])\n b = zarr.array([4, 5, 6])\n\n assert da.from_zarr(a).name != da.from_zarr(b).name\n\n\ndef test_from_zarr_name():\n zarr = pytest.importorskip(\"zarr\")\n a = zarr.array([1, 2, 3])\n assert da.from_zarr(a, name=\"foo\").name == \"foo\"\n\n\ndef test_zarr_roundtrip():\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(d)\n a2 = da.from_zarr(d)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\ndef test_zarr_roundtrip_with_path_like():\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n path = pathlib.Path(d)\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(path)\n a2 = da.from_zarr(path)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\n@pytest.mark.parametrize(\"compute\", [False, True])\ndef test_zarr_return_stored(compute):\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a2 = a.to_zarr(d, compute=compute, return_stored=True)\n assert isinstance(a2, Array)\n assert_eq(a, a2, check_graph=False)\n assert a2.chunks == a.chunks\n\n\n@pytest.mark.parametrize(\"inline_array\", [True, False])\ndef test_zarr_inline_array(inline_array):\n zarr = pytest.importorskip(\"zarr\")\n a = zarr.array([1, 2, 3])\n dsk = dict(da.from_zarr(a, inline_array=inline_array).dask)\n assert len(dsk) == (0 if inline_array else 1) + 1\n assert (a in dsk.values()) is not inline_array\n\n\ndef test_zarr_existing_array():\n zarr = pytest.importorskip(\"zarr\")\n c = (1, 1)\n a = da.ones((3, 3), chunks=c)\n z = zarr.zeros_like(a, chunks=c)\n a.to_zarr(z)\n a2 = da.from_zarr(z)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\ndef test_to_zarr_unknown_chunks_raises():\n pytest.importorskip(\"zarr\")\n a = da.random.random((10,), chunks=(3,))\n a = a[a > 0.5]\n with pytest.raises(ValueError, match=\"unknown chunk sizes\"):\n a.to_zarr({})\n\n\ndef test_read_zarr_chunks():\n pytest.importorskip(\"zarr\")\n a = da.zeros((9,), chunks=(3,))\n with tmpdir() as d:\n a.to_zarr(d)\n arr = da.from_zarr(d, chunks=(5,))\n assert arr.chunks == ((5, 4),)\n\n\ndef test_zarr_pass_mapper():\n pytest.importorskip(\"zarr\")\n import zarr.storage\n\n with tmpdir() as d:\n mapper = zarr.storage.DirectoryStore(d)\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(mapper)\n a2 = da.from_zarr(mapper)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\ndef test_zarr_group():\n zarr = pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n a.to_zarr(d, component=\"test\")\n with pytest.raises((OSError, ValueError)):\n a.to_zarr(d, component=\"test\", overwrite=False)\n a.to_zarr(d, component=\"test\", overwrite=True)\n\n # second time is fine, group exists\n a.to_zarr(d, component=\"test2\", overwrite=False)\n a.to_zarr(d, component=\"nested/test\", overwrite=False)\n group = zarr.open_group(d, mode=\"r\")\n assert list(group) == [\"nested\", \"test\", \"test2\"]\n assert \"test\" in group[\"nested\"]\n\n a2 = 
da.from_zarr(d, component=\"test\")\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\n@pytest.mark.parametrize(\n \"data\",\n [\n [(), True],\n [((1,),), True],\n [((1, 1, 1),), True],\n [((1,), (1,)), True],\n [((2, 2, 1),), True],\n [((2, 2, 3),), False],\n [((1, 1, 1), (2, 2, 3)), False],\n [((1, 2, 1),), False],\n ],\n)\ndef test_regular_chunks(data):\n chunkset, expected = data\n assert da.core._check_regular_chunks(chunkset) == expected\n\n\ndef test_zarr_nocompute():\n pytest.importorskip(\"zarr\")\n with tmpdir() as d:\n a = da.zeros((3, 3), chunks=(1, 1))\n out = a.to_zarr(d, compute=False)\n assert isinstance(out, Delayed)\n dask.compute(out)\n a2 = da.from_zarr(d)\n assert_eq(a, a2)\n assert a2.chunks == a.chunks\n\n\ndef test_zarr_regions():\n zarr = pytest.importorskip(\"zarr\")\n\n a = da.arange(16).reshape((4, 4)).rechunk(2)\n z = zarr.zeros_like(a, chunks=2)\n\n a[:2, :2].to_zarr(z, region=(slice(2), slice(2)))\n a2 = da.from_zarr(z)\n expected = [[0, 1, 0, 0], [4, 5, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n assert_eq(a2, expected)\n assert a2.chunks == a.chunks\n\n a[:3, 3:4].to_zarr(z, region=(slice(1, 4), slice(2, 3)))\n a2 = da.from_zarr(z)\n expected = [[0, 1, 0, 0], [4, 5, 3, 0], [0, 0, 7, 0], [0, 0, 11, 0]]\n assert_eq(a2, expected)\n assert a2.chunks == a.chunks\n\n a[3:, 3:].to_zarr(z, region=(slice(2, 3), slice(1, 2)))\n a2 = da.from_zarr(z)\n expected = [[0, 1, 0, 0], [4, 5, 3, 0], [0, 15, 7, 0], [0, 0, 11, 0]]\n assert_eq(a2, expected)\n assert a2.chunks == a.chunks\n\n with pytest.raises(ValueError):\n with tmpdir() as d:\n a.to_zarr(d, region=(slice(2), slice(2)))\n\n\ndef test_tiledb_roundtrip():\n tiledb = pytest.importorskip(\"tiledb\")\n # 1) load with default chunking\n # 2) load from existing tiledb.DenseArray\n # 3) write to existing tiledb.DenseArray\n a = da.random.random((3, 3))\n with tmpdir() as uri:\n da.to_tiledb(a, uri)\n tdb = da.from_tiledb(uri)\n\n assert_eq(a, tdb)\n assert a.chunks == tdb.chunks\n\n # from tiledb.array\n with tiledb.open(uri) as t:\n tdb2 = da.from_tiledb(t)\n assert_eq(a, tdb2)\n\n with tmpdir() as uri2:\n with tiledb.empty_like(uri2, a) as t:\n a.to_tiledb(t)\n assert_eq(da.from_tiledb(uri2), a)\n\n # specific chunking\n with tmpdir() as uri:\n a = da.random.random((3, 3), chunks=(1, 1))\n a.to_tiledb(uri)\n tdb = da.from_tiledb(uri)\n\n assert_eq(a, tdb)\n assert a.chunks == tdb.chunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_multiattr_test_blockview.with_pytest_raises_IndexE.blockview_100_100_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_tiledb_multiattr_test_blockview.with_pytest_raises_IndexE.blockview_100_100_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4689, "end_line": 4766, "span_ids": ["test_tiledb_multiattr", "test_blockview"], "tokens": 838}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tiledb_multiattr():\n tiledb = pytest.importorskip(\"tiledb\")\n dom = tiledb.Domain(\n tiledb.Dim(\"x\", (0, 1000), tile=100), tiledb.Dim(\"y\", (0, 1000), tile=100)\n )\n schema = tiledb.ArraySchema(\n attrs=(tiledb.Attr(\"attr1\"), tiledb.Attr(\"attr2\")), domain=dom\n )\n\n with tmpdir() as uri:\n tiledb.DenseArray.create(uri, schema)\n tdb = tiledb.DenseArray(uri, \"w\")\n\n ar1 = np.random.randn(*tdb.schema.shape)\n ar2 = np.random.randn(*tdb.schema.shape)\n\n tdb[:] = {\"attr1\": ar1, \"attr2\": ar2}\n tdb = tiledb.DenseArray(uri, \"r\")\n\n # basic round-trip from dask.array\n d = da.from_tiledb(uri, attribute=\"attr2\")\n assert_eq(d, ar2)\n\n # smoke-test computation directly on the TileDB view\n d = da.from_tiledb(uri, attribute=\"attr2\")\n assert_eq(np.mean(ar2), d.mean().compute(scheduler=\"threads\"))\n\n\ndef test_blockview():\n x = da.arange(10, chunks=2)\n blockview = BlockView(x)\n assert x.blocks == blockview\n assert isinstance(blockview[0], da.Array)\n\n assert_eq(blockview[0], x[:2])\n assert_eq(blockview[-1], x[-2:])\n assert_eq(blockview[:3], x[:6])\n assert_eq(blockview[[0, 1, 2]], x[:6])\n assert_eq(blockview[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))\n assert_eq(blockview.shape, tuple(map(len, x.chunks)))\n assert_eq(blockview.size, np.prod(blockview.shape))\n assert_eq(\n blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]\n )\n\n x = da.random.random((20, 20), chunks=(4, 5))\n blockview = BlockView(x)\n assert_eq(blockview[0], x[:4])\n assert_eq(blockview[0, :3], x[:4, :15])\n assert_eq(blockview[:, :3], x[:, :15])\n assert_eq(blockview.shape, tuple(map(len, x.chunks)))\n assert_eq(blockview.size, np.prod(blockview.shape))\n assert_eq(\n blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]\n )\n\n x = da.ones((40, 40, 40), chunks=(10, 10, 10))\n blockview = BlockView(x)\n assert_eq(blockview[0, :, 0], np.ones((10, 40, 10)))\n assert_eq(blockview.shape, tuple(map(len, x.chunks)))\n assert_eq(blockview.size, np.prod(blockview.shape))\n assert_eq(\n blockview.ravel(), [blockview[idx] for idx in np.ndindex(blockview.shape)]\n )\n\n x = da.ones((2, 2), chunks=1)\n with pytest.raises(ValueError):\n blockview[[0, 1], [0, 1]]\n with pytest.raises(ValueError):\n blockview[np.array([0, 1]), [0, 1]]\n with pytest.raises(ValueError) as info:\n blockview[np.array([0, 1]), np.array([0, 1])]\n assert \"list\" in str(info.value)\n with pytest.raises(ValueError) as info:\n blockview[None, :, :]\n assert \"newaxis\" in str(info.value) and \"not supported\" in str(info.value)\n with pytest.raises(IndexError) as info:\n blockview[100, 100]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blocks_indexer_test_nbytes_auto.None_3.normalize_chunks_10B_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_blocks_indexer_test_nbytes_auto.None_3.normalize_chunks_10B_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4763, "end_line": 4961, 
"span_ids": ["test_3851", "test_blockwise_large_inputs_delayed", "test_map_blocks_large_inputs_delayed", "test_map_blocks_chunks", "test_partitions_indexer", "test_scipy_sparse_concatenate", "test_3925", "test_slice_reversed", "test_blocks_indexer", "test_nbytes_auto", "test_dask_array_holds_scipy_sparse_containers"], "tokens": 2111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blocks_indexer():\n x = da.arange(10, chunks=2)\n\n assert isinstance(x.blocks[0], da.Array)\n\n assert_eq(x.blocks[0], x[:2])\n assert_eq(x.blocks[-1], x[-2:])\n assert_eq(x.blocks[:3], x[:6])\n assert_eq(x.blocks[[0, 1, 2]], x[:6])\n assert_eq(x.blocks[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))\n\n x = da.random.random((20, 20), chunks=(4, 5))\n assert_eq(x.blocks[0], x[:4])\n assert_eq(x.blocks[0, :3], x[:4, :15])\n assert_eq(x.blocks[:, :3], x[:, :15])\n\n x = da.ones((40, 40, 40), chunks=(10, 10, 10))\n assert_eq(x.blocks[0, :, 0], np.ones((10, 40, 10)))\n\n x = da.ones((2, 2), chunks=1)\n with pytest.raises(ValueError):\n x.blocks[[0, 1], [0, 1]]\n with pytest.raises(ValueError):\n x.blocks[np.array([0, 1]), [0, 1]]\n with pytest.raises(ValueError) as info:\n x.blocks[np.array([0, 1]), np.array([0, 1])]\n assert \"list\" in str(info.value)\n with pytest.raises(ValueError) as info:\n x.blocks[None, :, :]\n assert \"newaxis\" in str(info.value) and \"not supported\" in str(info.value)\n with pytest.raises(IndexError) as info:\n x.blocks[100, 100]\n\n\ndef test_partitions_indexer():\n # .partitions is an alias of .blocks for dask arrays\n x = da.arange(10, chunks=2)\n\n assert isinstance(x.partitions[0], da.Array)\n\n assert_eq(x.partitions[0], x[:2])\n assert_eq(x.partitions[-1], x[-2:])\n assert_eq(x.partitions[:3], x[:6])\n assert_eq(x.partitions[[0, 1, 2]], x[:6])\n assert_eq(x.partitions[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))\n\n x = da.random.random((20, 20), chunks=(4, 5))\n assert_eq(x.partitions[0], x[:4])\n assert_eq(x.partitions[0, :3], x[:4, :15])\n assert_eq(x.partitions[:, :3], x[:, :15])\n\n x = da.ones((40, 40, 40), chunks=(10, 10, 10))\n assert_eq(x.partitions[0, :, 0], np.ones((10, 40, 10)))\n\n x = da.ones((2, 2), chunks=1)\n with pytest.raises(ValueError):\n x.partitions[[0, 1], [0, 1]]\n with pytest.raises(ValueError):\n x.partitions[np.array([0, 1]), [0, 1]]\n with pytest.raises(ValueError) as info:\n x.partitions[np.array([0, 1]), np.array([0, 1])]\n assert \"list\" in str(info.value)\n with pytest.raises(ValueError) as info:\n x.partitions[None, :, :]\n assert \"newaxis\" in str(info.value) and \"not supported\" in str(info.value)\n with pytest.raises(IndexError) as info:\n x.partitions[100, 100]\n\n\n@pytest.mark.filterwarnings(\"ignore:the matrix subclass:PendingDeprecationWarning\")\ndef test_dask_array_holds_scipy_sparse_containers():\n pytest.importorskip(\"scipy.sparse\")\n import scipy.sparse\n\n x = da.random.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n xx = x.compute()\n y = x.map_blocks(scipy.sparse.csr_matrix)\n\n vs = y.to_delayed().flatten().tolist()\n values = dask.compute(*vs, scheduler=\"single-threaded\")\n assert all(isinstance(v, scipy.sparse.csr_matrix) for v in values)\n\n yy = y.compute(scheduler=\"single-threaded\")\n assert isinstance(yy, 
scipy.sparse.spmatrix)\n assert (yy == xx).all()\n\n z = x.T.map_blocks(scipy.sparse.csr_matrix)\n zz = z.compute(scheduler=\"single-threaded\")\n assert isinstance(zz, scipy.sparse.spmatrix)\n assert (zz == xx.T).all()\n\n\n@pytest.mark.parametrize(\"axis\", [0, 1])\ndef test_scipy_sparse_concatenate(axis):\n pytest.importorskip(\"scipy.sparse\")\n import scipy.sparse\n\n rs = da.random.RandomState(RandomState=np.random.RandomState)\n\n xs = []\n ys = []\n for i in range(2):\n x = rs.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n xs.append(x)\n ys.append(x.map_blocks(scipy.sparse.csr_matrix))\n\n z = da.concatenate(ys, axis=axis)\n z = z.compute()\n\n if axis == 0:\n sp_concatenate = scipy.sparse.vstack\n elif axis == 1:\n sp_concatenate = scipy.sparse.hstack\n z_expected = sp_concatenate([scipy.sparse.csr_matrix(e.compute()) for e in xs])\n\n assert (z != z_expected).nnz == 0\n\n\ndef test_3851():\n with warnings.catch_warnings(record=True) as record:\n Y = da.random.random((10, 10), chunks=\"auto\")\n da.argmax(Y, axis=0).compute()\n assert not record\n\n\ndef test_3925():\n x = da.from_array(np.array([\"a\", \"b\", \"c\"], dtype=object), chunks=-1)\n assert (x[0] == x[0]).compute(scheduler=\"sync\")\n\n\ndef test_map_blocks_large_inputs_delayed():\n a = da.ones(10, chunks=(5,))\n b = np.ones(1000000)\n\n c = a.map_blocks(add, b)\n assert any(b is v for v in c.dask.values())\n assert repr(dict(c.dask)).count(repr(b)[:10]) == 1 # only one occurrence\n\n d = a.map_blocks(lambda x, y: x + y.sum(), y=b)\n assert_eq(d, d)\n assert any(b is v for v in d.dask.values())\n assert repr(dict(c.dask)).count(repr(b)[:10]) == 1 # only one occurrence\n\n\ndef test_blockwise_large_inputs_delayed():\n a = da.ones(10, chunks=(5,))\n b = np.ones(1000000)\n\n c = da.blockwise(add, \"i\", a, \"i\", b, None, dtype=a.dtype)\n assert any(b is v for v in c.dask.values())\n assert repr(dict(c.dask)).count(repr(b)[:10]) == 1 # only one occurrence\n\n d = da.blockwise(lambda x, y: x + y, \"i\", a, \"i\", y=b, dtype=a.dtype)\n assert any(b is v for v in d.dask.values())\n assert repr(dict(c.dask)).count(repr(b)[:10]) == 1 # only one occurrence\n\n\ndef test_slice_reversed():\n x = da.ones(10, chunks=-1)\n y = x[6:3]\n\n assert_eq(y, np.ones(0))\n\n\ndef test_map_blocks_chunks():\n x = da.arange(400, chunks=(100,))\n y = da.arange(40, chunks=(10,))\n\n def func(a, b):\n return np.array([a.max(), b.max()])\n\n assert_eq(\n da.map_blocks(func, x, y, chunks=(2,), dtype=x.dtype),\n np.array([99, 9, 199, 19, 299, 29, 399, 39]),\n )\n\n\ndef test_nbytes_auto():\n chunks = normalize_chunks(\"800B\", shape=(500,), dtype=\"float64\")\n assert chunks == ((100, 100, 100, 100, 100),)\n chunks = normalize_chunks(\"200B\", shape=(10, 10), dtype=\"float64\")\n assert chunks == ((5, 5), (5, 5))\n chunks = normalize_chunks((5, \"200B\"), shape=(10, 10), dtype=\"float64\")\n assert chunks == ((5, 5), (5, 5))\n chunks = normalize_chunks(\"33B\", shape=(10, 10), dtype=\"float64\")\n assert chunks == ((2, 2, 2, 2, 2), (2, 2, 2, 2, 2))\n chunks = normalize_chunks(\"1800B\", shape=(10, 20, 30), dtype=\"float64\")\n assert chunks == ((5, 5), (5, 5, 5, 5), (6, 6, 6, 6, 6))\n\n with pytest.raises(ValueError):\n normalize_chunks(\"10B\", shape=(10,), limit=20, dtype=\"float64\")\n with pytest.raises(ValueError):\n normalize_chunks(\"100B\", shape=(10, 10), limit=20, dtype=\"float64\")\n with pytest.raises(ValueError):\n normalize_chunks((\"100B\", \"10B\"), shape=(10, 10), dtype=\"float64\")\n with pytest.raises(ValueError):\n 
normalize_chunks((\"10B\", \"10B\"), shape=(10, 10), limit=20, dtype=\"float64\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_auto_chunks_h5py_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_auto_chunks_h5py_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 4964, "end_line": 5201, "span_ids": ["test_len_object_with_unknown_size", "test_compute_chunk_sizes_warning_fixes_rechunk", "test_compute_chunk_sizes_2d_array", "test_rechunk_auto", "test_compute_chunk_sizes_warning_fixes_concatenate", "test_compute_chunk_sizes", "unknown", "test_map_blocks_series", "test_compute_chunk_sizes_warning_fixes_to_svg", "_known", "test_no_warnings_from_blockwise", "test_compute_chunk_sizes_warning_fixes_slicing", "test_dask_layers", "test_compute_chunk_sizes_3d_array", "test_from_array_meta", "test_compute_chunk_sizes_warning_fixes_to_zarr", "test_chunk_assignment_invalidates_cached_properties", "test_map_blocks_dataframe", "test_compute_chunk_sizes_warning_fixes_reduction", "test_auto_chunks_h5py", "test_compute_chunk_sizes_warning_fixes_reshape"], "tokens": 2036}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_auto_chunks_h5py():\n h5py = pytest.importorskip(\"h5py\")\n\n with tmpfile(\".hdf5\") as fn:\n with h5py.File(fn, mode=\"a\") as f:\n d = f.create_dataset(\n \"/x\", shape=(1000, 1000), chunks=(32, 64), dtype=\"float64\"\n )\n d[:] = 1\n\n with h5py.File(fn, mode=\"a\") as f:\n d = f[\"x\"]\n with dask.config.set({\"array.chunk-size\": \"1 MiB\"}):\n x = da.from_array(d)\n assert isinstance(x._meta, np.ndarray)\n assert x.chunks == ((256, 256, 256, 232), (512, 488))\n\n\ndef test_no_warnings_from_blockwise():\n with warnings.catch_warnings(record=True) as record:\n x = da.ones((3, 10, 10), chunks=(3, 2, 2))\n da.map_blocks(lambda y: np.mean(y, axis=0), x, dtype=x.dtype, drop_axis=0)\n assert not record\n\n with warnings.catch_warnings(record=True) as record:\n x = da.ones((15, 15), chunks=(5, 5))\n (x.dot(x.T + 1) - x.mean(axis=0)).std()\n assert not record\n\n with warnings.catch_warnings(record=True) as record:\n x = da.ones((1,), chunks=(1,))\n 1 / x[0]\n assert not record\n\n\ndef test_from_array_meta():\n sparse = pytest.importorskip(\"sparse\")\n x = np.ones(10)\n meta = sparse.COO.from_numpy(x)\n y = da.from_array(x, meta=meta)\n assert isinstance(y._meta, sparse.COO)\n\n\ndef test_compute_chunk_sizes():\n x = da.from_array(np.linspace(-1, 1, num=50), chunks=10)\n y = x[x < 0]\n assert np.isnan(y.shape[0])\n assert y.chunks == ((np.nan,) * 5,)\n\n z = y.compute_chunk_sizes()\n assert y is z\n assert z.chunks == ((10, 10, 5, 0, 0),)\n assert len(z) == 25\n\n # check that dtype of chunk dimensions is `int`\n assert isinstance(z.chunks[0][0], int)\n\n\ndef test_compute_chunk_sizes_2d_array():\n X = 
np.linspace(-1, 1, num=9 * 4).reshape(9, 4)\n X = da.from_array(X, chunks=(3, 4))\n idx = X.sum(axis=1) > 0\n Y = X[idx]\n\n # This is very similar to the DataFrame->Array conversion\n assert np.isnan(Y.shape[0]) and Y.shape[1] == 4\n assert Y.chunks == ((np.nan, np.nan, np.nan), (4,))\n\n Z = Y.compute_chunk_sizes()\n assert Y is Z\n assert Z.chunks == ((0, 1, 3), (4,))\n assert Z.shape == (4, 4)\n\n\ndef test_compute_chunk_sizes_3d_array(N=8):\n X = np.linspace(-1, 2, num=8 * 8 * 8).reshape(8, 8, 8)\n X = da.from_array(X, chunks=(4, 4, 4))\n idx = X.sum(axis=0).sum(axis=0) > 0\n Y = X[idx]\n idx = X.sum(axis=1).sum(axis=1) < 0\n Y = Y[:, idx]\n idx = X.sum(axis=2).sum(axis=1) > 0.1\n Y = Y[:, :, idx]\n\n # Checking to make sure shapes are different on outputs\n assert Y.compute().shape == (8, 3, 5)\n assert X.compute().shape == (8, 8, 8)\n\n assert Y.chunks == ((np.nan, np.nan),) * 3\n assert all(np.isnan(s) for s in Y.shape)\n Z = Y.compute_chunk_sizes()\n assert Z is Y\n assert Z.shape == (8, 3, 5)\n assert Z.chunks == ((4, 4), (3, 0), (1, 4))\n\n\ndef _known(num=50):\n return da.from_array(np.linspace(-1, 1, num=num), chunks=10)\n\n\n@pytest.fixture()\ndef unknown():\n x = _known()\n y = x[x < 0]\n assert y.chunks == ((np.nan,) * 5,)\n return y\n\n\ndef test_compute_chunk_sizes_warning_fixes_rechunk(unknown):\n y = unknown\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n y.rechunk(\"auto\")\n y.compute_chunk_sizes()\n y.rechunk(\"auto\")\n\n\ndef test_compute_chunk_sizes_warning_fixes_to_zarr(unknown):\n pytest.importorskip(\"zarr\")\n y = unknown\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n with StringIO() as f:\n y.to_zarr(f)\n y.compute_chunk_sizes()\n\n with pytest.raises(ValueError, match=\"irregular chunking\"):\n with StringIO() as f:\n y.to_zarr(f)\n\n\ndef test_compute_chunk_sizes_warning_fixes_to_svg(unknown):\n y = unknown\n with pytest.raises(NotImplementedError, match=\"compute_chunk_sizes\"):\n y.to_svg()\n y.compute_chunk_sizes()\n y.to_svg()\n\n\ndef test_compute_chunk_sizes_warning_fixes_concatenate():\n x = _known(num=100).reshape(10, 10)\n idx = x.sum(axis=0) > 0\n y1 = x[idx]\n y2 = x[idx]\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n da.concatenate((y1, y2), axis=1)\n y1.compute_chunk_sizes()\n y2.compute_chunk_sizes()\n da.concatenate((y1, y2), axis=1)\n\n\ndef test_compute_chunk_sizes_warning_fixes_reduction(unknown):\n y = unknown\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n da.argmin(y)\n y.compute_chunk_sizes()\n da.argmin(y)\n\n\ndef test_compute_chunk_sizes_warning_fixes_reshape(unknown):\n y = unknown\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n da.reshape(y, (5, 5))\n y.compute_chunk_sizes()\n da.reshape(y, (5, 5))\n\n\ndef test_compute_chunk_sizes_warning_fixes_slicing():\n x = _known(num=100).reshape(10, 10)\n y = x[x.sum(axis=0) < 0]\n with pytest.raises(ValueError, match=\"compute_chunk_sizes\"):\n y[:3, :]\n y.compute_chunk_sizes()\n y[:3, :]\n\n\ndef test_rechunk_auto():\n x = da.ones(10, chunks=(1,))\n y = x.rechunk()\n\n assert y.npartitions == 1\n\n\ndef test_chunk_assignment_invalidates_cached_properties():\n x = da.ones((4,), chunks=(1,))\n y = x.copy()\n # change chunks directly, which should change all of the tested properties\n y._chunks = ((2, 2), (0, 0, 0, 0))\n assert not x.ndim == y.ndim\n assert not x.shape == y.shape\n assert not x.size == y.size\n assert not x.numblocks == y.numblocks\n assert not x.npartitions == y.npartitions\n 
assert not x.__dask_keys__() == y.__dask_keys__()\n assert not np.array_equal(x._key_array, y._key_array)\n\n\ndef test_map_blocks_series():\n pd = pytest.importorskip(\"pandas\")\n import dask.dataframe as dd\n from dask.dataframe.utils import assert_eq as dd_assert_eq\n\n x = da.ones(10, chunks=(5,))\n s = x.map_blocks(pd.Series)\n assert isinstance(s, dd.Series)\n assert s.npartitions == x.npartitions\n\n dd_assert_eq(s, s)\n\n\n@pytest.mark.xfail(reason=\"need to remove singleton index dimension\")\ndef test_map_blocks_dataframe():\n pd = pytest.importorskip(\"pandas\")\n import dask.dataframe as dd\n from dask.dataframe.utils import assert_eq as dd_assert_eq\n\n x = da.ones((10, 2), chunks=(5, 2))\n s = x.map_blocks(pd.DataFrame)\n assert isinstance(s, dd.DataFrame)\n assert s.npartitions == x.npartitions\n dd_assert_eq(s, s)\n\n\ndef test_dask_layers():\n a = da.ones(1)\n assert a.dask.layers.keys() == {a.name}\n assert a.dask.dependencies == {a.name: set()}\n assert a.__dask_layers__() == (a.name,)\n b = a + 1\n assert b.dask.layers.keys() == {a.name, b.name}\n assert b.dask.dependencies == {a.name: set(), b.name: {a.name}}\n assert b.__dask_layers__() == (b.name,)\n\n\ndef test_len_object_with_unknown_size():\n a = da.random.random(size=(20, 2))\n b = a[a < 0.5]\n with pytest.raises(ValueError, match=\"on object with unknown chunk size\"):\n assert len(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_meta_from_array_literal_test_meta_from_array_type_inputs.assert_da_from_array_np_o": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_meta_from_array_literal_test_meta_from_array_type_inputs.assert_da_from_array_np_o", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_utils.py", "file_name": "test_array_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 78, "span_ids": ["test_meta_from_array_literal", "test_meta_from_array_type_inputs"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"meta\", [\"\", \"str\", \"\", \"str\", b\"\", b\"str\"])\n@pytest.mark.parametrize(\"dtype\", [None, \"bool\", \"int\", \"float\"])\ndef test_meta_from_array_literal(meta, dtype):\n if dtype is None:\n assert meta_from_array(meta, dtype=dtype).dtype.kind in \"SU\"\n else:\n assert (\n meta_from_array(meta, dtype=dtype).dtype == np.array([], dtype=dtype).dtype\n )\n\n\ndef test_meta_from_array_type_inputs():\n x = meta_from_array(np.ndarray, ndim=2, dtype=np.float32)\n assert isinstance(x, np.ndarray)\n assert x.ndim == 2\n assert x.dtype == np.float32\n\n x = da.Array(\n {(\"x\", 0, 0): (np.ones, (5, 5))},\n name=\"x\",\n chunks=(5, 5),\n shape=(5, 5),\n meta=np.ndarray,\n dtype=float,\n )\n assert_eq(x, x)\n\n assert da.from_array(np.ones(5).astype(np.int32), meta=np.ndarray).dtype == np.int32", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_assert_eq_checks_dtype_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_utils.py_test_assert_eq_checks_dtype_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_utils.py", "file_name": "test_array_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 81, "end_line": 118, "span_ids": ["test_assert_eq_checks_dtype", "test_assert_eq_scheduler"], "tokens": 304}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"a,b\",\n [\n (da.array([1]), 1.0),\n (da.array([1, 2]), [1.0, 2]),\n (da.array([1, 2]), np.array([1.0, 2])),\n ],\n)\ndef test_assert_eq_checks_dtype(a, b):\n with pytest.raises(AssertionError, match=\"a and b have different dtypes\"):\n assert_eq(a, b)\n\n\n@pytest.mark.parametrize(\n \"a,b\",\n [\n (1.0, 1.0),\n ([1, 2], [1, 2]),\n (da.array([1, 2]), da.array([1, 2])),\n ],\n)\ndef test_assert_eq_scheduler(a, b):\n counter = 0 # Counts how many times `custom_scheduler` is executed.\n\n def custom_scheduler(*args, **kwargs):\n nonlocal counter\n counter += 1\n return get_sync(*args, **kwargs)\n\n assert_eq(a, b)\n assert counter == 0\n\n assert_eq(a, b, scheduler=custom_scheduler)\n # `custom_scheduler` should be executed 2x the number of arrays.\n # Once in `persist` and once in `compute`\n n_da_arrays = len([x for x in [a, b] if isinstance(x, Array)]) * 2\n assert counter == n_da_arrays", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_linspace_test_linspace.None_11": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_linspace_test_linspace.None_11", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 168, "span_ids": ["test_linspace"], "tokens": 788}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"endpoint\", [True, False])\ndef test_linspace(endpoint):\n darr = da.linspace(6, 49, endpoint=endpoint, chunks=5)\n nparr = np.linspace(6, 49, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13)\n nparr = np.linspace(1.4, 4.9, endpoint=endpoint, num=13)\n assert_eq(darr, nparr)\n\n darr = da.linspace(6, 49, 
endpoint=endpoint, chunks=5, dtype=float)\n nparr = np.linspace(6, 49, endpoint=endpoint, dtype=float)\n assert_eq(darr, nparr)\n\n darr, dstep = da.linspace(6, 49, endpoint=endpoint, chunks=5, retstep=True)\n nparr, npstep = np.linspace(6, 49, endpoint=endpoint, retstep=True)\n assert np.allclose(dstep, npstep)\n assert_eq(darr, nparr)\n\n darr = da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13, dtype=int)\n nparr = np.linspace(1.4, 4.9, num=13, endpoint=endpoint, dtype=int)\n assert_eq(darr, nparr)\n assert sorted(\n da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask\n ) == sorted(da.linspace(1.4, 4.9, endpoint=endpoint, chunks=5, num=13).dask)\n assert sorted(\n da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask\n ) == sorted(da.linspace(6, 49, endpoint=endpoint, chunks=5, dtype=float).dask)\n\n x = da.array([0.2, 6.4, 3.0, 1.6])\n nparr = np.linspace(0, 2, 8, endpoint=endpoint)\n darr = da.linspace(da.argmin(x), da.argmax(x) + 1, 8, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n nparr = np.linspace(0, 0, 0, endpoint=endpoint)\n darr = da.linspace(0, 0, 0, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n nparr = np.linspace(1, 1, 0, endpoint=endpoint)\n darr = da.linspace(1, 1, 0, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n nparr = np.linspace(1, 5, 0, endpoint=endpoint)\n darr = da.linspace(1, 5, 0, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n nparr = np.linspace(0, 0, 1, endpoint=endpoint)\n darr = da.linspace(0, 0, 1, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n nparr = np.linspace(1, 1, 1, endpoint=endpoint)\n darr = da.linspace(1, 1, 1, endpoint=endpoint)\n assert_eq(darr, nparr)\n\n nparr = np.linspace(1, 5, 1, endpoint=endpoint)\n darr = da.linspace(1, 5, 1, endpoint=endpoint)\n assert_eq(darr, nparr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_bad_input_test_diag_bad_input.with_pytest_raises_TypeEr.da_diag_v_k_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_bad_input_test_diag_bad_input.with_pytest_raises_TypeEr.da_diag_v_k_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 450, "span_ids": ["test_diag_bad_input"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"k\", [0, 3, -3, 8])\ndef test_diag_bad_input(k):\n # when input numpy array is neither 1d nor 2d:\n v = np.arange(2 * 3 * 4).reshape((2, 3, 4))\n with pytest.raises(ValueError, match=\"Array must be 1d or 2d only\"):\n da.diag(v, k)\n\n # when input dask array is neither 1d nor 2d:\n v = da.arange(2 * 3 * 4).reshape((2, 3, 4))\n with pytest.raises(ValueError, match=\"Array must be 1d or 2d only\"):\n da.diag(v, k)\n\n # when input is not an array:\n v = 1\n with pytest.raises(TypeError, match=\"v must be a dask array or numpy 
array\"):\n da.diag(v, k)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_2d_array_creation_test_diag_2d_array_creation.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_2d_array_creation_test_diag_2d_array_creation.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 453, "end_line": 475, "span_ids": ["test_diag_2d_array_creation"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"k\", [0, 3, -3, 8])\ndef test_diag_2d_array_creation(k):\n # when input 1d-array is a numpy array:\n v = np.arange(11)\n assert_eq(da.diag(v, k), np.diag(v, k))\n\n # when input 1d-array is a dask array:\n v = da.arange(11, chunks=3)\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)\n\n v = v + v + 3\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n\n v = da.arange(11, chunks=11)\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_extraction_test_diag_extraction.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_diag_extraction_test_diag_extraction.None_5", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 478, "end_line": 498, "span_ids": ["test_diag_extraction"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"k\", [0, 3, -3, 8])\ndef test_diag_extraction(k):\n # when input 2d-array is a square numpy array:\n x = np.arange(64).reshape((8, 8))\n assert_eq(da.diag(x, k), np.diag(x, k))\n # when input 2d-array is a square dask array:\n d = da.from_array(x, chunks=(4, 4))\n assert_eq(da.diag(d, k), np.diag(x, k))\n # heterogeneous chunks:\n d = da.from_array(x, chunks=((3, 2, 3), (4, 1, 2, 1)))\n assert_eq(da.diag(d, k), np.diag(x, k))\n\n # when input 2d-array is a rectangular numpy array:\n y = np.arange(5 * 
8).reshape((5, 8))\n assert_eq(da.diag(y, k), np.diag(y, k))\n # when input 2d-array is a rectangular dask array:\n d = da.from_array(y, chunks=(4, 4))\n assert_eq(da.diag(d, k), np.diag(y, k))\n # heterogeneous chunks:\n d = da.from_array(y, chunks=((3, 2), (4, 1, 2, 1)))\n assert_eq(da.diag(d, k), np.diag(y, k))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_udf_test_pad_udf.assert_eq_np_r_da_r_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_pad_udf_test_pad_udf.assert_eq_np_r_da_r_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 839, "end_line": 858, "span_ids": ["test_pad_udf"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"kwargs\", [{}, {\"scaler\": 2}])\ndef test_pad_udf(kwargs):\n def udf_pad(vector, pad_width, iaxis, inner_kwargs):\n assert kwargs == inner_kwargs\n scaler = inner_kwargs.get(\"scaler\", 1)\n vector[: pad_width[0]] = -scaler * pad_width[0]\n vector[-pad_width[1] :] = scaler * pad_width[1]\n return vector\n\n shape = (10, 11)\n chunks = (4, 5)\n pad_width = ((1, 2), (2, 3))\n\n np_a = np.random.random(shape)\n da_a = da.from_array(np_a, chunks=chunks)\n\n np_r = np.pad(np_a, pad_width, udf_pad, **kwargs)\n da_r = da.pad(da_a, pad_width, udf_pad, **kwargs)\n\n assert_eq(np_r, da_r)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_auto_chunks_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_creation.py_test_auto_chunks_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "file_type": "text/x-python", "category": "test", "start_line": 861, "end_line": 877, "span_ids": ["test_diagonal_zero_chunks", "test_auto_chunks"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_auto_chunks():\n with dask.config.set({\"array.chunk-size\": \"50 MiB\"}):\n x = da.ones((10000, 10000))\n assert 4 < x.npartitions < 32\n\n\ndef test_diagonal_zero_chunks():\n x = da.ones((8, 8), chunks=(4, 4))\n dd = da.ones((8, 8), chunks=(4, 4))\n d = da.diagonal(dd)\n\n expected = np.ones((8,))\n assert_eq(d, expected)\n assert_eq(d + d, 2 * expected)\n A = d + 
x\n assert_eq(A, np.full((8, 8), 2.0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_1d_test_setitem_extended_API_0d.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_1d_test_setitem_extended_API_0d.None_1", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 265, "end_line": 302, "span_ids": ["test_setitem_1d", "test_setitem_2d", "test_setitem_extended_API_0d"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem_1d():\n x = cupy.arange(10)\n dx = da.from_array(x.copy(), chunks=(5,))\n\n x[x > 6] = -1\n x[x % 2 == 0] = -2\n\n dx[dx > 6] = -1\n dx[dx % 2 == 0] = -2\n\n assert_eq(x, dx)\n\n\ndef test_setitem_2d():\n x = cupy.arange(24).reshape((4, 6))\n dx = da.from_array(x.copy(), chunks=(2, 2))\n\n x[x > 6] = -1\n x[x % 2 == 0] = -2\n\n dx[dx > 6] = -1\n dx[dx % 2 == 0] = -2\n\n assert_eq(x, dx)\n\n\ndef test_setitem_extended_API_0d():\n # 0-d array\n x = cupy.array(9)\n dx = da.from_array(x.copy())\n\n x[()] = -1\n dx[()] = -1\n assert_eq(x, dx.compute())\n\n x[...] = -11\n dx[...] 
= -11\n assert_eq(x, dx.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_1d_test_setitem_extended_API_1d.assert_eq_x_dx_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_core.py_test_setitem_extended_API_1d_test_setitem_extended_API_1d.assert_eq_x_dx_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_core.py", "file_name": "test_cupy_core.py", "file_type": "text/x-python", "category": "test", "start_line": 305, "end_line": 344, "span_ids": ["test_setitem_extended_API_1d"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"index, value\",\n [\n [Ellipsis, -1],\n [slice(2, 8, 2), -2],\n [slice(8, None, 2), -3],\n pytest.param(\n slice(8, None, 2),\n [-30],\n marks=pytest.mark.skip(reason=\"Unsupported assigning `list` to CuPy array\"),\n ),\n [slice(1, None, -2), -4],\n pytest.param(\n slice(1, None, -2),\n [-40],\n marks=pytest.mark.skip(reason=\"Unsupported assigning `list` to CuPy array\"),\n ),\n [slice(3, None, 2), -5],\n [slice(-3, None, -2), -6],\n [slice(1, None, -2), -4],\n [slice(3, None, 2), -5],\n pytest.param(\n slice(3, None, 2),\n [10, 11, 12, 13],\n marks=pytest.mark.skip(reason=\"Unsupported assigning `list` to CuPy array\"),\n ),\n pytest.param(\n slice(-4, None, -2),\n [14, 15, 16, 17],\n marks=pytest.mark.skip(reason=\"Unsupported assigning `list` to CuPy array\"),\n ),\n ],\n)\ndef test_setitem_extended_API_1d(index, value):\n # 1-d array\n x = cupy.arange(10)\n dx = da.from_array(x, chunks=(4, 6))\n dx[index] = value\n x[index] = value\n assert_eq(x, dx.compute())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nep35_test_index_with_int_dask_array_nep35.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_slicing.py_test_index_with_int_dask_array_nep35_test_index_with_int_dask_array_nep35.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_slicing.py", "file_name": "test_cupy_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 34, "end_line": 66, "span_ids": ["test_index_with_int_dask_array_nep35"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(not _numpy_120, 
reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\"idx_chunks\", [None, 3, 2, 1])\n@pytest.mark.parametrize(\"x_chunks\", [(3, 5), (2, 3), (1, 2), (1, 1)])\ndef test_index_with_int_dask_array_nep35(x_chunks, idx_chunks):\n # test data is crafted to stress use cases:\n # - pick from different chunks of x out of order\n # - a chunk of x contains no matches\n # - only one chunk of x\n x = cupy.array(\n [[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]\n )\n orig_idx = np.array([3, 0, 1])\n expect = cupy.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])\n\n if x_chunks is not None:\n x = da.from_array(x, chunks=x_chunks)\n if idx_chunks is not None:\n idx = da.from_array(orig_idx, chunks=idx_chunks)\n else:\n idx = orig_idx\n\n assert_eq(x[:, idx], expect)\n assert_eq(x.T[idx, :], expect.T)\n\n # CuPy index\n orig_idx = cupy.array(orig_idx)\n if idx_chunks is not None:\n idx = da.from_array(orig_idx, chunks=idx_chunks)\n else:\n idx = orig_idx\n\n assert_eq(x[:, idx], expect)\n assert_eq(x.T[idx, :], expect.T)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_shapes__check_lu_result.assert_eq_u_da_triu_u_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_linalg.py_test_svd_compressed_shapes__check_lu_result.assert_eq_u_da_triu_u_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_linalg.py", "file_name": "test_linalg.py", "file_type": "text/x-python", "category": "test", "start_line": 525, "end_line": 544, "span_ids": ["test_svd_compressed_shapes", "_check_lu_result"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"m\", [5, 10, 15, 20])\n@pytest.mark.parametrize(\"n\", [5, 10, 15, 20])\n@pytest.mark.parametrize(\"k\", [5])\n@pytest.mark.parametrize(\"chunks\", [(5, 10), (10, 5)])\ndef test_svd_compressed_shapes(m, n, k, chunks):\n x = da.random.random(size=(m, n), chunks=chunks)\n u, s, v = svd_compressed(x, k, n_power_iter=1, compute=True, seed=1)\n u, s, v = da.compute(u, s, v)\n r = min(m, n, k)\n assert u.shape == (m, r)\n assert s.shape == (r,)\n assert v.shape == (r, n)\n\n\ndef _check_lu_result(p, l, u, A):\n assert np.allclose(p.dot(l).dot(u), A)\n\n # check triangulars\n assert_eq(l, da.tril(l), check_graph=False)\n assert_eq(u, da.triu(u), check_graph=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_escapes_to_map_blocks_when_depth_is_zero_test_map_overlap_no_depth.assert_eq_y_x_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_map_overlap_escapes_to_map_blocks_when_depth_is_zero_test_map_overlap_no_depth.assert_eq_y_x_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 323, "end_line": 336, "span_ids": ["test_map_overlap_escapes_to_map_blocks_when_depth_is_zero", "test_map_overlap_no_depth"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_map_overlap_escapes_to_map_blocks_when_depth_is_zero():\n x = da.arange(10, chunks=5)\n y = x.map_overlap(lambda x: x + 1, depth=0, boundary=\"none\")\n assert len(y.dask) == 2 * x.numblocks[0] # depth=0 --> map_blocks\n assert_eq(y, np.arange(10) + 1)\n\n\n@pytest.mark.parametrize(\n \"boundary\", [None, \"reflect\", \"periodic\", \"nearest\", \"none\", 0]\n)\ndef test_map_overlap_no_depth(boundary):\n x = da.arange(10, chunks=5)\n y = x.map_overlap(lambda i: i, depth=0, boundary=boundary, dtype=x.dtype)\n assert_eq(y, x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_different_depths_and_boundary_combinations_test_different_depths_and_boundary_combinations.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_different_depths_and_boundary_combinations_test_different_depths_and_boundary_combinations.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 560, "end_line": 588, "span_ids": ["test_different_depths_and_boundary_combinations"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"depth\",\n [\n {0: 0, 1: 0}, # depth all zeros\n {0: 4, 1: 0}, # depth with some zeros\n {0: 5, 1: 5}, # depth equal to boundary length\n {0: 8, 1: 7}, # depth greater than boundary length\n ],\n)\ndef test_different_depths_and_boundary_combinations(depth):\n expected = np.arange(100).reshape(10, 10)\n darr = da.from_array(expected, chunks=(5, 2))\n\n reflected = overlap(darr, depth=depth, boundary=\"reflect\")\n nearest = overlap(darr, depth=depth, boundary=\"nearest\")\n periodic = overlap(darr, depth=depth, boundary=\"periodic\")\n constant = overlap(darr, depth=depth, boundary=42)\n\n result = trim_internal(reflected, depth, boundary=\"reflect\")\n assert_array_equal(result, expected)\n\n result = trim_internal(nearest, depth, boundary=\"nearest\")\n assert_array_equal(result, expected)\n\n result = trim_internal(periodic, 
depth, boundary=\"periodic\")\n assert_array_equal(result, expected)\n\n result = trim_internal(constant, depth, boundary=42)\n assert_array_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_boundary_test_trim_boundary.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_overlap.py_test_trim_boundary_test_trim_boundary.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "file_type": "text/x-python", "category": "test", "start_line": 710, "end_line": 725, "span_ids": ["test_trim_boundary"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"boundary\", [\"reflect\", \"periodic\", \"nearest\", \"none\"])\ndef test_trim_boundary(boundary):\n x = da.from_array(np.arange(24).reshape(4, 6), chunks=(2, 3))\n x_overlaped = da.overlap.overlap(x, 2, boundary={0: \"reflect\", 1: boundary})\n x_trimmed = da.overlap.trim_overlap(\n x_overlaped, 2, boundary={0: \"reflect\", 1: boundary}\n )\n assert np.all(x == x_trimmed)\n\n x_overlaped = da.overlap.overlap(x, 2, boundary={1: boundary})\n x_trimmed = da.overlap.trim_overlap(x_overlaped, 2, boundary={1: boundary})\n assert np.all(x == x_trimmed)\n\n x_overlaped = da.overlap.overlap(x, 2, boundary=boundary)\n x_trimmed = da.overlap.trim_overlap(x_overlaped, 2, boundary=boundary)\n assert np.all(x == x_trimmed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_internals_1_test_rechunk_internals_1.assert_i1d_1_answer6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_internals_1_test_rechunk_internals_1.assert_i1d_1_answer6", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 92, "span_ids": ["test_rechunk_internals_1"], "tokens": 680}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_internals_1():\n \"\"\"Test the cumdims_label and _breakpoints and\n _intersect_1d internal funcs to rechunk.\"\"\"\n new = cumdims_label(((1, 1, 2), (1, 5, 1)), \"n\")\n old = cumdims_label(((4,), (1,) * 5), \"o\")\n breaks = tuple(_breakpoints(o, n) for o, n in zip(old, new))\n answer = ((\"o\", 
0), (\"n\", 0), (\"n\", 1), (\"n\", 2), (\"o\", 4), (\"n\", 4))\n assert breaks[0] == answer\n answer2 = (\n (\"o\", 0),\n (\"n\", 0),\n (\"o\", 1),\n (\"n\", 1),\n (\"o\", 2),\n (\"o\", 3),\n (\"o\", 4),\n (\"o\", 5),\n (\"n\", 6),\n (\"n\", 7),\n )\n assert breaks[1] == answer2\n i1d = [_intersect_1d(b) for b in breaks]\n answer3 = [[(0, slice(0, 1))], [(0, slice(1, 2))], [(0, slice(2, 4))]]\n assert i1d[0] == answer3\n answer4 = [\n [(0, slice(0, 1))],\n [\n (1, slice(0, 1)),\n (2, slice(0, 1)),\n (3, slice(0, 1)),\n (4, slice(0, 1)),\n (5, slice(0, 1)),\n ],\n [(5, slice(1, 2))],\n ]\n assert i1d[1] == answer4\n\n new = cumdims_label(((1, 1, 2), (1, 5, 1, 0)), \"n\")\n breaks = tuple(_breakpoints(o, n) for o, n in zip(old, new))\n answer5 = (\n (\"o\", 0),\n (\"n\", 0),\n (\"o\", 1),\n (\"n\", 1),\n (\"o\", 2),\n (\"o\", 3),\n (\"o\", 4),\n (\"o\", 5),\n (\"n\", 6),\n (\"n\", 7),\n (\"n\", 7),\n )\n assert breaks[1] == answer5\n i1d = [_intersect_1d(b) for b in breaks]\n answer6 = [\n [(0, slice(0, 1))],\n [\n (1, slice(0, 1)),\n (2, slice(0, 1)),\n (3, slice(0, 1)),\n (4, slice(0, 1)),\n (5, slice(0, 1)),\n ],\n [(5, slice(1, 2))],\n [(5, slice(2, 2))],\n ]\n assert i1d[1] == answer6", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_split_into_n_chunks_test_balance_split_into_n_chunks.for_N_in_array_lens_.for_nchunks_in_range_1_2.assert_len_y_chunks_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_balance_split_into_n_chunks_test_balance_split_into_n_chunks.for_N_in_array_lens_.for_nchunks_in_range_1_2.assert_len_y_chunks_0_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 915, "end_line": 938, "span_ids": ["test_balance_split_into_n_chunks"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_balance_split_into_n_chunks():\n # Some prime numbers around 1000\n array_lens = [\n 991,\n 997,\n 1009,\n 1013,\n 1019,\n 1021,\n 1031,\n 1033,\n 1039,\n 1049,\n 1051,\n 1061,\n 1063,\n 1069,\n ]\n\n for N in array_lens:\n for nchunks in range(1, 20):\n x = da.from_array(np.random.uniform(size=N))\n y = x.rechunk(chunks=len(x) // nchunks, balance=True)\n assert len(y.chunks[0]) == nchunks", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_zero_test_rechunk_with_zero.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_rechunk_with_zero_test_rechunk_with_zero.assert_eq_result_expecte", "embedding": 
null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 941, "end_line": 949, "span_ids": ["test_rechunk_with_zero"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rechunk_with_zero():\n a = da.ones((8, 8), chunks=(4, 4))\n result = a.rechunk(((4, 4), (4, 0, 0, 4)))\n expected = da.ones((8, 8), chunks=((4, 4), (4, 0, 0, 4)))\n\n # reverse:\n a, expected = expected, a\n result = a.rechunk((4, 4))\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_nonzero_test_intersect_chunks_with_nonzero.assert_result_expected": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_nonzero_test_intersect_chunks_with_nonzero.assert_result_expected", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 952, "end_line": 968, "span_ids": ["test_intersect_chunks_with_nonzero"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_chunks_with_nonzero():\n from dask.array.rechunk import intersect_chunks\n\n old = ((4, 4), (2,))\n new = ((8,), (1, 1))\n result = list(intersect_chunks(old, new))\n expected = [\n (\n ((0, slice(0, 4, None)), (0, slice(0, 1, None))),\n ((1, slice(0, 4, None)), (0, slice(0, 1, None))),\n ),\n (\n ((0, slice(0, 4, None)), (0, slice(1, 2, None))),\n ((1, slice(0, 4, None)), (0, slice(1, 2, None))),\n ),\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_zero_test_intersect_chunks_with_zero.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_zero_test_intersect_chunks_with_zero.None_2", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 971, "end_line": 1024, "span_ids": ["test_intersect_chunks_with_zero"], "tokens": 770}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_chunks_with_zero():\n from dask.array.rechunk import intersect_chunks\n\n old = ((4, 4), (2,))\n new = ((4, 0, 0, 4), (1, 1))\n result = list(intersect_chunks(old, new))\n\n expected = [\n (((0, slice(0, 4, None)), (0, slice(0, 1, None))),),\n (((0, slice(0, 4, None)), (0, slice(1, 2, None))),),\n (((1, slice(0, 0, None)), (0, slice(0, 1, None))),),\n (((1, slice(0, 0, None)), (0, slice(1, 2, None))),),\n (((1, slice(0, 0, None)), (0, slice(0, 1, None))),),\n (((1, slice(0, 0, None)), (0, slice(1, 2, None))),),\n (((1, slice(0, 4, None)), (0, slice(0, 1, None))),),\n (((1, slice(0, 4, None)), (0, slice(1, 2, None))),),\n ]\n\n assert result == expected\n\n old = ((4, 0, 0, 4), (1, 1))\n new = ((4, 4), (2,))\n result = list(intersect_chunks(old, new))\n\n expected = [\n (\n ((0, slice(0, 4, None)), (0, slice(0, 1, None))),\n ((0, slice(0, 4, None)), (1, slice(0, 1, None))),\n ),\n (\n ((3, slice(0, 4, None)), (0, slice(0, 1, None))),\n ((3, slice(0, 4, None)), (1, slice(0, 1, None))),\n ),\n ]\n\n assert result == expected\n\n old = ((4, 4), (2,))\n new = ((2, 0, 0, 2, 4), (1, 1))\n result = list(intersect_chunks(old, new))\n expected = [\n (((0, slice(0, 2, None)), (0, slice(0, 1, None))),),\n (((0, slice(0, 2, None)), (0, slice(1, 2, None))),),\n (((0, slice(2, 2, None)), (0, slice(0, 1, None))),),\n (((0, slice(2, 2, None)), (0, slice(1, 2, None))),),\n (((0, slice(2, 2, None)), (0, slice(0, 1, None))),),\n (((0, slice(2, 2, None)), (0, slice(1, 2, None))),),\n (((0, slice(2, 4, None)), (0, slice(0, 1, None))),),\n (((0, slice(2, 4, None)), (0, slice(1, 2, None))),),\n (((1, slice(0, 4, None)), (0, slice(0, 1, None))),),\n (((1, slice(0, 4, None)), (0, slice(1, 2, None))),),\n ]\n\n assert result == expected\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_zero.old_12_test_intersect_chunks_with_zero.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_intersect_chunks_with_zero.old_12_test_intersect_chunks_with_zero.None_3", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 1026, "end_line": 1040, "span_ids": ["test_intersect_chunks_with_zero"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_intersect_chunks_with_zero():\n # ... 
other code\n\n old = ((4, 4), (2,))\n new = ((0, 0, 4, 4), (1, 1))\n result = list(intersect_chunks(old, new))\n expected = [\n (((0, slice(0, 0, None)), (0, slice(0, 1, None))),),\n (((0, slice(0, 0, None)), (0, slice(1, 2, None))),),\n (((0, slice(0, 0, None)), (0, slice(0, 1, None))),),\n (((0, slice(0, 0, None)), (0, slice(1, 2, None))),),\n (((0, slice(0, 4, None)), (0, slice(0, 1, None))),),\n (((0, slice(0, 4, None)), (0, slice(1, 2, None))),),\n (((1, slice(0, 4, None)), (0, slice(0, 1, None))),),\n (((1, slice(0, 4, None)), (0, slice(1, 2, None))),),\n ]\n\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_with_zero_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_rechunk.py_test_old_to_new_with_zero_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_rechunk.py", "file_name": "test_rechunk.py", "file_type": "text/x-python", "category": "test", "start_line": 1043, "end_line": 1065, "span_ids": ["test_old_to_new_with_zero"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_old_to_new_with_zero():\n from dask.array.rechunk import _old_to_new\n\n old = ((4, 4),)\n new = ((4, 0, 4),)\n result = _old_to_new(old, new)\n expected = [[[(0, slice(0, 4))], [(1, slice(0, 0))], [(1, slice(0, 4))]]]\n assert result == expected\n\n old = ((4,),)\n new = ((4, 0),)\n result = _old_to_new(old, new)\n expected = [[[(0, slice(0, 4))], [(0, slice(4, 4))]]]\n assert result == expected\n\n old = ((4, 0, 4),)\n new = ((4, 0, 2, 2),)\n result = _old_to_new(old, new)\n expected = [\n [[(0, slice(0, 4))], [(2, slice(0, 0))], [(2, slice(0, 2))], [(2, slice(2, 4))]]\n ]\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_mean_func_does_not_warn_test_nan_func_does_not_warn._did_not_warn": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_mean_func_does_not_warn_test_nan_func_does_not_warn._did_not_warn", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 772, "end_line": 791, "span_ids": ["test_mean_func_does_not_warn", "test_nan_func_does_not_warn"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def test_mean_func_does_not_warn():\n # non-regression test for https://github.com/pydata/xarray/issues/5151\n xr = pytest.importorskip(\"xarray\")\n a = xr.DataArray(da.from_array(np.full((10, 10), np.nan)))\n\n with warnings.catch_warnings(record=True) as rec:\n a.mean().compute()\n assert not rec # did not warn\n\n\n@pytest.mark.parametrize(\"func\", [\"nanvar\", \"nanstd\"])\ndef test_nan_func_does_not_warn(func):\n # non-regression test for #6105\n x = np.ones((10,)) * np.nan\n x[0] = 1\n x[1] = 2\n d = da.from_array(x, chunks=2)\n with warnings.catch_warnings(record=True) as rec:\n getattr(da, func)(d).compute()\n assert not rec # did not warn", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_chunk_structure_independence_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_chunk_structure_independence_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 814, "end_line": 836, "span_ids": ["test_chunk_structure_independence"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"chunks\", list(permutations(((2, 1) * 8, (3,) * 8, (6,) * 4))))\n@pytest.mark.parametrize(\"split_every\", [2, 4])\n@pytest.mark.parametrize(\n \"axes\", list(permutations((0, 1, 2), 2)) + list(permutations((0, 1, 2)))\n)\ndef test_chunk_structure_independence(axes, split_every, chunks):\n # Reducing an array should not depend on its chunk-structure!!!\n # See Issue #8541: https://github.com/dask/dask/issues/8541\n shape = tuple(np.sum(s) for s in chunks)\n np_array = np.arange(np.prod(shape)).reshape(*shape)\n x = da.from_array(np_array, chunks=chunks)\n reduced_x = da.reduction(\n x,\n lambda x, axis, keepdims: x,\n lambda x, axis, keepdims: x,\n keepdims=True,\n axis=axes,\n split_every=split_every,\n dtype=x.dtype,\n meta=x._meta,\n )\n _assert_eq(reduced_x, np_array, check_chunks=False, check_shape=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_always_results_in_a_new_array_test_union1d.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_roll_always_results_in_a_new_array_test_union1d.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1332, "end_line": 1367, "span_ids": ["test_shape_and_ndim", "test_union1d", 
"test_roll_always_results_in_a_new_array"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_roll_always_results_in_a_new_array():\n x = da.arange(2, 3)\n y = da.roll(x, 1)\n assert y is not x\n\n\n@pytest.mark.parametrize(\"shape\", [(10,), (5, 10), (5, 10, 10)])\ndef test_shape_and_ndim(shape):\n x = da.random.random(shape)\n assert np.shape(x) == shape\n\n x = da.random.random(shape)\n assert np.ndim(x) == len(shape)\n\n\n@pytest.mark.parametrize(\n \"shape\", [((12,), (12,)), ((4, 3), (3, 4)), ((12,), (1, 6, 2))]\n)\n@pytest.mark.parametrize(\"reverse\", [True, False])\ndef test_union1d(shape, reverse):\n s1, s2 = shape\n x1 = np.arange(12).reshape(s1)\n x2 = np.arange(6, 18).reshape(s2)\n\n if reverse:\n x1 = x1[::-1]\n\n dx1 = da.from_array(x1)\n dx2 = da.from_array(x2)\n\n result = np.union1d(dx1, dx2)\n expected = np.union1d(x1, x2)\n\n assert isinstance(result, da.Array)\n\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_expand_dims_test_expand_dims.if_axis_is_None_.else_.assert_same_keys_d_e_da_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_expand_dims_test_expand_dims.if_axis_is_None_.else_.assert_same_keys_d_e_da_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 1423, "end_line": 1439, "span_ids": ["test_expand_dims"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"axis\", [None, 0, 1, -1, (0, 1), (0, 2), (1, 2), 2])\ndef test_expand_dims(axis):\n a = np.arange(10)\n d = da.from_array(a, chunks=(3,))\n\n if axis is None:\n with pytest.raises(TypeError):\n da.expand_dims(d, axis=axis)\n elif axis == 2:\n with pytest.raises(AxisError):\n da.expand_dims(d, axis=axis)\n else:\n a_e = np.expand_dims(a, axis=axis)\n d_e = da.expand_dims(d, axis=axis)\n\n assert_eq(d_e, a_e)\n assert same_keys(d_e, da.expand_dims(d, axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh4043_test_gh4043.assert_eq_al_al_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_gh4043_test_gh4043.assert_eq_al_al_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1026, "end_line": 1033, "span_ids": ["test_gh4043"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"lock\", [True, False])\n@pytest.mark.parametrize(\"asarray\", [True, False])\n@pytest.mark.parametrize(\"fancy\", [True, False])\ndef test_gh4043(lock, asarray, fancy):\n a1 = da.from_array(np.zeros(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)\n a2 = da.from_array(np.ones(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)\n al = da.stack([a1, a2])\n assert_eq(al, al)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_3d_with_bool_numpy_array_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_slicing.py_test_slice_array_3d_with_bool_numpy_array_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_slicing.py", "file_name": "test_slicing.py", "file_type": "text/x-python", "category": "test", "start_line": 1036, "end_line": 1050, "span_ids": ["test_slice_array_null_dimension", "test_slice_array_3d_with_bool_numpy_array"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_slice_array_3d_with_bool_numpy_array():\n # https://github.com/dask/dask/issues/6089\n array = da.arange(0, 24).reshape((4, 3, 2))\n mask = np.arange(0, 24).reshape((4, 3, 2)) > 12\n\n actual = array[mask].compute()\n expected = np.arange(13, 24)\n assert_eq(actual, expected)\n\n\ndef test_slice_array_null_dimension():\n array = da.from_array(np.zeros((3, 0)))\n expected = np.zeros((3, 0))[[0]]\n assert_eq(array[[0]], expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq_assert_eq.if_str_adt_str_bdt_.raise_AssertionError_f_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq_assert_eq.if_str_adt_str_bdt_.raise_AssertionError_f_a_", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 267, "end_line": 302, "span_ids": ["assert_eq"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_eq(\n a,\n b,\n check_shape=True,\n check_graph=True,\n check_meta=True,\n check_chunks=True,\n check_type=True,\n scheduler=\"sync\",\n **kwargs,\n):\n a_original = a\n b_original = b\n\n if isinstance(a, (list, int, float)):\n a = np.array(a)\n if isinstance(b, (list, int, float)):\n b = np.array(b)\n\n a, adt, a_meta, a_computed = _get_dt_meta_computed(\n a,\n check_shape=check_shape,\n check_graph=check_graph,\n check_chunks=check_chunks,\n scheduler=scheduler,\n )\n b, bdt, b_meta, b_computed = _get_dt_meta_computed(\n b,\n check_shape=check_shape,\n check_graph=check_graph,\n check_chunks=check_chunks,\n scheduler=scheduler,\n )\n\n if str(adt) != str(bdt):\n raise AssertionError(f\"a and b have different dtypes: (a: {adt}, b: {bdt})\")\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq.try__assert_eq.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/utils.py_assert_eq.try__assert_eq.return.True", "embedding": null, "metadata": {"file_path": "dask/array/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 304, "end_line": 366, "span_ids": ["assert_eq"], "tokens": 665}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assert_eq(\n a,\n b,\n check_shape=True,\n check_graph=True,\n check_meta=True,\n check_chunks=True,\n check_type=True,\n scheduler=\"sync\",\n **kwargs,\n):\n # ... 
other code\n\n try:\n assert (\n a.shape == b.shape\n ), f\"a and b have different shapes (a: {a.shape}, b: {b.shape})\"\n if check_type:\n _a = a if a.shape else a.item()\n _b = b if b.shape else b.item()\n assert type(_a) == type(\n _b\n ), f\"a and b have different types (a: {type(_a)}, b: {type(_b)})\"\n if check_meta:\n if hasattr(a, \"_meta\") and hasattr(b, \"_meta\"):\n assert_eq(a._meta, b._meta)\n if hasattr(a_original, \"_meta\"):\n msg = (\n f\"compute()-ing 'a' changes its number of dimensions \"\n f\"(before: {a_original._meta.ndim}, after: {a.ndim})\"\n )\n assert a_original._meta.ndim == a.ndim, msg\n if a_meta is not None:\n msg = (\n f\"compute()-ing 'a' changes its type \"\n f\"(before: {type(a_original._meta)}, after: {type(a_meta)})\"\n )\n assert type(a_original._meta) == type(a_meta), msg\n if not (np.isscalar(a_meta) or np.isscalar(a_computed)):\n msg = (\n f\"compute()-ing 'a' results in a different type than implied by its metadata \"\n f\"(meta: {type(a_meta)}, computed: {type(a_computed)})\"\n )\n assert type(a_meta) == type(a_computed), msg\n if hasattr(b_original, \"_meta\"):\n msg = (\n f\"compute()-ing 'b' changes its number of dimensions \"\n f\"(before: {b_original._meta.ndim}, after: {b.ndim})\"\n )\n assert b_original._meta.ndim == b.ndim, msg\n if b_meta is not None:\n msg = (\n f\"compute()-ing 'b' changes its type \"\n f\"(before: {type(b_original._meta)}, after: {type(b_meta)})\"\n )\n assert type(b_original._meta) == type(b_meta), msg\n if not (np.isscalar(b_meta) or np.isscalar(b_computed)):\n msg = (\n f\"compute()-ing 'b' results in a different type than implied by its metadata \"\n f\"(meta: {type(b_meta)}, computed: {type(b_computed)})\"\n )\n assert type(b_meta) == type(b_computed), msg\n msg = \"found values in 'a' and 'b' which differ by more than the allowed amount\"\n assert allclose(a, b, **kwargs), msg\n return True\n except TypeError:\n pass\n\n c = a == b\n\n if isinstance(c, np.ndarray):\n assert c.all()\n else:\n assert c\n\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_from___future___import_an___all__._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_from___future___import_an___all__._", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 48, "span_ids": ["imports"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport dataclasses\nimport datetime\nimport hashlib\nimport inspect\nimport os\nimport pickle\nimport threading\nimport uuid\nimport warnings\nfrom collections import OrderedDict\nfrom collections.abc import Callable, Iterator, Mapping\nfrom concurrent.futures import Executor\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom numbers import Integral, Number\nfrom operator import getitem\n\nfrom packaging.version 
import parse as parse_version\nfrom tlz import curry, groupby, identity, merge\nfrom tlz.functoolz import Compose\n\nfrom . import config, local, threaded\nfrom .compatibility import _PY_VERSION\nfrom .context import thread_state\nfrom .core import flatten\nfrom .core import get as simple_get\nfrom .core import literal, quote\nfrom .hashing import hash_buffer_hex\nfrom .system import CPU_COUNT\nfrom .utils import Dispatch, apply, ensure_dict, key_split\n\n__all__ = (\n \"DaskMethodsMixin\",\n \"annotate\",\n \"is_dask_collection\",\n \"compute\",\n \"persist\",\n \"optimize\",\n \"visualize\",\n \"tokenize\",\n \"normalize_token\",\n \"get_collection_names\",\n \"get_name_from_key\",\n \"replace_name_in_key\",\n \"clone_key\",\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_seq_func_normalize_range.return.list_map_normalize_token_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_seq_func_normalize_range.return.list_map_normalize_token_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 906, "end_line": 934, "span_ids": ["_normalize_seq_func", "normalize_range", "normalize_seq", "normalize_literal"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_seq_func(seq):\n # Defined outside normalize_seq to avoid unnecessary redefinitions,\n # thereby improving computation times.\n try:\n return list(map(normalize_token, seq))\n except RecursionError:\n if not config.get(\"tokenize.ensure-deterministic\"):\n return uuid.uuid4().hex\n\n raise RuntimeError(\n f\"Sequence {str(seq)} cannot be deterministically hashed. 
Please, see \"\n \"https://docs.dask.org/en/latest/custom-collections.html#implementing-deterministic-hashing \"\n \"for more information\"\n )\n\n\n@normalize_token.register((tuple, list))\ndef normalize_seq(seq):\n return type(seq).__name__, _normalize_seq_func(seq)\n\n\n@normalize_token.register(literal)\ndef normalize_literal(lit):\n return \"literal\", normalize_token(lit())\n\n\n@normalize_token.register(range)\ndef normalize_range(r):\n return list(map(normalize_token, [r.start, r.stop, r.step]))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_normalize_object_normalize_object.raise_RuntimeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py_normalize_object_normalize_object.raise_RuntimeError_", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 937, "end_line": 956, "span_ids": ["normalize_object"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@normalize_token.register(object)\ndef normalize_object(o):\n method = getattr(o, \"__dask_tokenize__\", None)\n if method is not None:\n return method()\n\n if callable(o):\n return normalize_function(o)\n\n if dataclasses.is_dataclass(o):\n return normalize_dataclass(o)\n\n if not config.get(\"tokenize.ensure-deterministic\"):\n return uuid.uuid4().hex\n\n raise RuntimeError(\n f\"Object {str(o)} cannot be deterministically hashed. 
Please, see \"\n \"https://docs.dask.org/en/latest/custom-collections.html#implementing-deterministic-hashing \"\n \"for more information\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_function_normalize_dataclass.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/base.py__normalize_function_normalize_dataclass.return._", "embedding": null, "metadata": {"file_path": "dask/base.py", "file_name": "base.py", "file_type": "text/x-python", "category": "implementation", "start_line": 979, "end_line": 1015, "span_ids": ["normalize_dataclass", "_normalize_function"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _normalize_function(func: Callable) -> Callable:\n if isinstance(func, Compose):\n first = getattr(func, \"first\", None)\n funcs = reversed((first,) + func.funcs) if first else func.funcs\n return tuple(normalize_function(f) for f in funcs)\n elif isinstance(func, (partial, curry)):\n args = tuple(normalize_token(i) for i in func.args)\n if func.keywords:\n kws = tuple(\n (k, normalize_token(v)) for k, v in sorted(func.keywords.items())\n )\n else:\n kws = None\n return (normalize_function(func.func), args, kws)\n else:\n try:\n result = pickle.dumps(func, protocol=4)\n if b\"__main__\" not in result: # abort on dynamic functions\n return result\n except Exception:\n pass\n try:\n import cloudpickle\n\n return cloudpickle.dumps(func, protocol=4)\n except Exception:\n return str(func)\n\n\ndef normalize_dataclass(obj):\n fields = [\n (field.name, getattr(obj, field.name)) for field in dataclasses.fields(obj)\n ]\n return (\n normalize_function(type(obj)),\n _normalize_seq_func(fields),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_from___future___import_an__deprecated": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_from___future___import_an__deprecated", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["imports"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport itertools\nimport os\nfrom collections.abc import Hashable, Iterable, Mapping, Sequence\nfrom itertools import product\nfrom math import prod\nfrom typing import Any, Hashable, 
Iterable, Mapping, Sequence\n\nimport tlz as toolz\n\nfrom .base import clone_key, get_name_from_key, tokenize\nfrom .core import flatten, keys_in_tasks, reverse_dict\nfrom .delayed import unpack_collections\nfrom .highlevelgraph import HighLevelGraph, Layer\nfrom .optimization import SubgraphCallable, fuse\nfrom .utils import (\n _deprecated,\n apply,\n ensure_dict,\n homogeneous_deepmap,\n stringify,\n stringify_collection_keys,\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockIndex_BlockIndex.__dask_distributed_unpack__.return.cls_state_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/blockwise.py_BlockIndex_BlockIndex.__dask_distributed_unpack__.return.cls_state_", "embedding": null, "metadata": {"file_path": "dask/blockwise.py", "file_name": "blockwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 183, "end_line": 206, "span_ids": ["BlockIndex.__dask_distributed_pack__", "BlockIndex.__dask_distributed_unpack__", "BlockIndex.__getitem__", "BlockIndex", "BlockIndex.__init__"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BlockIndex(BlockwiseDep):\n \"\"\"Index BlockwiseDep argument\n\n The purpose of this class is to provide each\n block of a ``Blockwise``-based operation with\n the current block index.\n \"\"\"\n\n produces_tasks: bool = False\n\n def __init__(self, numblocks: tuple[int, ...]):\n # NOTE: Unused - Just needs to be set to\n # follow the `BlockwiseDep` interface\n self.numblocks = numblocks\n\n def __getitem__(self, idx: tuple[int, ...]) -> tuple[int, ...]:\n return idx\n\n def __dask_distributed_pack__(self, **kwargs):\n return {\"numblocks\": self.numblocks}\n\n @classmethod\n def __dask_distributed_unpack__(cls, state):\n return cls(**state)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/__init__.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/__init__.py__", "embedding": null, "metadata": {"file_path": "dask/bytes/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["imports"], "tokens": 6}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from .core import read_bytes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_from___future___import_an_if_parse_version_fsspec__.errs.errs_aiohttp_client_ex": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_http.py_from___future___import_an_if_parse_version_fsspec__.errs.errs_aiohttp_client_ex", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_http.py", "file_name": "test_http.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 25, "span_ids": ["imports"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport os\nimport subprocess\nimport sys\nimport time\n\nimport fsspec\nimport pytest\nfrom fsspec.core import open_files\nfrom packaging.version import parse as parse_version\n\nimport dask.bag as db\nfrom dask.utils import tmpdir\n\nfiles = [\"a\", \"b\"]\nrequests = pytest.importorskip(\"requests\")\n\nerrs: tuple[type[Exception], ...] = (\n requests.exceptions.RequestException,\n FileNotFoundError,\n)\nif parse_version(fsspec.__version__) > parse_version(\"0.7.4\"):\n aiohttp = pytest.importorskip(\"aiohttp\")\n errs = errs + (aiohttp.client_exceptions.ClientResponseError,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_test_parquet._Should_succeed_otherwis": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet_test_parquet._Should_succeed_otherwis", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 438, "end_line": 532, "span_ids": ["test_parquet"], "tokens": 780}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"engine\", [\"pyarrow\", \"fastparquet\"])\n@pytest.mark.parametrize(\"metadata_file\", [True, False])\ndef test_parquet(s3, engine, s3so, metadata_file):\n import s3fs\n\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n np = pytest.importorskip(\"numpy\")\n\n lib = pytest.importorskip(engine)\n lib_version = parse_version(lib.__version__)\n if engine == \"pyarrow\" and lib_version < parse_version(\"0.13.1\"):\n pytest.skip(\"pyarrow < 0.13.1 not supported for parquet\")\n if (\n engine == \"pyarrow\"\n and lib_version.major == 2\n and parse_version(s3fs.__version__) > parse_version(\"0.5.0\")\n ):\n pytest.skip(\"#7056 - new s3fs not supported before pyarrow 3.0\")\n\n url = \"s3://%s/test.parquet\" % 
test_bucket_name\n\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"you\", \"people\"], size=1000).astype(\n \"O\"\n ),\n },\n index=pd.Index(np.arange(1000), name=\"foo\"),\n )\n df = dd.from_pandas(data, chunksize=500)\n df.to_parquet(\n url, engine=engine, storage_options=s3so, write_metadata_file=metadata_file\n )\n\n files = [f.split(\"/\")[-1] for f in s3.ls(url)]\n if metadata_file:\n assert \"_common_metadata\" in files\n assert \"_metadata\" in files\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(\n url, index=\"foo\", gather_statistics=True, engine=engine, storage_options=s3so\n )\n assert len(df2.divisions) > 1\n\n dd.utils.assert_eq(data, df2)\n\n # Check that `open_file_options` arguments are\n # really passed through to fsspec\n if fsspec_parquet:\n\n # Passing `open_file_options` kwargs will fail\n # if you set an unsupported engine\n with pytest.raises(ValueError):\n dd.read_parquet(\n url,\n engine=engine,\n storage_options=s3so,\n open_file_options={\n \"precache_options\": {\"method\": \"parquet\", \"engine\": \"foo\"},\n },\n ).compute()\n\n # ...but should work fine if you modify the\n # maximum block-transfer size (max_block)\n dd.read_parquet(\n url,\n engine=engine,\n storage_options=s3so,\n open_file_options={\n \"precache_options\": {\"method\": \"parquet\", \"max_block\": 8_000},\n },\n ).compute()\n\n # Check \"open_file_func\"\n fs = get_fs_token_paths(url, storage_options=s3so)[0]\n\n def _open(*args, check=True, **kwargs):\n assert check\n return fs.open(*args, **kwargs)\n\n # Should fail if `check=False`\n with pytest.raises(AssertionError):\n dd.read_parquet(\n url,\n engine=engine,\n storage_options=s3so,\n open_file_options={\"open_file_func\": _open, \"check\": False},\n ).compute()\n\n # Should succeed otherwise\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet.df3_test_parquet.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bytes/tests/test_s3.py_test_parquet.df3_test_parquet.None_3", "embedding": null, "metadata": {"file_path": "dask/bytes/tests/test_s3.py", "file_name": "test_s3.py", "file_type": "text/x-python", "category": "test", "start_line": 533, "end_line": 548, "span_ids": ["test_parquet"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"engine\", [\"pyarrow\", \"fastparquet\"])\n@pytest.mark.parametrize(\"metadata_file\", [True, False])\ndef test_parquet(s3, engine, s3so, metadata_file):\n # ... 
other code\n df3 = dd.read_parquet(\n url,\n engine=engine,\n storage_options=s3so,\n open_file_options={\"open_file_func\": _open},\n )\n dd.utils.assert_eq(data, df3)\n\n # Check that `cache_type=\"all\"` result is same\n df4 = dd.read_parquet(\n url,\n engine=engine,\n storage_options=s3so,\n open_file_options={\"cache_type\": \"all\"},\n )\n dd.utils.assert_eq(data, df4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_from___future___import_an_Callback.unregister.Callback_active_remove_se": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/callbacks.py_from___future___import_an_Callback.unregister.Callback_active_remove_se", "embedding": null, "metadata": {"file_path": "dask/callbacks.py", "file_name": "callbacks.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 83, "span_ids": ["Callback._callback", "Callback.unregister", "imports", "Callback.__enter__", "Callback", "Callback.__init__", "Callback.__exit__", "Callback.register"], "tokens": 552}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nfrom collections.abc import Callable\nfrom contextlib import contextmanager\nfrom typing import ClassVar\n\n__all__ = [\"Callback\", \"add_callbacks\"]\n\n\nclass Callback:\n \"\"\"Base class for using the callback mechanism\n\n Create a callback with functions of the following signatures:\n\n >>> def start(dsk):\n ... pass\n >>> def start_state(dsk, state):\n ... pass\n >>> def pretask(key, dsk, state):\n ... pass\n >>> def posttask(key, result, dsk, state, worker_id):\n ... pass\n >>> def finish(dsk, state, failed):\n ... pass\n\n You may then construct a callback object with any number of them\n\n >>> cb = Callback(pretask=pretask, finish=finish)\n\n And use it either as a context manager over a compute/get call\n\n >>> with cb: # doctest: +SKIP\n ... x.compute()\n\n Or globally with the ``register`` method\n\n >>> cb.register()\n >>> cb.unregister()\n\n Alternatively subclass the ``Callback`` class with your own methods.\n\n >>> class PrintKeys(Callback):\n ... def _pretask(self, key, dask, state):\n ... print(\"Computing: {0}!\".format(repr(key)))\n\n >>> with PrintKeys(): # doctest: +SKIP\n ... 
x.compute()\n \"\"\"\n\n active: ClassVar[set[tuple[Callable | None, ...]]] = set()\n\n def __init__(\n self, start=None, start_state=None, pretask=None, posttask=None, finish=None\n ):\n if start:\n self._start = start\n if start_state:\n self._start_state = start_state\n if pretask:\n self._pretask = pretask\n if posttask:\n self._posttask = posttask\n if finish:\n self._finish = finish\n\n @property\n def _callback(self) -> tuple[Callable | None, ...]:\n fields = [\"_start\", \"_start_state\", \"_pretask\", \"_posttask\", \"_finish\"]\n return tuple(getattr(self, i, None) for i in fields)\n\n def __enter__(self):\n self._cm = add_callbacks(self)\n self._cm.__enter__()\n return self\n\n def __exit__(self, *args):\n self._cm.__exit__(*args)\n\n def register(self) -> None:\n Callback.active.add(self._callback)\n\n def unregister(self) -> None:\n Callback.active.remove(self._callback)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/compatibility.py__": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/compatibility.py__", "embedding": null, "metadata": {"file_path": "dask/compatibility.py", "file_name": "compatibility.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 6, "span_ids": ["imports"], "tokens": 29}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nfrom packaging.version import parse as parse_version\n\n_PY_VERSION = parse_version(\".\".join(map(str, sys.version_info[:3])))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_from___future___import_an__get_paths.return.paths": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_from___future___import_an__get_paths.return.paths", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 41, "span_ids": ["imports", "_get_paths"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport ast\nimport base64\nimport builtins # Explicitly use builtins.set as 'set' will be shadowed by a function\nimport json\nimport os\nimport site\nimport sys\nimport threading\nimport warnings\nfrom collections.abc import Mapping, Sequence\nfrom typing import TYPE_CHECKING, Any\n\nimport yaml\n\nif TYPE_CHECKING:\n from typing_extensions import Literal\n\nno_default 
= \"__no_default__\"\n\n\ndef _get_paths():\n \"\"\"Get locations to search for YAML configuration files.\n\n This logic exists as a separate function for testing purposes.\n \"\"\"\n\n paths = [\n os.getenv(\"DASK_ROOT_CONFIG\", \"/etc/dask\"),\n os.path.join(sys.prefix, \"etc\", \"dask\"),\n *[os.path.join(prefix, \"etc\", \"dask\") for prefix in site.PREFIXES],\n os.path.join(os.path.expanduser(\"~\"), \".config\", \"dask\"),\n ]\n if \"DASK_CONFIG\" in os.environ:\n paths.append(os.environ[\"DASK_CONFIG\"])\n\n # Remove duplicate paths while preserving ordering\n paths = list(reversed(list(dict.fromkeys(reversed(paths)))))\n\n return paths", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_paths_canonical_name.return.k": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_paths_canonical_name.return.k", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 44, "end_line": 82, "span_ids": ["impl:4", "canonical_name"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "paths = _get_paths()\n\nif \"DASK_CONFIG\" in os.environ:\n PATH = os.environ[\"DASK_CONFIG\"]\nelse:\n PATH = os.path.join(os.path.expanduser(\"~\"), \".config\", \"dask\")\n\n\nconfig: dict = {}\nglobal_config = config # alias\n\n\nconfig_lock = threading.Lock()\n\n\ndefaults: list[Mapping] = []\n\n\ndef canonical_name(k: str, config: dict) -> str:\n \"\"\"Return the canonical name for a key.\n\n Handles user choice of '-' or '_' conventions by standardizing on whichever\n version was set first. If a key already exists in either hyphen or\n underscore form, the existing version is the canonical name. 
If neither\n version exists the original key is used as is.\n \"\"\"\n try:\n if k in config:\n return k\n except TypeError:\n # config is not a mapping, return the same name as provided\n return k\n\n altk = k.replace(\"_\", \"-\") if \"_\" in k else k.replace(\"-\", \"_\")\n\n if altk in config:\n return altk\n\n return k", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_rename_rename.set_new_config_config_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_rename_rename.set_new_config_config_", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 527, "end_line": 543, "span_ids": ["rename"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def rename(aliases: Mapping, config: dict = config) -> None:\n \"\"\"Rename old keys to new keys\n\n This helps migrate older configuration versions over time\n \"\"\"\n old = []\n new = {}\n for o, n in aliases.items():\n value = get(o, None, config=config)\n if value is not None:\n old.append(o)\n new[n] = value\n\n for k in old:\n del config[canonical_name(k, config)] # TODO: support nested keys\n\n set(new, config=config)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_defaults_update_defaults.update_config_new_prior": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/config.py_update_defaults_update_defaults.update_config_new_prior", "embedding": null, "metadata": {"file_path": "dask/config.py", "file_name": "config.py", "file_type": "text/x-python", "category": "implementation", "start_line": 546, "end_line": 558, "span_ids": ["update_defaults"], "tokens": 105}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def update_defaults(\n new: Mapping, config: dict = config, defaults: list[Mapping] = defaults\n) -> None:\n \"\"\"Add a new set of defaults to the configuration\n\n It does two things:\n\n 1. Add the defaults to a global collection to be used by refresh later\n 2. 
Updates the global config with the new configuration\n prioritizing older values over newer ones\n \"\"\"\n defaults.append(new)\n update(config, new, priority=\"old\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_from___future___import_an_make_meta_pandas_datetime_tz.return._nonempty_scalar_x_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/backends.py_from___future___import_an_make_meta_pandas_datetime_tz.return._nonempty_scalar_x_", "embedding": null, "metadata": {"file_path": "dask/dataframe/backends.py", "file_name": "backends.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 87, "span_ids": ["__2", "impl", "imports", "make_meta_pandas_datetime_tz", "__3", "__1", "_"], "tokens": 447}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport warnings\nfrom typing import Iterable\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import (\n is_categorical_dtype,\n is_datetime64tz_dtype,\n is_interval_dtype,\n is_period_dtype,\n is_scalar,\n is_sparse,\n union_categoricals,\n)\n\nfrom dask.array.dispatch import percentile_lookup\nfrom dask.array.percentile import _percentile\nfrom dask.sizeof import SimpleSizeof, sizeof\n\nfrom ..utils import is_arraylike, typename\nfrom .core import DataFrame, Index, Scalar, Series, _Frame\nfrom .dispatch import (\n categorical_dtype_dispatch,\n concat,\n concat_dispatch,\n get_parallel_type,\n group_split_dispatch,\n hash_object_dispatch,\n is_categorical_dtype_dispatch,\n make_meta_dispatch,\n make_meta_obj,\n meta_nonempty,\n tolist_dispatch,\n union_categoricals_dispatch,\n)\nfrom .extensions import make_array_nonempty, make_scalar\nfrom .utils import (\n _empty_series,\n _nonempty_scalar,\n _scalar_from_dtype,\n is_float_na_dtype,\n is_integer_na_dtype,\n)\n\n##########\n# Pandas #\n##########\n\n\n@make_scalar.register(np.dtype)\ndef _(dtype):\n return _scalar_from_dtype(dtype)\n\n\n@make_scalar.register(pd.Timestamp)\n@make_scalar.register(pd.Timedelta)\n@make_scalar.register(pd.Period)\n@make_scalar.register(pd.Interval)\ndef _(x):\n return x\n\n\n@make_meta_dispatch.register((pd.Series, pd.DataFrame))\ndef _(x, index=None):\n return x.iloc[:0]\n\n\n@make_meta_dispatch.register(pd.Index)\ndef _(x, index=None):\n return x[0:0]\n\n\nmeta_object_types: tuple[type, ...] 
= (pd.Series, pd.DataFrame, pd.Index, pd.MultiIndex)\ntry:\n import scipy.sparse as sp\n\n meta_object_types += (sp.spmatrix,)\nexcept ImportError:\n pass\n\n\n@meta_nonempty.register(pd.DatetimeTZDtype)\n@make_meta_dispatch.register(pd.DatetimeTZDtype)\ndef make_meta_pandas_datetime_tz(x, index=None):\n return _nonempty_scalar(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reset_index__Frame.clear_divisions.return.type_self_self_dask_sel": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.reset_index__Frame.clear_divisions.return.type_self_self_dask_sel", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 549, "end_line": 581, "span_ids": ["_Frame.reset_index", "_Frame.known_divisions", "_Frame.clear_divisions"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def reset_index(self, drop=False):\n \"\"\"Reset the index to the default index.\n\n Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will\n not be monotonically increasing from 0. Instead, it will restart at 0\n for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).\n This is due to the inability to statically know the full length of the\n index.\n\n For DataFrame with multi-level index, returns a new DataFrame with\n labeling information in the columns under the index names, defaulting\n to 'level_0', 'level_1', etc. if any are None. 
For a standard index,\n the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n drop : boolean, default False\n Do not try to insert index into dataframe columns.\n \"\"\"\n return self.map_partitions(\n M.reset_index, drop=drop, enforce_metadata=False\n ).clear_divisions()\n\n @property\n def known_divisions(self):\n \"\"\"Whether divisions are already known\"\"\"\n return len(self.divisions) > 0 and self.divisions[0] is not None\n\n def clear_divisions(self):\n \"\"\"Forget division information\"\"\"\n divisions = (None,) * (self.npartitions + 1)\n return type(self)(self.dask, self._name, self._meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.compute_current_divisions__Frame.compute_current_divisions.return.compute_divisions_self_c": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.compute_current_divisions__Frame.compute_current_divisions.return.compute_divisions_self_c", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 582, "end_line": 636, "span_ids": ["_Frame.compute_current_divisions"], "tokens": 603}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def compute_current_divisions(self, col=None):\n \"\"\"Compute the current divisions of the DataFrame.\n\n This method triggers immediate computation. 
If you find yourself running this command\n repeatedly for the same dataframe, we recommend storing the result\n so you don't have to rerun it.\n\n If the column or index values overlap between partitions, raises ``ValueError``.\n To prevent this, make sure the data are sorted by the column or index.\n\n Parameters\n ----------\n col : string, optional\n Calculate the divisions for a non-index column by passing in the name of the column.\n If col is not specified, the index will be used to calculate divisions.\n In this case, if the divisions are already known, they will be returned\n immediately without computing.\n\n Examples\n --------\n >>> import dask\n >>> ddf = dask.datasets.timeseries(start=\"2021-01-01\", end=\"2021-01-07\", freq=\"1H\").clear_divisions()\n >>> divisions = ddf.compute_current_divisions()\n >>> print(divisions) # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2021-01-01 00:00:00'),\n Timestamp('2021-01-02 00:00:00'),\n Timestamp('2021-01-03 00:00:00'),\n Timestamp('2021-01-04 00:00:00'),\n Timestamp('2021-01-05 00:00:00'),\n Timestamp('2021-01-06 00:00:00'),\n Timestamp('2021-01-06 23:00:00'))\n\n >>> ddf.divisions = divisions\n >>> ddf.known_divisions\n True\n\n >>> ddf = ddf.reset_index().clear_divisions()\n >>> divisions = ddf.compute_current_divisions(\"timestamp\")\n >>> print(divisions) # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2021-01-01 00:00:00'),\n Timestamp('2021-01-02 00:00:00'),\n Timestamp('2021-01-03 00:00:00'),\n Timestamp('2021-01-04 00:00:00'),\n Timestamp('2021-01-05 00:00:00'),\n Timestamp('2021-01-06 00:00:00'),\n Timestamp('2021-01-06 23:00:00'))\n\n >>> ddf = ddf.set_index(\"timestamp\", divisions=divisions, sorted=True)\n \"\"\"\n if col is None and self.known_divisions:\n return self.divisions\n\n from .shuffle import compute_divisions\n\n return compute_divisions(self, col=col)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.get_partition__Frame.get_partition.if_0_n_self_npartiti.else_.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.get_partition__Frame.get_partition.if_0_n_self_npartiti.else_.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 636, "end_line": 646, "span_ids": ["_Frame.get_partition"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def get_partition(self, n):\n \"\"\"Get a dask DataFrame/Series representing the `nth` partition.\"\"\"\n if 0 <= n < self.npartitions:\n name = f\"get-partition-{str(n)}-{self._name}\"\n divisions = self.divisions[n : n + 2]\n layer = {(name, 0): (self._name, n)}\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return new_dd_object(graph, name, self._meta, divisions)\n else:\n msg = f\"n must be 0 <= 
n < {self.npartitions}\"\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.std__Frame.std.return.handle_out_out_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.std__Frame.std.return.handle_out_out_result_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2232, "end_line": 2308, "span_ids": ["_Frame.std"], "tokens": 549}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @_numeric_only\n @derived_from(pd.DataFrame)\n def std(\n self,\n axis=None,\n skipna=True,\n ddof=1,\n split_every=False,\n dtype=None,\n out=None,\n numeric_only=None,\n ):\n axis = self._validate_axis(axis)\n _raise_if_object_series(self, \"std\")\n _raise_if_not_series_or_dataframe(self, \"std\")\n\n meta = self._meta_nonempty.std(axis=axis, skipna=skipna)\n is_df_like = is_dataframe_like(self._meta)\n needs_time_conversion = False\n numeric_dd = self\n\n if PANDAS_GT_120 and is_df_like:\n time_cols = self._meta.select_dtypes(include=\"datetime\").columns\n if len(time_cols) > 0:\n (\n numeric_dd,\n needs_time_conversion,\n ) = self._convert_time_cols_to_numeric(time_cols, axis, meta, skipna)\n elif PANDAS_GT_120 and not is_df_like:\n needs_time_conversion = is_datetime64_any_dtype(self._meta)\n if needs_time_conversion:\n numeric_dd = _convert_to_numeric(self, skipna)\n\n if axis == 1:\n result = map_partitions(\n M.std if not needs_time_conversion else _sqrt_and_convert_to_timedelta,\n numeric_dd,\n meta=meta,\n token=self._token_prefix + \"std\",\n axis=axis,\n skipna=skipna,\n ddof=ddof,\n enforce_metadata=False,\n parent_meta=self._meta,\n )\n return handle_out(out, result)\n\n # Case where axis=0 or axis=None\n v = numeric_dd.var(skipna=skipna, ddof=ddof, split_every=split_every)\n name = self._token_prefix + \"std\"\n\n if needs_time_conversion:\n sqrt_func_kwargs = {\n \"is_df_like\": is_df_like,\n \"time_cols\": time_cols if is_df_like else None,\n \"axis\": axis,\n }\n sqrt_func = _sqrt_and_convert_to_timedelta\n else:\n sqrt_func_kwargs = {}\n sqrt_func = np.sqrt\n\n result = map_partitions(\n sqrt_func,\n v,\n meta=meta,\n token=name,\n enforce_metadata=False,\n parent_meta=self._meta,\n **sqrt_func_kwargs,\n )\n\n # Try to match the Pandas result dtype\n if is_df_like and hasattr(meta, \"dtype\"):\n result = result.astype(meta.dtype)\n\n return handle_out(out, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._convert_time_cols_to_numeric__Frame._convert_time_cols_to_numeric.return.numeric_dd_needs_time_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame._convert_time_cols_to_numeric__Frame._convert_time_cols_to_numeric.return.numeric_dd_needs_time_co", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2310, "end_line": 2338, "span_ids": ["_Frame._convert_time_cols_to_numeric"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n def _convert_time_cols_to_numeric(self, time_cols, axis, meta, skipna):\n from .io import from_pandas\n\n needs_time_conversion = True\n\n # Ensure all columns are correct type. Need to shallow copy since cols will be modified\n if axis == 0:\n numeric_dd = self[meta.index].copy()\n else:\n numeric_dd = self.copy()\n\n # Mix of datetimes with other numeric types produces NaNs for each value in std() series\n if axis == 1 and len(time_cols) != len(self.columns):\n # This is faster than converting each column to numeric when it's not necessary\n # since each standard deviation will just be NaN\n needs_time_conversion = False\n numeric_dd = from_pandas(\n pd.DataFrame(\n {\"_\": pd.Series([np.nan])},\n index=self.index,\n ),\n npartitions=self.npartitions,\n )\n else:\n # Convert timedelta and datetime columns to integer types so we can use var\n for col in time_cols:\n numeric_dd[col] = _convert_to_numeric(numeric_dd[col], skipna)\n\n return numeric_dd, needs_time_conversion", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.append__Frame.append.return.concat_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.append__Frame.append.return.concat_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3048, "end_line": 3066, "span_ids": ["_Frame.append"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.Series)\n def append(self, other, interleave_partitions=False):\n if PANDAS_GT_140:\n warnings.warn(\n \"The frame.append method is deprecated and will be removed from\"\n \"dask in a future version. 
Use dask.dataframe.concat instead.\",\n FutureWarning,\n )\n # because DataFrame.append will override the method,\n # wrap by pd.Series.append docstring\n from .multi import concat\n\n if isinstance(other, (list, dict)):\n msg = \"append doesn't support list or dict input\"\n raise NotImplementedError(msg)\n\n return concat(\n [self, other], join=\"outer\", interleave_partitions=interleave_partitions\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.dot__Frame.dot.return.self_map_partitions__dot_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__Frame.dot__Frame.dot.return.self_map_partitions__dot_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3068, "end_line": 3086, "span_ids": ["_Frame.dot"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _Frame(DaskMethodsMixin, OperatorMethodMixin):\n\n @derived_from(pd.Series)\n def dot(self, other, meta=no_default):\n if not isinstance(other, _Frame):\n raise TypeError(\"The second operand must be a dask array or dask dataframe\")\n\n if isinstance(other, DataFrame):\n s = self.map_partitions(M.dot, other, token=\"dot\", meta=meta)\n return s.groupby(by=s.index).apply(\n lambda x: x.sum(skipna=False), meta=s._meta_nonempty\n )\n\n def _dot_series(*args, **kwargs):\n # .sum() is invoked on each partition before being applied to all\n # partitions. 
The return type is expected to be a series, not a numpy object\n return pd.Series(M.dot(*args, **kwargs))\n\n return self.map_partitions(_dot_series, other, token=\"dot\", meta=meta).sum(\n skipna=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._repartition_quantiles_Series._get_numeric_data.return.self": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series._repartition_quantiles_Series._get_numeric_data.return.self", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3538, "end_line": 3554, "span_ids": ["Series._repartition_quantiles", "Series._get_numeric_data", "Series.__getitem__"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n def _repartition_quantiles(self, npartitions, upsample=1.0):\n \"\"\"Approximate quantiles of Series used for repartitioning\"\"\"\n from .partitionquantiles import partition_quantiles\n\n return partition_quantiles(self, npartitions, upsample=upsample)\n\n def __getitem__(self, key):\n if isinstance(key, Series) and self.divisions == key.divisions:\n name = \"index-%s\" % tokenize(self, key)\n dsk = partitionwise_graph(operator.getitem, name, self, key)\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])\n return Series(graph, name, self._meta, self.divisions)\n return self.loc[key]\n\n @derived_from(pd.DataFrame)\n def _get_numeric_data(self, how=\"any\", subset=None):\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.iteritems_Series.iteritems.return.__self_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.iteritems_Series.iteritems.return.__self_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3556, "end_line": 3573, "span_ids": ["Series.iteritems"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def iteritems(self):\n if PANDAS_GT_150:\n warnings.warn(\n \"iteritems is deprecated and will be removed in a future version. 
\"\n \"Use .items instead.\",\n FutureWarning,\n )\n # We use the `_` generator below to ensure the deprecation warning above\n # is raised when `.iteritems()` is called, not when the first `next()`\n # iteration happens\n\n def _(self):\n for i in range(self.npartitions):\n s = self.get_partition(i).compute()\n yield from s.items()\n\n return _(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__iter___Series.nunique.if_dropna_.else_.return.uniqs_size": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Series.__iter___Series.nunique.if_dropna_.else_.return.uniqs_size", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3575, "end_line": 3654, "span_ids": ["Series.explode", "Series._validate_axis", "Series.__iter__", "Series.nunique", "Series.groupby", "Series.__contains__", "Series.unique", "Series.count", "Series.mode"], "tokens": 559}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Series(_Frame):\n\n @derived_from(pd.Series)\n def __iter__(self):\n for i in range(self.npartitions):\n s = self.get_partition(i).compute()\n yield from s\n\n @_deprecated(\n message=(\n \"Using the ``in`` operator to test for membership in Series is \"\n \"deprecated. To test for membership in the index use \"\n \"``(s.index == key).any()``. Similarly to test for membership in \"\n \"the values use ``(s == key).any()``\"\n )\n )\n def __contains__(self, key):\n return (self == key).any().compute()\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, \"index\", None):\n raise ValueError(f\"No axis named {axis}\")\n # convert to numeric axis\n return {None: 0, \"index\": 0}.get(axis, axis)\n\n @derived_from(pd.Series)\n def groupby(\n self, by=None, group_keys=True, sort=None, observed=None, dropna=None, **kwargs\n ):\n from dask.dataframe.groupby import SeriesGroupBy\n\n return SeriesGroupBy(\n self,\n by=by,\n group_keys=group_keys,\n sort=sort,\n observed=observed,\n dropna=dropna,\n **kwargs,\n )\n\n @derived_from(pd.Series)\n def count(self, split_every=False):\n return super().count(split_every=split_every)\n\n @derived_from(pd.Series)\n def mode(self, dropna=True, split_every=False):\n return super().mode(dropna=dropna, split_every=split_every)\n\n @derived_from(pd.Series)\n def explode(self):\n meta = self._meta.explode()\n return self.map_partitions(M.explode, meta=meta, enforce_metadata=False)\n\n def unique(self, split_every=None, split_out=1):\n \"\"\"\n Return Series of unique values in the object. 
Includes NA values.\n\n Returns\n -------\n uniques : Series\n \"\"\"\n return aca(\n self,\n chunk=methods.unique,\n aggregate=methods.unique,\n meta=self._meta,\n token=\"unique\",\n split_every=split_every,\n series_name=self.name,\n split_out=split_out,\n )\n\n @derived_from(pd.Series)\n def nunique(self, split_every=None, dropna=True):\n uniqs = self.drop_duplicates(split_every=split_every)\n if dropna:\n # count mimics pandas behavior and excludes NA values\n return uniqs.count()\n else:\n return uniqs.size", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.map_Index.map.return.applied": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.map_Index.map.return.applied", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4170, "end_line": 4188, "span_ids": ["Index.map"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @insert_meta_param_description(pad=12)\n @derived_from(pd.Index)\n def map(self, arg, na_action=None, meta=no_default, is_monotonic=False):\n \"\"\"\n Note that this method clears any known divisions.\n\n If your mapping function is monotonically increasing then use `is_monotonic`\n to apply the mapping function to the old divisions and assign the new\n divisions to the output.\n\n \"\"\"\n applied = super().map(arg, na_action=na_action, meta=meta)\n if is_monotonic and self.known_divisions:\n applied.divisions = tuple(\n pd.Series(self.divisions).map(arg, na_action=na_action)\n )\n else:\n applied = applied.clear_divisions()\n return applied", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.is_monotonic_Index.is_monotonic_decreasing.return.super_is_monotonic_decr": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_Index.is_monotonic_Index.is_monotonic_decreasing.return.super_is_monotonic_decr", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 4190, "end_line": 4209, "span_ids": ["Index.is_monotonic_increasing", "Index.is_monotonic_decreasing", "Index.is_monotonic"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Index(Series):\n\n @property\n 
@derived_from(pd.Index)\n def is_monotonic(self):\n if PANDAS_GT_150:\n warnings.warn(\n \"is_monotonic is deprecated and will be removed in a future version. \"\n \"Use is_monotonic_increasing instead.\",\n FutureWarning,\n )\n return super().is_monotonic_increasing\n\n @property\n @derived_from(pd.Index)\n def is_monotonic_increasing(self):\n return super().is_monotonic_increasing\n\n @property\n @derived_from(pd.Index)\n def is_monotonic_decreasing(self):\n return super().is_monotonic_decreasing", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.if_split_every_is_None__apply_concat_apply.return.new_dd_object_graph_fina": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_apply_concat_apply.if_split_every_is_None__apply_concat_apply.return.new_dd_object_graph_fina", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6017, "end_line": 6114, "span_ids": ["apply_concat_apply"], "tokens": 861}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef apply_concat_apply(\n args,\n chunk=None,\n aggregate=None,\n combine=None,\n meta=no_default,\n token=None,\n chunk_kwargs=None,\n aggregate_kwargs=None,\n combine_kwargs=None,\n split_every=None,\n split_out=None,\n split_out_setup=None,\n split_out_setup_kwargs=None,\n sort=None,\n ignore_index=False,\n **kwargs,\n):\n # ... other code\n\n if split_every is None:\n split_every = 8\n elif split_every is False:\n split_every = npartitions\n elif split_every < 2 or not isinstance(split_every, Integral):\n raise ValueError(\"split_every must be an integer >= 2\")\n\n token_key = tokenize(\n token or (chunk, aggregate),\n meta,\n args,\n chunk_kwargs,\n aggregate_kwargs,\n combine_kwargs,\n split_every,\n split_out,\n split_out_setup,\n split_out_setup_kwargs,\n )\n\n # Blockwise Chunk Layer\n chunk_name = f\"{token or funcname(chunk)}-chunk-{token_key}\"\n chunked = map_partitions(\n chunk,\n *args,\n token=chunk_name,\n # NOTE: We are NOT setting the correct\n # `meta` here on purpose. We are using\n # `map_partitions` as a convenient way\n # to build a `Blockwise` layer, and need\n # to avoid the metadata emulation step.\n meta=dfs[0],\n enforce_metadata=False,\n transform_divisions=False,\n align_dataframes=False,\n **chunk_kwargs,\n )\n\n # Blockwise Split Layer\n if split_out and split_out > 1:\n chunked = chunked.map_partitions(\n hash_shard,\n split_out,\n split_out_setup,\n split_out_setup_kwargs,\n ignore_index,\n token=\"split-%s\" % token_key,\n # NOTE: We are NOT setting the correct\n # `meta` here on purpose. 
We are using\n # `map_partitions` as a convenient way\n # to build a `Blockwise` layer, and need\n # to avoid the metadata emulation step.\n meta=dfs[0],\n enforce_metadata=False,\n transform_divisions=False,\n align_dataframes=False,\n )\n\n # Handle sort behavior\n if sort is not None:\n if sort and split_out > 1:\n raise NotImplementedError(\n \"Cannot guarantee sorted keys for `split_out>1`.\"\n \" Try using split_out=1, or grouping with sort=False.\"\n )\n aggregate_kwargs = aggregate_kwargs or {}\n aggregate_kwargs[\"sort\"] = sort\n\n # Tree-Reduction Layer\n final_name = f\"{token or funcname(aggregate)}-agg-{token_key}\"\n layer = DataFrameTreeReduction(\n final_name,\n chunked._name,\n npartitions,\n partial(_concat, ignore_index=ignore_index),\n partial(combine, **combine_kwargs) if combine_kwargs else combine,\n finalize_func=partial(aggregate, **aggregate_kwargs)\n if aggregate_kwargs\n else aggregate,\n split_every=split_every,\n split_out=split_out if (split_out and split_out > 1) else None,\n tree_node_name=f\"{token or funcname(combine)}-combine-{token_key}\",\n )\n\n if meta is no_default:\n meta_chunk = _emulate(chunk, *args, udf=True, **chunk_kwargs)\n meta = _emulate(\n aggregate, _concat([meta_chunk], ignore_index), udf=True, **aggregate_kwargs\n )\n meta = make_meta(\n meta,\n index=(getattr(make_meta(dfs[0]), \"index\", None) if dfs else None),\n parent_meta=dfs[0]._meta,\n )\n\n graph = HighLevelGraph.from_collections(final_name, layer, dependencies=(chunked,))\n divisions = [None] * ((split_out or 1) + 1)\n return new_dd_object(graph, final_name, meta, divisions, parent_meta=dfs[0]._meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions_map_partitions.if_meta_is_no_default_.else_.meta_is_emulated.False": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions_map_partitions.if_meta_is_no_default_.else_.meta_is_emulated.False", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6152, "end_line": 6222, "span_ids": ["map_partitions"], "tokens": 624}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef map_partitions(\n func,\n *args,\n meta=no_default,\n enforce_metadata=True,\n transform_divisions=True,\n align_dataframes=True,\n **kwargs,\n):\n \"\"\"Apply Python function on each DataFrame partition.\n\n Parameters\n ----------\n func : function\n Function applied to each partition.\n args, kwargs :\n Arguments and keywords to pass to the function. At least one of the\n args should be a Dask.dataframe. Arguments and keywords may contain\n ``Scalar``, ``Delayed`` or regular python objects. 
DataFrame-like args\n (both dask and pandas) will be repartitioned to align (if necessary)\n before applying the function (see ``align_dataframes`` to control).\n enforce_metadata : bool, default True\n Whether to enforce at runtime that the structure of the DataFrame\n produced by ``func`` actually matches the structure of ``meta``.\n This will rename and reorder columns for each partition,\n and will raise an error if this doesn't work or types don't match.\n transform_divisions : bool, default True\n Whether to apply the function onto the divisions and apply those\n transformed divisions to the output.\n align_dataframes : bool, default True\n Whether to repartition DataFrame- or Series-like args\n (both dask and pandas) so their divisions align before applying\n the function. This requires all inputs to have known divisions.\n Single-partition inputs will be split into multiple partitions.\n\n If False, all inputs must have either the same number of partitions\n or a single partition. Single-partition inputs will be broadcast to\n every partition of multi-partition inputs.\n $META\n \"\"\"\n name = kwargs.pop(\"token\", None)\n parent_meta = kwargs.pop(\"parent_meta\", None)\n\n assert callable(func)\n if name is not None:\n token = tokenize(meta, *args, **kwargs)\n else:\n name = funcname(func)\n token = tokenize(func, meta, *args, **kwargs)\n name = f\"{name}-{token}\"\n\n from .multi import _maybe_align_partitions\n\n if align_dataframes:\n args = _maybe_from_pandas(args)\n args = _maybe_align_partitions(args)\n\n dfs = [df for df in args if isinstance(df, _Frame)]\n meta_index = getattr(make_meta(dfs[0]), \"index\", None) if dfs else None\n if parent_meta is None and dfs:\n parent_meta = dfs[0]._meta\n\n if meta is no_default:\n # Use non-normalized kwargs here, as we want the real values (not\n # delayed values)\n meta = _emulate(func, *args, udf=True, **kwargs)\n meta_is_emulated = True\n else:\n meta = make_meta(meta, index=meta_index, parent_meta=parent_meta)\n meta_is_emulated = False\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions.if_all_isinstance_arg_Sc_map_partitions.return.new_dd_object_graph_name": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_map_partitions.if_all_isinstance_arg_Sc_map_partitions.return.new_dd_object_graph_name", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6224, "end_line": 6313, "span_ids": ["map_partitions"], "tokens": 757}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef map_partitions(\n func,\n *args,\n meta=no_default,\n enforce_metadata=True,\n transform_divisions=True,\n align_dataframes=True,\n **kwargs,\n):\n # ... 
other code\n\n if all(isinstance(arg, Scalar) for arg in args):\n layer = {\n (name, 0): (apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)\n }\n graph = HighLevelGraph.from_collections(name, layer, dependencies=args)\n return Scalar(graph, name, meta)\n elif not (has_parallel_type(meta) or is_arraylike(meta) and meta.shape):\n if not meta_is_emulated:\n warnings.warn(\n \"Meta is not valid, `map_partitions` expects output to be a pandas object. \"\n \"Try passing a pandas object as meta or a dict or tuple representing the \"\n \"(name, dtype) of the columns. In the future the meta you passed will not work.\",\n FutureWarning,\n )\n # If `meta` is not a pandas object, the concatenated results will be a\n # different type\n meta = make_meta(_concat([meta]), index=meta_index)\n\n # Ensure meta is empty series\n meta = make_meta(meta, parent_meta=parent_meta)\n\n args2 = []\n dependencies = []\n for arg in args:\n if isinstance(arg, _Frame):\n args2.append(arg)\n dependencies.append(arg)\n continue\n arg = normalize_arg(arg)\n arg2, collections = unpack_collections(arg)\n if collections:\n args2.append(arg2)\n dependencies.extend(collections)\n else:\n args2.append(arg)\n\n kwargs3 = {}\n simple = True\n for k, v in kwargs.items():\n v = normalize_arg(v)\n v, collections = unpack_collections(v)\n dependencies.extend(collections)\n kwargs3[k] = v\n if collections:\n simple = False\n\n divisions = dfs[0].divisions\n if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:\n try:\n divisions = func(\n *[pd.Index(a.divisions) if a is dfs[0] else a for a in args], **kwargs\n )\n if isinstance(divisions, pd.Index):\n divisions = methods.tolist(divisions)\n except Exception:\n pass\n else:\n if not valid_divisions(divisions):\n divisions = [None] * (dfs[0].npartitions + 1)\n\n if has_keyword(func, \"partition_info\"):\n partition_info = {\n (i,): {\"number\": i, \"division\": division}\n for i, division in enumerate(divisions[:-1])\n }\n\n args2.insert(0, BlockwiseDepDict(partition_info))\n orig_func = func\n func = lambda partition_info, *args, **kwargs: orig_func(\n *args, **kwargs, partition_info=partition_info\n )\n\n if enforce_metadata:\n dsk = partitionwise_graph(\n apply_and_enforce,\n name,\n *args2,\n dependencies=dependencies,\n _func=func,\n _meta=meta,\n **kwargs3,\n )\n else:\n kwargs4 = kwargs if simple else kwargs3\n dsk = partitionwise_graph(\n func, name, *args2, **kwargs4, dependencies=dependencies\n )\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)\n return new_dd_object(graph, name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__take_last__take_last.if_skipna_is_False_.else_.if_is_dataframe_like_a_.else_.return._last_valid_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__take_last__take_last.if_skipna_is_False_.else_.if_is_dataframe_like_a_.else_.return._last_valid_a_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6726, "end_line": 6764, "span_ids": ["_take_last"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _take_last(a, skipna=True):\n \"\"\"\n take last row (Series) of DataFrame / last value of Series\n considering NaN.\n\n Parameters\n ----------\n a : pd.DataFrame or pd.Series\n skipna : bool, default True\n Whether to exclude NaN\n\n \"\"\"\n\n def _last_valid(s):\n for i in range(1, min(10, len(s) + 1)):\n val = s.iloc[-i]\n if not pd.isnull(val):\n return val\n else:\n nonnull = s[s.notna()]\n if not nonnull.empty:\n return nonnull.iloc[-1]\n return None\n\n if skipna is False:\n return a.iloc[-1]\n else:\n # take last valid value excluding NaN, NaN location may be different\n # in each column\n if is_dataframe_like(a):\n # create Series from appropriate backend dataframe library\n series_typ = type(a.iloc[0:1, 0])\n if a.empty:\n return series_typ([], dtype=\"float\")\n return series_typ(\n {col: _last_valid(a[col]) for col in a.columns}, index=a.columns\n )\n else:\n return _last_valid(a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_check_divisions_check_divisions.if_len_divisions_1_.raise_ValueError_msg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_check_divisions_check_divisions.if_len_divisions_1_.raise_ValueError_msg_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 6767, "end_line": 6777, "span_ids": ["check_divisions"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_divisions(divisions):\n if not isinstance(divisions, (list, tuple)):\n raise ValueError(\"New division must be list or tuple\")\n divisions = list(divisions)\n if len(divisions) == 0:\n raise ValueError(\"New division must not be empty\")\n if divisions != sorted(divisions):\n raise ValueError(\"New division must be sorted\")\n if len(divisions[:-1]) != len(list(unique(divisions[:-1]))):\n msg = \"New division must be unique, except for the last element\"\n raise ValueError(msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_mapseries_series_map.return.new_dd_object_graph_fina": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py_mapseries_series_map.return.new_dd_object_graph_fina", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 7606, "end_line": 7672, "span_ids": ["mapseries", "mapseries_combine", "series_map"], "tokens": 582}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def mapseries(base_chunk, concat_map):\n return base_chunk.map(concat_map)\n\n\ndef mapseries_combine(index, concat_result):\n final_series = concat_result.sort_index()\n final_series = index.to_series().map(final_series)\n return final_series\n\n\ndef series_map(base_series, map_series):\n npartitions = base_series.npartitions\n split_out = map_series.npartitions\n\n dsk = {}\n\n base_token_key = tokenize(base_series, split_out)\n base_split_prefix = f\"base-split-{base_token_key}\"\n base_shard_prefix = f\"base-shard-{base_token_key}\"\n for i, key in enumerate(base_series.__dask_keys__()):\n dsk[(base_split_prefix, i)] = (hash_shard, key, split_out)\n for j in range(split_out):\n dsk[(base_shard_prefix, 0, i, j)] = (getitem, (base_split_prefix, i), j)\n\n map_token_key = tokenize(map_series)\n map_split_prefix = f\"map-split-{map_token_key}\"\n map_shard_prefix = f\"map-shard-{map_token_key}\"\n for i, key in enumerate(map_series.__dask_keys__()):\n dsk[(map_split_prefix, i)] = (\n hash_shard,\n key,\n split_out,\n split_out_on_index,\n None,\n )\n for j in range(split_out):\n dsk[(map_shard_prefix, 0, i, j)] = (getitem, (map_split_prefix, i), j)\n\n token_key = tokenize(base_series, map_series)\n map_prefix = f\"map-series-{token_key}\"\n for i in range(npartitions):\n for j in range(split_out):\n dsk[(map_prefix, i, j)] = (\n mapseries,\n (base_shard_prefix, 0, i, j),\n (_concat, [(map_shard_prefix, 0, k, j) for k in range(split_out)]),\n )\n\n final_prefix = f\"map-series-combine-{token_key}\"\n for i, key in enumerate(base_series.index.__dask_keys__()):\n dsk[(final_prefix, i)] = (\n mapseries_combine,\n key,\n (_concat, [(map_prefix, i, j) for j in range(split_out)]),\n )\n\n meta = map_series._meta.copy()\n meta.index = base_series._meta.index\n meta = make_meta(meta)\n\n dependencies = [base_series, map_series, base_series.index]\n graph = HighLevelGraph.from_collections(\n final_prefix, dsk, dependencies=dependencies\n )\n divisions = list(base_series.divisions)\n\n return new_dd_object(graph, final_prefix, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__convert_to_numeric_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/core.py__convert_to_numeric_", "embedding": null, "metadata": {"file_path": "dask/dataframe/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 7675, "end_line": 7711, "span_ids": ["_convert_to_numeric", "_raise_if_not_series_or_dataframe", "_sqrt_and_convert_to_timedelta"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _convert_to_numeric(series, skipna):\n if skipna:\n return series.dropna().view(\"i8\")\n\n # series.view(\"i8\") with pd.NaT produces -9223372036854775808 is why we need to do this\n return series.view(\"i8\").mask(series.isnull(), np.nan)\n\n\ndef _sqrt_and_convert_to_timedelta(partition, axis, *args, **kwargs):\n if axis == 1:\n return pd.to_timedelta(M.std(partition, axis=axis, *args, **kwargs))\n\n is_df_like, time_cols = kwargs[\"is_df_like\"], kwargs[\"time_cols\"]\n\n sqrt = np.sqrt(partition)\n\n if not is_df_like:\n return pd.to_timedelta(sqrt)\n\n time_col_mask = sqrt.index.isin(time_cols)\n matching_vals = sqrt[time_col_mask]\n for time_col, matching_val in zip(time_cols, matching_vals):\n sqrt[time_col] = pd.to_timedelta(matching_val)\n\n return sqrt\n\n\ndef _raise_if_not_series_or_dataframe(x, funcname):\n \"\"\"\n Utility function to raise an error if an object is not a Series or DataFrame\n \"\"\"\n if not is_series_like(x) and not is_dataframe_like(x):\n raise NotImplementedError(\n \"`%s` is only supported with objects that are Dataframes or Series\"\n % funcname\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy__GroupBy.__iter__.raise_NotImplementedError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy__GroupBy.__iter__.raise_NotImplementedError", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1026, "end_line": 1135, "span_ids": ["_GroupBy.index", "_GroupBy.index_1", "_GroupBy.__iter__", "_GroupBy._groupby_kwargs", "_GroupBy.__init__", "_GroupBy"], "tokens": 765}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n \"\"\"Superclass for DataFrameGroupBy and SeriesGroupBy\n\n Parameters\n ----------\n\n obj: DataFrame or Series\n DataFrame or Series to be grouped\n by: str, list or Series\n The key for grouping\n slice: str, list\n The slice keys applied to GroupBy result\n group_keys: bool\n Passed to pandas.DataFrame.groupby()\n dropna: bool\n Whether to drop null values from groupby index\n sort: bool, defult None\n Passed along to aggregation methods. 
If allowed,\n the output aggregation will have sorted keys.\n observed: bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n \"\"\"\n\n def __init__(\n self,\n df,\n by=None,\n slice=None,\n group_keys=True,\n dropna=None,\n sort=None,\n observed=None,\n ):\n\n by_ = by if isinstance(by, (tuple, list)) else [by]\n if any(isinstance(key, pd.Grouper) for key in by_):\n raise NotImplementedError(\"pd.Grouper is currently not supported by Dask.\")\n\n assert isinstance(df, (DataFrame, Series))\n self.group_keys = group_keys\n self.obj = df\n # grouping key passed via groupby method\n self.by = _normalize_by(df, by)\n self.sort = sort\n\n partitions_aligned = all(\n item.npartitions == df.npartitions if isinstance(item, Series) else True\n for item in (self.by if isinstance(self.by, (tuple, list)) else [self.by])\n )\n\n if not partitions_aligned:\n raise NotImplementedError(\n \"The grouped object and 'by' of the groupby must have the same divisions.\"\n )\n\n # slicing key applied to _GroupBy instance\n self._slice = slice\n\n if isinstance(self.by, list):\n by_meta = [\n item._meta if isinstance(item, Series) else item for item in self.by\n ]\n\n elif isinstance(self.by, Series):\n by_meta = self.by._meta\n\n else:\n by_meta = self.by\n\n self.dropna = {}\n if dropna is not None:\n self.dropna[\"dropna\"] = dropna\n\n # Hold off on setting observed by default: https://github.com/dask/dask/issues/6951\n self.observed = {}\n if observed is not None:\n self.observed[\"observed\"] = observed\n\n self._meta = self.obj._meta.groupby(\n by_meta, group_keys=group_keys, **self.observed, **self.dropna\n )\n\n @property # type: ignore\n @_deprecated()\n def index(self):\n return self.by\n\n @index.setter\n def index(self, value):\n self.by = value\n\n @property\n def _groupby_kwargs(self):\n return {\n \"by\": self.by,\n \"group_keys\": self.group_keys,\n **self.dropna,\n \"sort\": self.sort,\n **self.observed,\n }\n\n def __iter__(self):\n raise NotImplementedError(\n \"Iteration of DataFrameGroupBy objects requires computing the groups which \"\n \"may be slow. You probably want to use 'apply' to execute a function for \"\n \"all the columns. To access individual groups, use 'get_group'. 
To list \"\n \"all the group names, use 'df[].unique().compute()'.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.compute__GroupBy._shuffle.return.df4_by2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/groupby.py__GroupBy.compute__GroupBy._shuffle.return.df4_by2", "embedding": null, "metadata": {"file_path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1303, "end_line": 1354, "span_ids": ["_GroupBy._shuffle", "_GroupBy.compute"], "tokens": 436}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _GroupBy:\n\n def compute(self, **kwargs):\n raise NotImplementedError(\n \"DataFrameGroupBy does not allow compute method.\"\n \"Please chain it with an aggregation method (like ``.mean()``) or get a \"\n \"specific group using ``.get_group()`` before calling ``compute()``\"\n )\n\n def _shuffle(self, meta):\n df = self.obj\n\n if isinstance(self.obj, Series):\n # Temporarily convert series to dataframe for shuffle\n df = df.to_frame(\"__series__\")\n convert_back_to_series = True\n else:\n convert_back_to_series = False\n\n if isinstance(self.by, DataFrame): # add by columns to dataframe\n df2 = df.assign(**{\"_by_\" + c: self.by[c] for c in self.by.columns})\n by = self.by\n elif isinstance(self.by, Series):\n df2 = df.assign(_by=self.by)\n by = self.by\n else:\n df2 = df\n by = df._select_columns_or_index(self.by)\n\n df3 = shuffle(df2, by) # shuffle dataframe and index\n\n if isinstance(self.by, DataFrame):\n # extract by from dataframe\n cols = [\"_by_\" + c for c in self.by.columns]\n by2 = df3[cols]\n if is_dataframe_like(meta):\n df4 = df3.map_partitions(drop_columns, cols, meta.columns.dtype)\n else:\n df4 = df3.drop(cols, axis=1)\n elif isinstance(self.by, Series):\n by2 = df3[\"_by\"]\n by2.name = self.by.name\n if is_dataframe_like(meta):\n df4 = df3.map_partitions(drop_columns, \"_by\", meta.columns.dtype)\n else:\n df4 = df3.drop(\"_by\", axis=1)\n else:\n df4 = df3\n by2 = self.by\n\n if convert_back_to_series:\n df4 = df4[\"__series__\"].rename(self.obj.name)\n\n return df4, by2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_read_json._Create_a_dataframe_fro": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_read_json._Create_a_dataframe_fro", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/json.py", "file_name": "json.py", "file_type": "text/x-python", "category": "implementation", "start_line": 106, "end_line": 193, "span_ids": ["read_json"], "tokens": 757}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef read_json(\n url_path,\n orient=\"records\",\n lines=None,\n storage_options=None,\n blocksize=None,\n sample=2**20,\n encoding=\"utf-8\",\n errors=\"strict\",\n compression=\"infer\",\n meta=None,\n engine=pd.read_json,\n include_path_column=False,\n path_converter=None,\n **kwargs,\n):\n \"\"\"Create a dataframe from a set of JSON files\n\n This utilises ``pandas.read_json()``, and most parameters are\n passed through - see its docstring.\n\n Differences: orient is 'records' by default, with lines=True; this\n is appropriate for line-delimited \"JSON-lines\" data, the kind of JSON output\n that is most common in big-data scenarios, and which can be chunked when\n reading (see ``read_json()``). All other options require blocksize=None,\n i.e., one partition per input file.\n\n Parameters\n ----------\n url_path: str, list of str\n Location to read from. If a string, can include a glob character to\n find a set of file names.\n Supports protocol specifications such as ``\"s3://\"``.\n encoding, errors:\n The text encoding to implement, e.g., \"utf-8\" and how to respond\n to errors in the conversion (see ``str.encode()``).\n orient, lines, kwargs\n passed to pandas; if not specified, lines=True when orient='records',\n False otherwise.\n storage_options: dict\n Passed to backend file-system implementation\n blocksize: None or int\n If None, files are not blocked, and you get one partition per input\n file. If int, which can only be used for line-delimited JSON files,\n each partition will be approximately this size in bytes, to the nearest\n newline character.\n sample: int\n Number of bytes to pre-load, to provide an empty dataframe structure\n to any blocks without data. Only relevant when using blocksize.\n encoding, errors:\n Text conversion, ``see bytes.decode()``\n compression : string or None\n String like 'gzip' or 'xz'.\n engine : function object, default ``pd.read_json``\n The underlying function that dask will use to read JSON files. By\n default, this will be the pandas JSON reader (``pd.read_json``).\n include_path_column : bool or str, optional\n Include a column with the file path where each row in the dataframe\n originated. If ``True``, a new column is added to the dataframe called\n ``path``. If ``str``, sets new column name. Default is ``False``.\n path_converter : function or None, optional\n A function that takes one argument and returns a string. Used to convert\n paths in the ``path`` column, for instance, to strip a common prefix from\n all the paths.\n $META\n\n Returns\n -------\n dask.DataFrame\n\n Examples\n --------\n Load single file\n\n >>> dd.read_json('myfile.1.json') # doctest: +SKIP\n\n Load multiple files\n\n >>> dd.read_json('myfile.*.json') # doctest: +SKIP\n\n >>> dd.read_json(['myfile.1.json', 'myfile.2.json']) # doctest: +SKIP\n\n Load large line-delimited JSON files using partitions of approx\n 256MB size\n\n >> dd.read_json('data/file*.csv', blocksize=2**28)\n \"\"\"\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json.if_lines_is_None__read_json.return.from_delayed_parts_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json.if_lines_is_None__read_json.return.from_delayed_parts_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/json.py", "file_name": "json.py", "file_type": "text/x-python", "category": "implementation", "start_line": 194, "end_line": 286, "span_ids": ["read_json"], "tokens": 609}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@insert_meta_param_description\ndef read_json(\n url_path,\n orient=\"records\",\n lines=None,\n storage_options=None,\n blocksize=None,\n sample=2**20,\n encoding=\"utf-8\",\n errors=\"strict\",\n compression=\"infer\",\n meta=None,\n engine=pd.read_json,\n include_path_column=False,\n path_converter=None,\n **kwargs,\n):\n if lines is None:\n lines = orient == \"records\"\n if orient != \"records\" and lines:\n raise ValueError(\n \"Line-delimited JSON is only available with\" 'orient=\"records\".'\n )\n if blocksize and (orient != \"records\" or not lines):\n raise ValueError(\n \"JSON file chunking only allowed for JSON-lines\"\n \"input (orient='records', lines=True).\"\n )\n storage_options = storage_options or {}\n if include_path_column is True:\n include_path_column = \"path\"\n\n if path_converter is None:\n path_converter = lambda x: x\n\n if blocksize:\n b_out = read_bytes(\n url_path,\n b\"\\n\",\n blocksize=blocksize,\n sample=sample,\n compression=compression,\n include_path=include_path_column,\n **storage_options,\n )\n if include_path_column:\n first, chunks, paths = b_out\n first_path = path_converter(paths[0])\n path_dtype = pd.CategoricalDtype(path_converter(p) for p in paths)\n flat_paths = flatten(\n [path_converter(p)] * len(chunk) for p, chunk in zip(paths, chunks)\n )\n else:\n first, chunks = b_out\n first_path = None\n flat_paths = (None,)\n path_dtype = None\n\n flat_chunks = flatten(chunks)\n if meta is None:\n meta = read_json_chunk(\n first,\n encoding,\n errors,\n engine,\n include_path_column,\n first_path,\n path_dtype,\n kwargs,\n )\n meta = make_meta(meta)\n parts = [\n delayed(read_json_chunk)(\n chunk,\n encoding,\n errors,\n engine,\n include_path_column,\n path,\n path_dtype,\n kwargs,\n meta=meta,\n )\n for chunk, path in zip_longest(flat_chunks, flat_paths)\n ]\n else:\n files = open_files(\n url_path,\n \"rt\",\n encoding=encoding,\n errors=errors,\n compression=compression,\n **storage_options,\n )\n path_dtype = pd.CategoricalDtype(path_converter(f.path) for f in files)\n parts = [\n delayed(read_json_file)(\n f,\n orient,\n lines,\n engine,\n include_path_column,\n path_converter(f.path),\n path_dtype,\n kwargs,\n )\n for f in files\n ]\n\n return from_delayed(parts, meta=meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_chunk_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/json.py_read_json_chunk_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/json.py", "file_name": "json.py", "file_type": "text/x-python", "category": "implementation", "start_line": 289, "end_line": 320, "span_ids": ["read_json_chunk", "read_json_file", "add_path_column"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_json_chunk(\n chunk, encoding, errors, engine, column_name, path, path_dtype, kwargs, meta=None\n):\n s = io.StringIO(chunk.decode(encoding, errors))\n s.seek(0)\n df = engine(s, orient=\"records\", lines=True, **kwargs)\n if meta is not None and df.empty:\n return meta\n\n if column_name:\n df = add_path_column(df, column_name, path, path_dtype)\n\n return df\n\n\ndef read_json_file(f, orient, lines, engine, column_name, path, path_dtype, kwargs):\n with f as open_file:\n df = engine(open_file, orient=orient, lines=lines, **kwargs)\n if column_name:\n df = add_path_column(df, column_name, path, path_dtype)\n return df\n\n\ndef add_path_column(df, column_name, path, dtype):\n if column_name in df.columns:\n raise ValueError(\n f\"Files already contain the column name: '{column_name}', so the path \"\n \"column cannot use this name. 
Please set `include_path_column` to a \"\n \"unique name.\"\n )\n return df.assign(**{column_name: pd.Series([path] * len(df), dtype=dtype)})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__read_table_from_path__read_table_from_path.if_partition_keys_.else_.with__open_input_files_.if_row_groups_None_.else_.return.pq_ParquetFile_fil_read_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py__read_table_from_path__read_table_from_path.if_partition_keys_.else_.with__open_input_files_.if_row_groups_None_.else_.return.pq_ParquetFile_fil_read_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 195, "end_line": 280, "span_ids": ["_read_table_from_path"], "tokens": 510}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _read_table_from_path(\n path,\n fs,\n row_groups,\n columns,\n schema,\n filters,\n partitions,\n partition_keys,\n piece_to_arrow_func,\n **kwargs,\n):\n \"\"\"Read arrow table from file path.\n\n Used in all cases by `ArrowLegacyEngine._read_table`.\n Used by `ArrowDatasetEngine._read_table` when no filters\n are specified (otherwise fragments are converted directly\n into tables).\n \"\"\"\n\n # Define file-opening options\n read_kwargs = kwargs.get(\"read\", {}).copy()\n precache_options, open_file_options = _process_open_file_options(\n read_kwargs.pop(\"open_file_options\", {}),\n **(\n {\n \"allow_precache\": False,\n \"default_cache\": \"none\",\n }\n if _is_local_fs(fs)\n else {\n \"columns\": columns,\n \"row_groups\": row_groups if row_groups == [None] else [row_groups],\n \"default_engine\": \"pyarrow\",\n \"default_cache\": \"none\",\n }\n ),\n )\n\n if partition_keys:\n tables = []\n with _open_input_files(\n [path],\n fs=fs,\n precache_options=precache_options,\n **open_file_options,\n )[0] as fil:\n for rg in row_groups:\n piece = pq.ParquetDatasetPiece(\n path,\n row_group=rg,\n partition_keys=partition_keys,\n open_file_func=lambda _path, **_kwargs: fil,\n )\n arrow_table = piece_to_arrow_func(\n piece, columns, partitions, **read_kwargs\n )\n tables.append(arrow_table)\n\n if len(row_groups) > 1:\n # NOTE: Not covered by pytest\n return pa.concat_tables(tables)\n else:\n return tables[0]\n else:\n with _open_input_files(\n [path],\n fs=fs,\n precache_options=precache_options,\n **open_file_options,\n )[0] as fil:\n if row_groups == [None]:\n return pq.ParquetFile(fil).read(\n columns=columns,\n use_threads=False,\n use_pandas_metadata=True,\n **read_kwargs,\n )\n else:\n return pq.ParquetFile(fil).read_row_groups(\n row_groups,\n columns=columns,\n use_threads=False,\n use_pandas_metadata=True,\n **read_kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": 
"TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.if_len_paths_1_and_fs_ArrowDatasetEngine._collect_dataset_info._be_avoided_at_all_costs": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.if_len_paths_1_and_fs_ArrowDatasetEngine._collect_dataset_info._be_avoided_at_all_costs", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 845, "end_line": 932, "span_ids": ["ArrowDatasetEngine._collect_dataset_info"], "tokens": 853}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n ):\n # ... other code\n (\n _dataset_kwargs,\n read_kwargs,\n user_kwargs,\n ) = _split_user_options(**kwargs)\n # ... other code\n if len(paths) == 1 and fs.isdir(paths[0]):\n\n # Use _analyze_paths to avoid relative-path\n # problems (see GH#5608)\n paths, base, fns = _sort_and_analyze_paths(paths, fs)\n paths = fs.sep.join([base, fns[0]])\n\n meta_path = fs.sep.join([paths, \"_metadata\"])\n if not ignore_metadata_file and fs.exists(meta_path):\n # Use _metadata file\n ds = pa_ds.parquet_dataset(\n meta_path,\n filesystem=fs,\n partitioning=partitioning[\"obj\"].discover(\n *partitioning.get(\"args\", []),\n **partitioning.get(\"kwargs\", {}),\n ),\n **_dataset_kwargs,\n )\n has_metadata_file = True\n if gather_statistics is None:\n gather_statistics = True\n elif require_extension:\n # Need to materialize all paths if we are missing the _metadata file\n # Raise error if all files have been filtered by extension\n len0 = len(paths)\n paths = [\n path for path in fs.find(paths) if path.endswith(require_extension)\n ]\n if len0 and paths == []:\n raise ValueError(\n \"No files satisfy the `require_extension` criteria \"\n f\"(files must end with {require_extension}).\"\n )\n\n elif len(paths) > 1:\n paths, base, fns = _sort_and_analyze_paths(paths, fs)\n meta_path = fs.sep.join([base, \"_metadata\"])\n if \"_metadata\" in fns:\n # Pyarrow cannot handle \"_metadata\" when `paths` is a list\n # Use _metadata file\n if not ignore_metadata_file:\n ds = pa_ds.parquet_dataset(\n meta_path,\n filesystem=fs,\n partitioning=partitioning[\"obj\"].discover(\n *partitioning.get(\"args\", []),\n **partitioning.get(\"kwargs\", {}),\n ),\n **_dataset_kwargs,\n )\n has_metadata_file = True\n if gather_statistics is None:\n gather_statistics = True\n\n # Populate valid_paths, since the original path list\n # must be used to filter the _metadata-based dataset\n fns.remove(\"_metadata\")\n valid_paths = fns\n\n # Final \"catch-all\" pyarrow.dataset call\n if ds is None:\n ds = pa_ds.dataset(\n paths,\n filesystem=fs,\n format=\"parquet\",\n 
partitioning=partitioning[\"obj\"].discover(\n *partitioning.get(\"args\", []),\n **partitioning.get(\"kwargs\", {}),\n ),\n **_dataset_kwargs,\n )\n\n # At this point, we know if `split_row_groups` should be\n # set to `True` by default. If the user has not specified\n # this option, we will only collect statistics if there is\n # a global \"_metadata\" file available, otherwise we will\n # opt for `gather_statistics=False`. For `ArrowDatasetEngine`,\n # statistics are only required to calculate divisions\n # and/or aggregate row-groups using `chunksize` (not for\n # filtering).\n #\n # By default, we will create an output partition for each\n # row group in the dataset (`split_row_groups=True`).\n # However, we will NOT split by row-group if\n # `gather_statistics=False`, because this can be\n # interpreted as an indication that metadata overhead should\n # be avoided at all costs.\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.if_gather_statistics_is_N_ArrowDatasetEngine._collect_dataset_info.None_46": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._collect_dataset_info.if_gather_statistics_is_N_ArrowDatasetEngine._collect_dataset_info.None_46", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 933, "end_line": 1008, "span_ids": ["ArrowDatasetEngine._collect_dataset_info"], "tokens": 751}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _collect_dataset_info(\n cls,\n paths,\n fs,\n categories,\n index,\n gather_statistics,\n filters,\n split_row_groups,\n chunksize,\n aggregate_files,\n ignore_metadata_file,\n metadata_task_size,\n kwargs,\n ):\n # ... other code\n if gather_statistics is None:\n gather_statistics = False\n if split_row_groups is None:\n if gather_statistics:\n split_row_groups = True\n else:\n split_row_groups = False\n\n # Deal with directory partitioning\n # Get all partition keys (without filters) to populate partition_obj\n partition_obj = [] # See `partition_info` description below\n hive_categories = defaultdict(list)\n file_frag = None\n for file_frag in ds.get_fragments():\n keys = pa_ds._get_partition_keys(file_frag.partition_expression)\n if not (keys or hive_categories):\n break # Bail - This is not a hive-partitioned dataset\n for k, v in keys.items():\n if v not in hive_categories[k]:\n hive_categories[k].append(v)\n\n physical_schema = ds.schema\n if file_frag is not None:\n # Check/correct order of `categories` using last file_frag\n #\n # Note that `_get_partition_keys` does NOT preserve the\n # partition-hierarchy order of the keys. 
Therefore, we\n            # use custom logic to determine the \"correct\" ordering\n            # of the `categories` output.\n            #\n            # Example (why we need to \"reorder\" `categories`):\n            #\n            #    # Fragment path has \"hive\" structure\n            #    file_frag.path\n            #\n            #        '/data/path/b=x/c=x/part.1.parquet'\n            #\n            #    # `categories` may NOT preserve the hierarchy order\n            #    categories.keys()\n            #\n            #        dict_keys(['c', 'b'])\n            #\n            cat_keys = [\n                part.split(\"=\")[0]\n                for part in file_frag.path.split(fs.sep)\n                if \"=\" in part\n            ]\n            if set(hive_categories) == set(cat_keys):\n                hive_categories = {\n                    k: hive_categories[k] for k in cat_keys if k in hive_categories\n                }\n\n            physical_schema = file_frag.physical_schema\n\n        partition_names = list(hive_categories)\n        for name in partition_names:\n            partition_obj.append(PartitionObj(name, hive_categories[name]))\n\n        # Check the `aggregate_files` setting\n        aggregation_depth = _get_aggregation_depth(aggregate_files, partition_names)\n\n        # Construct and return `dataset_info`\n        #\n        # Note on (hive) partitioning information:\n        #\n        #    - \"partitions\" : (list of PartitionObj) This is a list of\n        #          simple objects providing `name` and `keys` attributes\n        #          for each partition column. The list is designed to\n        #          \"duck type\" a `ParquetPartitions` object, so that the\n        #          same code path can be used for both legacy and\n        #          pyarrow.dataset-based logic.\n        #    - \"partition_names\" : (list) This is a list containing the\n        #          names of partitioned columns.\n        #    - \"partitioning\" : (dict) The `partitioning` options\n        #          used for file discovery by pyarrow.\n        #\n        # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._construct_collection_plan_ArrowDatasetEngine._construct_collection_plan._Check_if_this_is_a_very": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._construct_collection_plan_ArrowDatasetEngine._construct_collection_plan._Check_if_this_is_a_very", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1145, "end_line": 1237, "span_ids": ["ArrowDatasetEngine._construct_collection_plan"], "tokens": 827}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n    #\n    # Public Class Methods\n\n    @classmethod\n    def _construct_collection_plan(cls, dataset_info):\n        \"\"\"pyarrow.dataset version of _construct_collection_plan\n        Use dataset_info to construct the general plan for\n        generating the output DataFrame collection.\n\n        The \"plan\" is essentially a list (called `parts`) of\n        information that is needed to produce each output partition.\n        After this function returns, the information in each\n        element of `parts` will be used to produce a single Dask-\n        DataFrame partition (unless some elements of `parts`\n        are aggregated together in a follow-up step).\n\n        This method also 
returns ``stats`` (which is a list of\n        parquet-metadata statistics for each element of parts),\n        and ``common_metadata`` (which is a dictionary of kwargs\n        that should be passed to the ``read_partition`` call for\n        every output partition).\n\n        This method is overridden in `ArrowLegacyEngine`.\n        \"\"\"\n\n        # Collect necessary dataset information from dataset_info\n        ds = dataset_info[\"ds\"]\n        fs = dataset_info[\"fs\"]\n        filters = dataset_info[\"filters\"]\n        split_row_groups = dataset_info[\"split_row_groups\"]\n        gather_statistics = dataset_info[\"gather_statistics\"]\n        chunksize = dataset_info[\"chunksize\"]\n        aggregation_depth = dataset_info[\"aggregation_depth\"]\n        index_cols = dataset_info[\"index_cols\"]\n        schema = dataset_info[\"schema\"]\n        partition_names = dataset_info[\"partition_names\"]\n        partitioning = dataset_info[\"partitioning\"]\n        partitions = dataset_info[\"partitions\"]\n        categories = dataset_info[\"categories\"]\n        has_metadata_file = dataset_info[\"has_metadata_file\"]\n        valid_paths = dataset_info[\"valid_paths\"]\n        kwargs = dataset_info[\"kwargs\"]\n\n        # Ensure metadata_task_size is set\n        # (Using config file or defaults)\n        metadata_task_size = _set_metadata_task_size(\n            dataset_info[\"metadata_task_size\"], fs\n        )\n\n        # Cannot gather_statistics if our `metadata` is a list\n        # of paths, or if we are building a multiindex (for now).\n        # We also don't \"need\" to gather statistics if we don't\n        # want to apply any filters or calculate divisions. Note\n        # that the `ArrowDatasetEngine` doesn't even require\n        # `gather_statistics=True` for filtering.\n        if split_row_groups is None:\n            split_row_groups = False\n        _need_aggregation_stats = chunksize or (\n            int(split_row_groups) > 1 and aggregation_depth\n        )\n        if len(index_cols) > 1:\n            gather_statistics = False\n        elif not _need_aggregation_stats and filters is None and len(index_cols) == 0:\n            gather_statistics = False\n\n        # Determine which columns need statistics.\n        flat_filters = _flatten_filters(filters)\n        stat_col_indices = {}\n        for i, name in enumerate(schema.names):\n            if name in index_cols or name in flat_filters:\n                if name in partition_names:\n                    # Partition columns won't have statistics\n                    continue\n                stat_col_indices[name] = i\n\n        # If the user has not specified `gather_statistics`,\n        # we will only do so if there are specific columns in\n        # need of statistics.\n        # NOTE: We cannot change `gather_statistics` from True\n        # to False (even if `stat_col_indices` is empty), in\n        # case a `chunksize` was specified, and the row-group\n        # statistics are needed for part aggregation.\n        if gather_statistics is None:\n            gather_statistics = bool(stat_col_indices)\n\n        # Add common kwargs\n        common_kwargs = {\n            \"partitioning\": partitioning,\n            \"partitions\": partitions,\n            \"categories\": categories,\n            \"filters\": filters,\n            \"schema\": schema,\n            **kwargs,\n        }\n\n        # Check if this is a very simple case where we can just return\n        # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._construct_collection_plan._the_path_names_ArrowDatasetEngine._construct_collection_plan.return.parts_stats_common_kwar": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._construct_collection_plan._the_path_names_ArrowDatasetEngine._construct_collection_plan.return.parts_stats_common_kwar", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1238, "end_line": 1328, "span_ids": ["ArrowDatasetEngine._construct_collection_plan"], "tokens": 733}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _construct_collection_plan(cls, dataset_info):\n # the path names\n if gather_statistics is False and not split_row_groups:\n return (\n [\n {\"piece\": (full_path, None, None)}\n for full_path in sorted(ds.files, key=natural_sort_key)\n ],\n [],\n common_kwargs,\n )\n\n # Get/transate filters\n ds_filters = None\n if filters is not None:\n ds_filters = pq._filters_to_expression(filters)\n\n # Define subset of `dataset_info` required by _collect_file_parts\n dataset_info_kwargs = {\n \"fs\": fs,\n \"split_row_groups\": split_row_groups,\n \"gather_statistics\": gather_statistics,\n \"partitioning\": partitioning,\n \"filters\": filters,\n \"ds_filters\": ds_filters,\n \"schema\": schema,\n \"stat_col_indices\": stat_col_indices,\n \"aggregation_depth\": aggregation_depth,\n \"chunksize\": chunksize,\n \"partitions\": partitions,\n }\n\n # Main parts/stats-construction\n if (\n has_metadata_file\n or metadata_task_size == 0\n or metadata_task_size > len(ds.files)\n ):\n # We have a global _metadata file to work with.\n # Therefore, we can just loop over fragments on the client.\n\n # Start with sorted (by path) list of file-based fragments\n file_frags = sorted(\n (frag for frag in ds.get_fragments(ds_filters)),\n key=lambda x: natural_sort_key(x.path),\n )\n parts, stats = cls._collect_file_parts(file_frags, dataset_info_kwargs)\n else:\n # We DON'T have a global _metadata file to work with.\n # We should loop over files in parallel\n\n # Collect list of file paths.\n # If valid_paths is not None, the user passed in a list\n # of files containing a _metadata file. 
Since we used\n            # the _metadata file to generate our dataset object, we need\n            # to ignore any file fragments that are not in the list.\n            all_files = sorted(ds.files, key=natural_sort_key)\n            if valid_paths:\n                all_files = [\n                    filef\n                    for filef in all_files\n                    if filef.split(fs.sep)[-1] in valid_paths\n                ]\n\n            parts, stats = [], []\n            if all_files:\n                # Build and compute a task graph to construct stats/parts\n                gather_parts_dsk = {}\n                name = \"gather-pq-parts-\" + tokenize(all_files, dataset_info_kwargs)\n                finalize_list = []\n                for task_i, file_i in enumerate(\n                    range(0, len(all_files), metadata_task_size)\n                ):\n                    finalize_list.append((name, task_i))\n                    gather_parts_dsk[finalize_list[-1]] = (\n                        cls._collect_file_parts,\n                        all_files[file_i : file_i + metadata_task_size],\n                        dataset_info_kwargs,\n                    )\n\n                def _combine_parts(parts_and_stats):\n                    parts, stats = [], []\n                    for part, stat in parts_and_stats:\n                        parts += part\n                        if stat:\n                            stats += stat\n                    return parts, stats\n\n                gather_parts_dsk[\"final-\" + name] = (_combine_parts, finalize_list)\n                parts, stats = Delayed(\"final-\" + name, gather_parts_dsk).compute()\n\n        return parts, stats, common_kwargs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_from___future___import_an_NONE_LABEL.___null_dask_index___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_from___future___import_an_NONE_LABEL.___null_dask_index___", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 32, "span_ids": ["imports"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport math\nimport warnings\n\nimport tlz as toolz\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import stringify_path\nfrom packaging.version import parse as parse_version\n\nfrom ....base import compute_as_if_collection, tokenize\nfrom ....blockwise import BlockIndex\nfrom ....delayed import Delayed\nfrom ....highlevelgraph import HighLevelGraph\nfrom ....layers import DataFrameIOLayer\nfrom ....utils import apply, import_required, natural_sort_key, parse_bytes\nfrom ...core import DataFrame, Scalar, new_dd_object\nfrom ...methods import concat\nfrom ..utils import _is_local_fs\nfrom .utils import Engine, _sort_and_analyze_paths\n\ntry:\n    import snappy\n\n    snappy.compress\nexcept (ImportError, AttributeError):\n    snappy = None\n\n\n__all__ = (\"read_parquet\", \"to_parquet\")\n\nNONE_LABEL = \"__null_dask_index__\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py___ParquetFunctionWrapper.__call__.return.read_parquet_part_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py___ParquetFunctionWrapper.__call__.return.read_parquet_part_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 98, "span_ids": ["ParquetFunctionWrapper", "imports", "ParquetFunctionWrapper.project_columns", "ParquetFunctionWrapper.__call__", "ParquetFunctionWrapper.__init__"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# ----------------------------------------------------------------------\n# User API\n\n\nclass ParquetFunctionWrapper:\n \"\"\"\n Parquet Function-Wrapper Class\n Reads parquet data from disk to produce a partition\n (given a `part` argument).\n \"\"\"\n\n def __init__(\n self,\n engine,\n fs,\n meta,\n columns,\n index,\n kwargs,\n common_kwargs,\n ):\n self.engine = engine\n self.fs = fs\n self.meta = meta\n self.columns = columns\n self.index = index\n\n # `kwargs` = user-defined kwargs to be passed\n # identically for all partitions.\n #\n # `common_kwargs` = kwargs set by engine to be\n # passed identically for all\n # partitions.\n self.common_kwargs = toolz.merge(common_kwargs, kwargs or {})\n\n def project_columns(self, columns):\n \"\"\"Return a new ParquetFunctionWrapper object\n with a sub-column projection.\n \"\"\"\n if columns == self.columns:\n return self\n return ParquetFunctionWrapper(\n self.engine,\n self.fs,\n self.meta,\n columns,\n self.index,\n None, # Already merged into common_kwargs\n self.common_kwargs,\n )\n\n def __call__(self, part):\n\n if not isinstance(part, list):\n part = [part]\n\n return read_parquet_part(\n self.fs,\n self.engine,\n self.meta,\n [(p[\"piece\"], p.get(\"kwargs\", {})) for p in part],\n self.columns,\n self.index,\n self.common_kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ToParquetFunctionWrapper_ToParquetFunctionWrapper.__dask_tokenize__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ToParquetFunctionWrapper_ToParquetFunctionWrapper.__dask_tokenize__.return._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 101, "end_line": 145, "span_ids": ["ToParquetFunctionWrapper.__init__", "ToParquetFunctionWrapper.__dask_tokenize__", "ToParquetFunctionWrapper"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ToParquetFunctionWrapper:\n \"\"\"\n Parquet Function-Wrapper Class\n\n Writes a DataFrame partition into a distinct parquet\n file. When called, the function also requires the\n current block index (via ``blockwise.BlockIndex``).\n \"\"\"\n\n def __init__(\n self,\n engine,\n path,\n fs,\n partition_on,\n write_metadata_file,\n i_offset,\n name_function,\n kwargs_pass,\n ):\n self.engine = engine\n self.path = path\n self.fs = fs\n self.partition_on = partition_on\n self.write_metadata_file = write_metadata_file\n self.i_offset = i_offset\n self.name_function = name_function\n self.kwargs_pass = kwargs_pass\n\n # NOTE: __name__ must be with \"to-parquet\"\n # for the name of the resulting `Blockwise`\n # layer to begin with \"to-parquet\"\n self.__name__ = \"to-parquet\"\n\n def __dask_tokenize__(self):\n return (\n self.engine,\n self.path,\n self.fs,\n self.partition_on,\n self.write_metadata_file,\n self.i_offset,\n self.name_function,\n self.kwargs_pass,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ToParquetFunctionWrapper.__call___read_parquet": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_ToParquetFunctionWrapper.__call___read_parquet", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 147, "end_line": 481, "span_ids": ["ToParquetFunctionWrapper.__call__", "read_parquet"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ToParquetFunctionWrapper:\n\n def __call__(self, df, block_index: tuple[int]):\n # Get partition index from block index tuple\n part_i = block_index[0]\n filename = (\n f\"part.{part_i + self.i_offset}.parquet\"\n if self.name_function is None\n else self.name_function(part_i + self.i_offset)\n )\n\n # Write out data\n return self.engine.write_partition(\n df,\n self.path,\n self.fs,\n filename,\n self.partition_on,\n self.write_metadata_file,\n **(dict(self.kwargs_pass, head=True) if part_i == 0 else self.kwargs_pass),\n )\n\n\ndef read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n ignore_metadata_file=False,\n metadata_task_size=None,\n split_row_groups=None,\n chunksize=None,\n aggregate_files=None,\n **kwargs,\n):\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.parts_divisions_index__read_parquet.return.new_dd_object_graph_outp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_read_parquet.parts_divisions_index__read_parquet.return.new_dd_object_graph_outp", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 428, "end_line": 481, "span_ids": ["read_parquet"], "tokens": 402}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_parquet(\n path,\n columns=None,\n filters=None,\n categories=None,\n index=None,\n storage_options=None,\n engine=\"auto\",\n gather_statistics=None,\n ignore_metadata_file=False,\n metadata_task_size=None,\n split_row_groups=None,\n chunksize=None,\n aggregate_files=None,\n **kwargs,\n):\n # ... other code\n parts, divisions, index, index_in_columns = process_statistics(\n parts,\n statistics,\n filters,\n index,\n chunksize,\n split_row_groups,\n fs,\n aggregation_depth,\n )\n\n # Account for index and columns arguments.\n # Modify `meta` dataframe accordingly\n meta, index, columns = set_index_columns(\n meta, index, columns, index_in_columns, auto_index_allowed\n )\n if meta.index.name == NONE_LABEL:\n meta.index.name = None\n\n # Set the index that was previously treated as a column\n if index_in_columns:\n meta = meta.set_index(index)\n if meta.index.name == NONE_LABEL:\n meta.index.name = None\n\n if len(divisions) < 2:\n # empty dataframe - just use meta\n graph = {(output_name, 0): meta}\n divisions = (None, None)\n else:\n # Create Blockwise layer\n layer = DataFrameIOLayer(\n output_name,\n columns,\n parts,\n ParquetFunctionWrapper(\n engine,\n fs,\n meta,\n columns,\n index,\n {}, # All kwargs should now be in `common_kwargs`\n common_kwargs,\n ),\n label=label,\n creation_info={\n \"func\": read_parquet,\n \"args\": (path,),\n \"kwargs\": input_kwargs,\n },\n )\n graph = HighLevelGraph({output_name: layer}, {output_name: set()})\n\n return new_dd_object(graph, output_name, meta, divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_check_multi_support_read_parquet_part.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_check_multi_support_read_parquet_part.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 484, "end_line": 524, "span_ids": ["check_multi_support", 
"read_parquet_part"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_multi_support(engine):\n # Helper function to check that the engine\n # supports a multi-partition read\n return hasattr(engine, \"multi_support\") and engine.multi_support()\n\n\ndef read_parquet_part(fs, engine, meta, part, columns, index, kwargs):\n \"\"\"Read a part of a parquet dataset\n\n This function is used by `read_parquet`.\"\"\"\n if isinstance(part, list):\n if len(part) == 1 or part[0][1] or not check_multi_support(engine):\n # Part kwargs expected\n func = engine.read_partition\n dfs = [\n func(fs, rg, columns.copy(), index, **toolz.merge(kwargs, kw))\n for (rg, kw) in part\n ]\n df = concat(dfs, axis=0) if len(dfs) > 1 else dfs[0]\n else:\n # No part specific kwargs, let engine read\n # list of parts at once\n df = engine.read_partition(\n fs, [p[0] for p in part], columns.copy(), index, **kwargs\n )\n else:\n # NOTE: `kwargs` are the same for all parts, while `part_kwargs` may\n # be different for each part.\n rg, part_kwargs = part\n df = engine.read_partition(\n fs, rg, columns, index, **toolz.merge(kwargs, part_kwargs)\n )\n\n if meta.columns.name:\n df.columns.name = meta.columns.name\n columns = columns or []\n index = index or []\n df = df[[c for c in columns if c not in index]]\n if index == [NONE_LABEL]:\n df.index.name = None\n return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet._index_preservation_itse_to_parquet.meta_name._metadata_data_write_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet._index_preservation_itse_to_parquet.meta_name._metadata_data_write_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 728, "end_line": 819, "span_ids": ["to_parquet"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n overwrite=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n custom_metadata=None,\n write_metadata_file=True,\n compute=True,\n compute_kwargs=None,\n schema=None,\n name_function=None,\n **kwargs,\n):\n # index preservation itself). 
For both engines, the column index\n # will be written to \"pandas metadata\" if write_index=True\n index_cols = []\n if write_index:\n real_cols = set(df.columns)\n none_index = list(df._meta.index.names) == [None]\n df = df.reset_index()\n if none_index:\n df.columns = [\n c if c not in reserved_names else NONE_LABEL for c in df.columns\n ]\n index_cols = [c for c in set(df.columns) - real_cols]\n else:\n # Not writing index - might as well drop it\n df = df.reset_index(drop=True)\n\n _to_parquet_kwargs = {\n \"engine\",\n \"compression\",\n \"write_index\",\n \"append\",\n \"ignore_divisions\",\n \"partition_on\",\n \"storage_options\",\n \"write_metadata_file\",\n \"compute\",\n }\n kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}\n\n # Engine-specific initialization steps to write the dataset.\n # Possibly create parquet metadata, and load existing stuff if appending\n if custom_metadata:\n if b\"pandas\" in custom_metadata.keys():\n raise ValueError(\n \"User-defined key/value metadata (custom_metadata) can not \"\n \"contain a b'pandas' key. This key is reserved by Pandas, \"\n \"and overwriting the corresponding value can render the \"\n \"entire dataset unreadable.\"\n )\n kwargs_pass[\"custom_metadata\"] = custom_metadata\n meta, schema, i_offset = engine.initialize_write(\n df,\n fs,\n path,\n append=append,\n ignore_divisions=ignore_divisions,\n partition_on=partition_on,\n division_info=division_info,\n index_cols=index_cols,\n schema=schema,\n **kwargs_pass,\n )\n\n # Check that custom name_function is valid,\n # and that it will produce unique names\n if name_function is not None:\n if not callable(name_function):\n raise ValueError(\"``name_function`` must be a callable with one argument.\")\n filenames = [name_function(i + i_offset) for i in range(df.npartitions)]\n if len(set(filenames)) < len(filenames):\n raise ValueError(\"``name_function`` must produce unique filenames.\")\n\n # Create Blockwise layer for parquet-data write\n kwargs_pass[\"fmd\"] = meta\n kwargs_pass[\"compression\"] = compression\n kwargs_pass[\"index_cols\"] = index_cols\n kwargs_pass[\"schema\"] = schema\n data_write = df.map_partitions(\n ToParquetFunctionWrapper(\n engine,\n path,\n fs,\n partition_on,\n write_metadata_file,\n i_offset,\n name_function,\n kwargs_pass,\n ),\n BlockIndex((df.npartitions,)),\n # Pass in the original metadata to avoid\n # metadata emulation in `map_partitions`.\n # This is necessary, because we are not\n # expecting a dataframe-like output.\n meta=df._meta,\n enforce_metadata=False,\n transform_divisions=False,\n align_dataframes=False,\n )\n\n # Collect metadata and write _metadata.\n # TODO: Use tree-reduction layer (when available)\n meta_name = \"metadata-\" + data_write._name\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.if_write_metadata_file__to_parquet.if_compute_.else_.return.Scalar_graph_meta_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_to_parquet.if_write_metadata_file__to_parquet.if_compute_.else_.return.Scalar_graph_meta_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 820, "end_line": 844, "span_ids": ["to_parquet"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def to_parquet(\n df,\n path,\n engine=\"auto\",\n compression=\"default\",\n write_index=True,\n append=False,\n overwrite=False,\n ignore_divisions=False,\n partition_on=None,\n storage_options=None,\n custom_metadata=None,\n write_metadata_file=True,\n compute=True,\n compute_kwargs=None,\n schema=None,\n name_function=None,\n **kwargs,\n):\n # ... other code\n if write_metadata_file:\n dsk = {\n (meta_name, 0): (\n apply,\n engine.write_metadata,\n [\n data_write.__dask_keys__(),\n meta,\n fs,\n path,\n ],\n {\"append\": append, \"compression\": compression},\n )\n }\n else:\n dsk = {(meta_name, 0): (lambda x: None, data_write.__dask_keys__())}\n\n # Convert data_write + dsk to computable collection\n graph = HighLevelGraph.from_collections(meta_name, dsk, dependencies=(data_write,))\n if compute:\n return compute_as_if_collection(\n Scalar, graph, [(meta_name, 0)], **compute_kwargs\n )\n else:\n return Scalar(graph, meta_name, \"\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_create_metadata_file.out_dir_create_metadata_file.return.out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py_create_metadata_file.out_dir_create_metadata_file.return.out", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 925, "end_line": 986, "span_ids": ["create_metadata_file"], "tokens": 656}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def create_metadata_file(\n paths,\n root_dir=None,\n out_dir=None,\n engine=\"pyarrow\",\n storage_options=None,\n split_every=32,\n compute=True,\n compute_kwargs=None,\n fs=None,\n):\n 
# ... other code\n    out_dir = root_dir if out_dir is None else out_dir\n\n    # Start constructing a raw graph\n    dsk = {}\n    name = \"gen-metadata-\" + tokenize(paths, fs)\n    collect_name = \"collect-\" + name\n    agg_name = \"agg-\" + name\n\n    # Define a \"collect\" task for each file in the input list.\n    # Each task will:\n    #   1. Extract the footer metadata from a distinct file\n    #   2. Populate the `file_path` field in the metadata\n    #   3. Return the extracted/modified metadata\n    for p, (fn, path) in enumerate(zip(fns, paths)):\n        key = (collect_name, p, 0)\n        dsk[key] = (engine.collect_file_metadata, path, fs, fn)\n\n    # Build a reduction tree to aggregate all footer metadata\n    # into a single metadata object. Each task in the tree\n    # will take in a list of metadata objects as input, and will\n    # usually output a single (aggregated) metadata object.\n    # The final task in the tree will write the result to disk\n    # instead of returning it (this behavior is triggered by\n    # passing a file path to `engine.aggregate_metadata`).\n    parts = len(paths)\n    widths = [parts]\n    while parts > 1:\n        parts = math.ceil(parts / split_every)\n        widths.append(parts)\n    height = len(widths)\n    for depth in range(1, height):\n        for group in range(widths[depth]):\n            p_max = widths[depth - 1]\n            lstart = split_every * group\n            lstop = min(lstart + split_every, p_max)\n            dep_task_name = collect_name if depth == 1 else agg_name\n            node_list = [(dep_task_name, p, depth - 1) for p in range(lstart, lstop)]\n            if depth == height - 1:\n                assert group == 0\n                dsk[name] = (engine.aggregate_metadata, node_list, fs, out_dir)\n            else:\n                dsk[(agg_name, group, depth)] = (\n                    engine.aggregate_metadata,\n                    node_list,\n                    None,\n                    None,\n                )\n\n    # There will be no aggregation tasks if there is only one file\n    if len(paths) == 1:\n        dsk[name] = (engine.aggregate_metadata, [(collect_name, 0, 0)], fs, out_dir)\n\n    # Convert the raw graph to a `Delayed` object\n    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[])\n    out = Delayed(name, graph)\n\n    # Optionally compute the result\n    if compute:\n        if compute_kwargs is None:\n            compute_kwargs = dict()\n        out = out.compute(**compute_kwargs)\n    return out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py__ENGINES_get_engine.if_engine_auto_.else_.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/core.py__ENGINES_get_engine.if_engine_auto_.else_.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/core.py", "file_name": "core.py", "file_type": "text/x-python", "category": "implementation", "start_line": 989, "end_line": 1063, "span_ids": ["get_engine", "impl:10"], "tokens": 558}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "_ENGINES: dict[str, Engine] = {}\n\n\ndef get_engine(engine):\n    \"\"\"Get the parquet engine backend implementation.\n\n    Parameters\n    ----------\n    engine : str, default 'auto'\n        Backend parquet library to 
use. Options include: 'auto', 'fastparquet',\n        and 'pyarrow'. Defaults to 'auto', which selects the FastParquetEngine\n        if fastparquet is installed (and ArrowDatasetEngine otherwise).\n        If 'pyarrow' is specified, the ArrowDatasetEngine (which leverages the\n        pyarrow.dataset API) will be used.\n\n    Returns\n    -------\n    An ``Engine`` subclass implementing the parquet read/write backend.\n    \"\"\"\n    if engine in _ENGINES:\n        return _ENGINES[engine]\n\n    if engine == \"auto\":\n        for eng in [\"fastparquet\", \"pyarrow\"]:\n            try:\n                return get_engine(eng)\n            except RuntimeError:\n                pass\n        else:\n            raise RuntimeError(\"Please install either fastparquet or pyarrow\")\n\n    elif engine == \"fastparquet\":\n        import_required(\"fastparquet\", \"`fastparquet` not installed\")\n        from .fastparquet import FastParquetEngine\n\n        _ENGINES[\"fastparquet\"] = eng = FastParquetEngine\n        return eng\n\n    elif engine in (\"pyarrow\", \"arrow\", \"pyarrow-legacy\", \"pyarrow-dataset\"):\n\n        pa = import_required(\"pyarrow\", \"`pyarrow` not installed\")\n        pa_version = parse_version(pa.__version__)\n\n        if engine in (\"pyarrow\", \"arrow\"):\n            engine = \"pyarrow-dataset\"\n\n        if engine == \"pyarrow-dataset\":\n            if pa_version.major < 1:\n                raise ImportError(\n                    f\"pyarrow-{pa_version.major} does not support the \"\n                    f\"pyarrow.dataset API. Please install pyarrow>=1.\"\n                )\n\n            from .arrow import ArrowDatasetEngine\n\n            _ENGINES[engine] = eng = ArrowDatasetEngine\n        else:\n            from .arrow import ArrowLegacyEngine\n\n            warnings.warn(\n                \"`ArrowLegacyEngine` ('pyarrow-legacy') is deprecated \"\n                \"and will be removed in the near future. Please use \"\n                \"`engine='pyarrow'` instead.\",\n                FutureWarning,\n                stacklevel=3,\n            )\n            _ENGINES[engine] = eng = ArrowLegacyEngine\n        return eng\n\n    else:\n        raise ValueError(\n            f'Unsupported engine: \"{engine}\".'\n            ' Valid choices include \"pyarrow\" and \"fastparquet\".'\n        )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info_FastParquetEngine._collect_dataset_info.require_extension.dataset_kwargs_pop_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine._collect_dataset_info_FastParquetEngine._collect_dataset_info.require_extension.dataset_kwargs_pop_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 361, "end_line": 396, "span_ids": ["FastParquetEngine._collect_dataset_info"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n    @classmethod\n    def _collect_dataset_info(\n        cls,\n        paths,\n        fs,\n        categories,\n        index,\n        gather_statistics,\n        filters,\n        split_row_groups,\n        chunksize,\n        aggregate_files,\n        ignore_metadata_file,\n        metadata_task_size,\n        kwargs,\n    ):\n\n        # Define the parquet-file 
(pf) object to use for metadata,\n # Also, initialize `parts`. If `parts` is populated here,\n # then each part will correspond to a file. Otherwise, each part will\n # correspond to a row group (populated later).\n #\n # This logic is mostly to handle `gather_statistics=False` cases,\n # because this also means we should avoid scanning every file in the\n # dataset. If _metadata is available, set `gather_statistics=True`\n # (if `gather_statistics=None`).\n\n # Extract \"supported\" key-word arguments from `kwargs`.\n # Split items into `dataset_kwargs` and `read_kwargs`\n dataset_kwargs, read_kwargs, user_kwargs = _split_user_options(**kwargs)\n\n parts = []\n _metadata_exists = False\n require_extension = dataset_kwargs.pop(\n \"require_extension\", (\".parq\", \".parquet\")\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.pf_to_pandas_FastParquetEngine.pf_to_pandas.return.df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/fastparquet.py_FastParquetEngine.pf_to_pandas_FastParquetEngine.pf_to_pandas.return.df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/fastparquet.py", "file_name": "fastparquet.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1040, "end_line": 1130, "span_ids": ["FastParquetEngine.pf_to_pandas"], "tokens": 566}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastParquetEngine(Engine):\n\n @classmethod\n def pf_to_pandas(\n cls,\n pf,\n fs=None,\n columns=None,\n categories=None,\n index=None,\n open_file_options=None,\n **kwargs,\n ):\n # This method was mostly copied from the fastparquet\n # `ParquetFile.to_pandas` definition. 
We maintain our\n    # own implementation in Dask to enable better remote\n    # file-handling control\n\n    # Handle selected columns\n    if columns is not None:\n        columns = columns[:]\n    else:\n        columns = pf.columns + list(pf.cats)\n    if index:\n        columns += [i for i in index if i not in columns]\n\n    # Extract row-groups and pre-allocate df\n    rgs = pf.row_groups\n    size = sum(rg.num_rows for rg in rgs)\n    df, views = pf.pre_allocate(size, columns, categories, index)\n    start = 0\n\n    # Get a map of file names -> row-groups\n    fn_rg_map = defaultdict(list)\n    for rg in rgs:\n        fn = pf.row_group_filename(rg)\n        fn_rg_map[fn].append(rg)\n\n    # Define file-opening options\n    precache_options, open_file_options = _process_open_file_options(\n        open_file_options,\n        **(\n            {\n                \"allow_precache\": False,\n                \"default_cache\": \"readahead\",\n            }\n            if _is_local_fs(fs)\n            else {\n                \"metadata\": pf,\n                \"columns\": list(set(columns).intersection(pf.columns)),\n                \"row_groups\": [rgs for rgs in fn_rg_map.values()],\n                \"default_engine\": \"fastparquet\",\n                \"default_cache\": \"readahead\",\n            }\n        ),\n    )\n\n    with ExitStack() as stack:\n\n        for fn, infile in zip(\n            fn_rg_map.keys(),\n            _open_input_files(\n                list(fn_rg_map.keys()),\n                fs=fs,\n                context_stack=stack,\n                precache_options=precache_options,\n                **open_file_options,\n            ),\n        ):\n            for rg in fn_rg_map[fn]:\n                thislen = rg.num_rows\n                parts = {\n                    name: (\n                        v\n                        if name.endswith(\"-catdef\")\n                        else v[start : start + thislen]\n                    )\n                    for (name, v) in views.items()\n                }\n\n                # Add row-group data to df\n                pf.read_row_group_file(\n                    rg,\n                    columns,\n                    categories,\n                    index,\n                    assign=parts,\n                    partition_meta=pf.partition_meta,\n                    infile=infile,\n                    **kwargs,\n                )\n                start += thislen\n    return df", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__set_metadata_task_size__set_metadata_task_size.return.metadata_task_size": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__set_metadata_task_size__set_metadata_task_size.return.metadata_task_size", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 679, "end_line": 690, "span_ids": ["_set_metadata_task_size"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _set_metadata_task_size(metadata_task_size, fs):\n    # Set metadata_task_size using the config file\n    # if the kwarg value was not specified\n    if metadata_task_size is None:\n        # Use the default from the config file if one is\n        # specified there; otherwise fall back to \"0\"\n        config_str = \"dataframe.parquet.metadata-task-size-\" + (\n            \"local\" if _is_local_fs(fs) else \"remote\"\n        )\n        return config.get(config_str, 0)\n\n    return metadata_task_size", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": 
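Editorial note: `_set_metadata_task_size` above resolves a missing keyword from the dask config, with separate keys for local and remote filesystems. A hedged sketch of the same lookup using the public `dask.config` API (the filesystem check is reduced to a boolean parameter here purely for illustration):

    import dask

    def resolve_metadata_task_size(metadata_task_size, is_local_fs):
        # Respect an explicit user setting; otherwise consult the config.
        if metadata_task_size is not None:
            return metadata_task_size
        key = "dataframe.parquet.metadata-task-size-" + (
            "local" if is_local_fs else "remote"
        )
        # The second argument to dask.config.get is the fallback value
        # returned when the key is absent from the config.
        return dask.config.get(key, 0)

    with dask.config.set({"dataframe.parquet.metadata-task-size-remote": 16}):
        assert resolve_metadata_task_size(None, is_local_fs=False) == 16
    assert resolve_metadata_task_size(128, is_local_fs=True) == 128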
"1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__process_open_file_options__process_open_file_options.return.precache_options_open_fi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__process_open_file_options__process_open_file_options.return.precache_options_open_fi", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 693, "end_line": 728, "span_ids": ["_process_open_file_options"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _process_open_file_options(\n open_file_options,\n metadata=None,\n columns=None,\n row_groups=None,\n default_engine=None,\n default_cache=\"readahead\",\n allow_precache=True,\n):\n # Process `open_file_options`.\n # Set default values and extract `precache_options`\n open_file_options = (open_file_options or {}).copy()\n precache_options = open_file_options.pop(\"precache_options\", {}).copy()\n if not allow_precache:\n # Precaching not allowed\n # (probably because the file system is local)\n precache_options = {}\n if \"open_file_func\" not in open_file_options:\n if precache_options.get(\"method\", None) == \"parquet\":\n open_file_options[\"cache_type\"] = open_file_options.get(\n \"cache_type\", \"parts\"\n )\n precache_options.update(\n {\n \"metadata\": metadata,\n \"columns\": columns,\n \"row_groups\": row_groups,\n \"engine\": precache_options.get(\"engine\", default_engine),\n }\n )\n else:\n open_file_options[\"cache_type\"] = open_file_options.get(\n \"cache_type\", default_cache\n )\n open_file_options[\"mode\"] = open_file_options.get(\"mode\", \"rb\")\n return precache_options, open_file_options", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__split_user_options_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/utils.py__split_user_options_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 731, "end_line": 746, "span_ids": ["_split_user_options"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _split_user_options(**kwargs):\n # Check user-defined options.\n # Split into \"file\" and \"dataset\"-specific kwargs\n user_kwargs = kwargs.copy()\n dataset_options = {\n **user_kwargs.pop(\"file\", {}).copy(),\n **user_kwargs.pop(\"dataset\", {}).copy(),\n }\n read_options = 
user_kwargs.pop(\"read\", {}).copy()\n read_options[\"open_file_options\"] = user_kwargs.pop(\"open_file_options\", {}).copy()\n return (\n dataset_options,\n read_options,\n user_kwargs,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_warnings_read_sql_query.if_not_isinstance_index_c.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_warnings_read_sql_query.if_not_isinstance_index_c.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 95, "span_ids": ["imports", "read_sql_query"], "tokens": 781}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport dask\nfrom dask.delayed import tokenize\n\nfrom ... import delayed\nfrom .. import methods\nfrom .io import from_delayed, from_pandas\n\n\ndef read_sql_query(\n sql,\n con,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n \"\"\"\n Read SQL query into a DataFrame.\n\n If neither ``divisions`` or ``npartitions`` is given, the memory footprint of the\n first few rows will be determined, and partitions of size ~256MB will\n be used.\n\n Parameters\n ----------\n sql : SQLAlchemy Selectable\n SQL query to be executed. TextClause is not supported\n con : str\n Full sqlalchemy URI for the database connection\n index_col : str\n Column which becomes the index, and defines the partitioning. Should\n be a indexed column in the SQL server, and any orderable type. If the\n type is number or time, then partition boundaries can be inferred from\n ``npartitions`` or ``bytes_per_chunk``; otherwise must supply explicit\n ``divisions``.\n divisions: sequence\n Values of the index column to split the table by. If given, this will\n override ``npartitions`` and ``bytes_per_chunk``. The divisions are the value\n boundaries of the index column used to define the partitions. For\n example, ``divisions=list('acegikmoqsuwz')`` could be used to partition\n a string column lexographically into 12 partitions, with the implicit\n assumption that each partition contains similar numbers of records.\n npartitions : int\n Number of partitions, if ``divisions`` is not given. Will split the values\n of the index column linearly between ``limits``, if given, or the column\n max/min. The index column must be numeric or time for this to work\n limits: 2-tuple or None\n Manually give upper and lower range of values for use with ``npartitions``;\n if None, first fetches max/min from the DB. 
Upper limit, if\n given, is inclusive.\n bytes_per_chunk : str or int\n If both ``divisions`` and ``npartitions`` is None, this is the target size of\n each partition, in bytes\n head_rows : int\n How many rows to load for inferring the data-types, and memory per row\n meta : empty DataFrame or None\n If provided, do not attempt to infer dtypes, but use these, coercing\n all chunks on load\n engine_kwargs : dict or None\n Specific db engine parameters for sqlalchemy\n kwargs : dict\n Additional parameters to pass to `pd.read_sql()`\n\n Returns\n -------\n dask.dataframe\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n \"\"\"\n import sqlalchemy as sa\n\n if not isinstance(con, str):\n raise TypeError(\n \"'con' must be of type str, not \"\n + str(type(con))\n + \"Note: Dask does not support SQLAlchemy connectables here\"\n )\n if index_col is None:\n raise ValueError(\"Must specify index column to partition on\")\n if not isinstance(index_col, (str, sa.Column, sa.sql.elements.ColumnClause)):\n raise ValueError(\n \"'index_col' must be of type str or sa.Column, not \" + str(type(index_col))\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_query.if_not_head_rows_0__read_sql_query.return.from_delayed_parts_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_query.if_not_head_rows_0__read_sql_query.return.from_delayed_parts_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 96, "end_line": 185, "span_ids": ["read_sql_query"], "tokens": 791}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_sql_query(\n sql,\n con,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n # ... 
other code\n if not head_rows > 0:\n if meta is None:\n raise ValueError(\"Must provide 'meta' if 'head_rows' is 0\")\n if divisions is None and npartitions is None:\n raise ValueError(\n \"Must provide 'divisions' or 'npartitions' if 'head_rows' is 0\"\n )\n if divisions and npartitions:\n raise TypeError(\"Must supply either 'divisions' or 'npartitions', not both\")\n\n engine_kwargs = {} if engine_kwargs is None else engine_kwargs\n engine = sa.create_engine(con, **engine_kwargs)\n\n index = (\n sa.Column(index_col)\n if isinstance(index_col, str)\n else sa.Column(index_col.name, index_col.type)\n )\n\n kwargs[\"index_col\"] = index.name\n\n if head_rows > 0:\n # derive metadata from first few rows\n q = sql.limit(head_rows)\n head = pd.read_sql(q, engine, **kwargs)\n\n if len(head) == 0:\n # no results at all\n return from_pandas(head, npartitions=1)\n\n bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / head_rows\n if meta is None:\n meta = head.iloc[:0]\n\n if divisions is None:\n if limits is None:\n # calculate max and min for given index\n q = sa.sql.select(\n [sa.sql.func.max(index), sa.sql.func.min(index)]\n ).select_from(sql.subquery())\n minmax = pd.read_sql(q, engine)\n maxi, mini = minmax.iloc[0]\n dtype = minmax.dtypes[\"max_1\"]\n else:\n mini, maxi = limits\n dtype = pd.Series(limits).dtype\n\n if npartitions is None:\n q = sa.sql.select([sa.sql.func.count(index)]).select_from(sql.subquery())\n count = pd.read_sql(q, engine)[\"count_1\"][0]\n npartitions = (\n int(\n round(\n count * bytes_per_row / dask.utils.parse_bytes(bytes_per_chunk)\n )\n )\n or 1\n )\n if dtype.kind == \"M\":\n divisions = methods.tolist(\n pd.date_range(\n start=mini,\n end=maxi,\n freq=\"%iS\" % ((maxi - mini).total_seconds() / npartitions),\n )\n )\n divisions[0] = mini\n divisions[-1] = maxi\n elif dtype.kind in [\"i\", \"u\", \"f\"]:\n divisions = np.linspace(mini, maxi, npartitions + 1).tolist()\n else:\n raise TypeError(\n 'Provided index column is of type \"{}\". 
If divisions is not provided the '\n \"index column type must be numeric or datetime.\".format(dtype)\n )\n\n parts = []\n lowers, uppers = divisions[:-1], divisions[1:]\n for i, (lower, upper) in enumerate(zip(lowers, uppers)):\n cond = index <= upper if i == len(lowers) - 1 else index < upper\n q = sql.where(sa.sql.and_(index >= lower, cond))\n parts.append(\n delayed(_read_sql_chunk)(\n q, con, meta, engine_kwargs=engine_kwargs, **kwargs\n )\n )\n\n engine.dispose()\n\n return from_delayed(parts, meta, divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table_read_sql_table.from_sqlalchemy_import_sq": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table_read_sql_table.from_sqlalchemy_import_sq", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 269, "span_ids": ["read_sql_table"], "tokens": 740}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_sql_table(\n table_name,\n con,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n columns=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n schema=None,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n \"\"\"\n Read SQL database table into a DataFrame.\n\n If neither ``divisions`` or ``npartitions`` is given, the memory footprint of the\n first few rows will be determined, and partitions of size ~256MB will\n be used.\n\n Parameters\n ----------\n table_name : str\n Name of SQL table in database.\n con : str\n Full sqlalchemy URI for the database connection\n index_col : str\n Column which becomes the index, and defines the partitioning. Should\n be a indexed column in the SQL server, and any orderable type. If the\n type is number or time, then partition boundaries can be inferred from\n ``npartitions`` or ``bytes_per_chunk``; otherwise must supply explicit\n ``divisions``.\n columns : sequence of str or SqlAlchemy column or None\n Which columns to select; if None, gets all. Note can be a mix of str and SqlAlchemy columns\n schema : str or None\n Pass this to sqlalchemy to select which DB schema to use within the\n URI connection\n divisions: sequence\n Values of the index column to split the table by. If given, this will\n override ``npartitions`` and ``bytes_per_chunk``. The divisions are the value\n boundaries of the index column used to define the partitions. For\n example, ``divisions=list('acegikmoqsuwz')`` could be used to partition\n a string column lexographically into 12 partitions, with the implicit\n assumption that each partition contains similar numbers of records.\n npartitions : int\n Number of partitions, if ``divisions`` is not given. Will split the values\n of the index column linearly between ``limits``, if given, or the column\n max/min. 
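Editorial note: the partitioning loop above turns `divisions` into half-open index ranges, and only the final partition includes its upper bound, so the maximum index value is not dropped. A plain-Python sketch of that boundary rule (no SQLAlchemy; the predicates are rendered as strings for clarity):

    def partition_predicates(divisions, col="id"):
        # Consecutive division pairs become WHERE clauses; the last
        # range is closed on the right so the max value is included.
        lowers, uppers = divisions[:-1], divisions[1:]
        preds = []
        for i, (lo, hi) in enumerate(zip(lowers, uppers)):
            op = "<=" if i == len(lowers) - 1 else "<"
            preds.append(f"{col} >= {lo} AND {col} {op} {hi}")
        return preds

    assert partition_predicates([0, 10, 20]) == [
        "id >= 0 AND id < 10",
        "id >= 10 AND id <= 20",
    ]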
The index column must be numeric or time for this to work\n limits: 2-tuple or None\n Manually give upper and lower range of values for use with ``npartitions``;\n if None, first fetches max/min from the DB. Upper limit, if\n given, is inclusive.\n bytes_per_chunk : str or int\n If both ``divisions`` and ``npartitions`` is None, this is the target size of\n each partition, in bytes\n head_rows : int\n How many rows to load for inferring the data-types, and memory per row\n meta : empty DataFrame or None\n If provided, do not attempt to infer dtypes, but use these, coercing\n all chunks on load\n engine_kwargs : dict or None\n Specific db engine parameters for sqlalchemy\n kwargs : dict\n Additional parameters to pass to `pd.read_sql()`\n\n Returns\n -------\n dask.dataframe\n\n See Also\n --------\n read_sql_query : Read SQL query into a DataFrame.\n\n Examples\n --------\n >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',\n ... npartitions=10, index_col='id') # doctest: +SKIP\n \"\"\"\n import sqlalchemy as sa\n from sqlalchemy import sql\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.if_table_in_kwargs__read_sql_table.return.read_sql_query_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_table.if_table_in_kwargs__read_sql_table.return.read_sql_query_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 271, "end_line": 382, "span_ids": ["read_sql_table"], "tokens": 850}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_sql_table(\n table_name,\n con,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n columns=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n schema=None,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n # ... other code\n\n if \"table\" in kwargs:\n warnings.warn(\n \"The `table` keyword has been replaced by `table_name`. Please use `table_name` instead.\",\n DeprecationWarning,\n )\n table_name = kwargs.pop(\"table\")\n if \"uri\" in kwargs:\n warnings.warn(\n \"The `uri` keyword has been replaced by `con`. 
Please use `con` instead.\",\n            DeprecationWarning,\n        )\n        con = kwargs.pop(\"uri\")\n    deprecated_args = False\n    if not isinstance(table_name, str):\n        warnings.warn(\n            \"`read_sql_table` will no longer support {}; please use a `table_name` of type str instead \"\n            \"or use `read_sql_query`, if you are using a SQLAlchemy query\".format(\n                type(table_name)\n            ),\n            DeprecationWarning,\n        )\n        deprecated_args = True\n    if columns is not None:\n        for col in columns:\n            if not isinstance(col, (sa.Column, str)):\n                warnings.warn(\n                    \"`columns` will no longer support SQLAlchemy selectables; please use `read_sql_query` \"\n                    \"instead\",\n                    DeprecationWarning,\n                )\n                deprecated_args = True\n\n    if not _gt14():\n        warnings.warn(\n            \"Dask will soon require SQLAlchemy 1.4 or newer. \"\n            \"Please update your SQLAlchemy version. \"\n            \"Friendly note: Upgrading to SQLAlchemy 1.4 may break code. Do it with caution. \",\n            category=DeprecationWarning,\n        )\n    if deprecated_args or not _gt14():\n        return _old_read_sql_table(\n            table=table_name,\n            uri=con,\n            index_col=index_col,\n            divisions=divisions,\n            npartitions=npartitions,\n            limits=limits,\n            columns=columns,\n            bytes_per_chunk=bytes_per_chunk,\n            head_rows=head_rows,\n            schema=schema,\n            meta=meta,\n            engine_kwargs=engine_kwargs,\n            **kwargs,\n        )\n\n    if not isinstance(con, str):\n        raise TypeError(\n            \"`con` must be of type str, not \"\n            + str(type(con))\n            + \"Note: Dask does not support SQLAlchemy connectables here\"\n        )\n\n    engine_kwargs = {} if engine_kwargs is None else engine_kwargs\n    engine = sa.create_engine(con, **engine_kwargs)\n    m = sa.MetaData()\n    if isinstance(table_name, str):\n        table_name = sa.Table(\n            table_name, m, autoload=True, autoload_with=engine, schema=schema\n        )\n    else:\n        raise TypeError(\n            \"`table_name` must be of type str, not \" + str(type(table_name))\n        )\n    engine.dispose()\n\n    columns = (\n        [\n            (\n                sa.Column(c, table_name.columns[c].type)\n                if isinstance(c, str)\n                else sa.Column(c.name, c.type)\n            )\n            for c in columns\n        ]\n        if columns\n        else [sa.Column(c.name, c.type) for c in table_name.columns]\n    )\n    index = (\n        sa.Column(index_col, table_name.columns[index_col].type)\n        if isinstance(index_col, str)\n        else sa.Column(index_col.name, index_col.type)\n    )\n\n    if index.name not in [c.name for c in columns]:\n        columns.append(index)\n\n    query = sql.select(columns).select_from(table_name)\n\n    return read_sql_query(\n        sql=query,\n        con=con,\n        index_col=index,\n        divisions=divisions,\n        npartitions=npartitions,\n        limits=limits,\n        bytes_per_chunk=bytes_per_chunk,\n        head_rows=head_rows,\n        meta=meta,\n        engine_kwargs=engine_kwargs,\n        **kwargs,\n    )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_read_sql.if_isinstance_sql_str_.else_.return.read_sql_query_sql_con_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py_read_sql_read_sql.if_isinstance_sql_str_.else_.return.read_sql_query_sql_con_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 385, "end_line": 421, "span_ids": ["read_sql"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def read_sql(sql, con, index_col, **kwargs):\n \"\"\"\n Read SQL query or database table into a DataFrame.\n\n This function is a convenience wrapper around ``read_sql_table`` and\n ``read_sql_query``. It will delegate to the specific function depending\n on the provided input. A SQL query will be routed to ``read_sql_query``,\n while a database table name will be routed to ``read_sql_table``.\n Note that the delegated function might have more specific notes about\n their functionality not listed here.\n\n Parameters\n ----------\n sql : str or SQLAlchemy Selectable\n Name of SQL table in database or SQL query to be executed. TextClause is not supported\n con : str\n Full sqlalchemy URI for the database connection\n index_col : str\n Column which becomes the index, and defines the partitioning. Should\n be a indexed column in the SQL server, and any orderable type. If the\n type is number or time, then partition boundaries can be inferred from\n ``npartitions`` or ``bytes_per_chunk``; otherwise must supply explicit\n ``divisions``.\n\n Returns\n -------\n dask.dataframe\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql_query : Read SQL query into a DataFrame.\n \"\"\"\n if isinstance(sql, str):\n return read_sql_table(sql, con, index_col, **kwargs)\n else:\n return read_sql_query(sql, con, index_col, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__to_sql_chunk__gt14.if_.else_.return.True": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__to_sql_chunk__gt14.if_.else_.return.True", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 469, "span_ids": ["_gt14", "_to_sql_chunk"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _to_sql_chunk(d, uri, engine_kwargs=None, **kwargs):\n import sqlalchemy as sa\n\n engine_kwargs = engine_kwargs or {}\n engine = sa.create_engine(uri, **engine_kwargs)\n\n q = d.to_sql(con=engine, **kwargs)\n engine.dispose()\n\n return q\n\n\ndef _gt14() -> bool:\n \"\"\"\n Check if sqlalchemy.__version__ is at least 1.4.0, when several\n deprecations were made.\n \"\"\"\n import sqlalchemy\n\n if (\n sqlalchemy.__version__.startswith(\"0.\")\n or sqlalchemy.__version__.startswith(\"1.0\")\n or sqlalchemy.__version__.startswith(\"1.1\")\n or sqlalchemy.__version__.startswith(\"1.2\")\n or sqlalchemy.__version__.startswith(\"1.3\")\n ):\n return False\n else:\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table__old_read_sql_table._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table__old_read_sql_table._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 472, "end_line": 555, "span_ids": ["_old_read_sql_table"], "tokens": 845}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _old_read_sql_table(\n table,\n uri,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n columns=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n schema=None,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n \"\"\"\n Create dataframe from an SQL table.\n If neither divisions or npartitions is given, the memory footprint of the\n first few rows will be determined, and partitions of size ~256MB will\n be used.\n Parameters\n ----------\n table : string or sqlalchemy expression\n Select columns from here.\n uri : string\n Full sqlalchemy URI for the database connection\n index_col : string\n Column which becomes the index, and defines the partitioning. Should\n be a indexed column in the SQL server, and any orderable type. If the\n type is number or time, then partition boundaries can be inferred from\n npartitions or bytes_per_chunk; otherwide must supply explicit\n ``divisions=``.\n ``index_col`` could be a function to return a value, e.g.,\n ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n ``index_col=sql.func.abs(sql.column(\"value\")).label(\"abs(value)\")``, or\n ``index_col=cast(sql.column(\"id\"),types.BigInteger).label(\"id\")`` to convert\n the textfield ``id`` to ``BigInteger``.\n Note ``sql``, ``cast``, ``types`` methods comes from ``sqlalchemy`` module.\n Labeling columns created by functions or arithmetic operations is\n required.\n divisions: sequence\n Values of the index column to split the table by. If given, this will\n override npartitions and bytes_per_chunk. The divisions are the value\n boundaries of the index column used to define the partitions. For\n example, ``divisions=list('acegikmoqsuwz')`` could be used to partition\n a string column lexographically into 12 partitions, with the implicit\n assumption that each partition contains similar numbers of records.\n npartitions : int\n Number of partitions, if divisions is not given. Will split the values\n of the index column linearly between limits, if given, or the column\n max/min. The index column must be numeric or time for this to work\n limits: 2-tuple or None\n Manually give upper and lower range of values for use with npartitions;\n if None, first fetches max/min from the DB. 
Upper limit, if\n given, is inclusive.\n columns : list of strings or None\n Which columns to select; if None, gets all; can include sqlalchemy\n functions, e.g.,\n ``sql.func.abs(sql.column('value')).label('abs(value)')``.\n Labeling columns created by functions or arithmetic operations is\n recommended.\n bytes_per_chunk : str, int\n If both divisions and npartitions is None, this is the target size of\n each partition, in bytes\n head_rows : int\n How many rows to load for inferring the data-types, unless passing meta\n meta : empty DataFrame or None\n If provided, do not attempt to infer dtypes, but use these, coercing\n all chunks on load\n schema : str or None\n If using a table name, pass this to sqlalchemy to select which DB\n schema to use within the URI connection\n engine_kwargs : dict or None\n Specific db engine parameters for sqlalchemy\n kwargs : dict\n Additional parameters to pass to `pd.read_sql()`\n Returns\n -------\n dask.dataframe\n Examples\n --------\n >>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',\n ... npartitions=10, index_col='id') # doctest: +SKIP\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table.sa__old_read_sql_table.if_head_rows_0_.else_.if_divisions_is_None_and_.raise_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table.sa__old_read_sql_table.if_head_rows_0_.else_.if_divisions_is_None_and_.raise_ValueError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 556, "end_line": 620, "span_ids": ["_old_read_sql_table"], "tokens": 620}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _old_read_sql_table(\n table,\n uri,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n columns=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n schema=None,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n import sqlalchemy as sa\n from sqlalchemy import sql\n from sqlalchemy.sql import elements\n\n warnings.warn(\n \"You are using a compatibility version of `read_sql_table` that will be \"\n \"removed in a future version of dask. This function existst to support \"\n \"old versions of SQLAlchemy (< 1.4). This compatibility function is less \"\n \"stable than the new version. 
We recommend you update your code.\",\n DeprecationWarning,\n )\n\n if index_col is None:\n raise ValueError(\"Must specify index column to partition on\")\n\n engine_kwargs = {} if engine_kwargs is None else engine_kwargs\n engine = sa.create_engine(uri, **engine_kwargs)\n m = sa.MetaData()\n if isinstance(table, str):\n table = sa.Table(table, m, autoload=True, autoload_with=engine, schema=schema)\n\n index = table.columns[index_col] if isinstance(index_col, str) else index_col\n if not isinstance(index_col, (str, elements.Label)):\n raise ValueError(\n \"Use label when passing an SQLAlchemy instance as the index (%s)\" % index\n )\n if divisions and npartitions:\n raise TypeError(\"Must supply either divisions or npartitions, not both\")\n\n columns = (\n [(table.columns[c] if isinstance(c, str) else c) for c in columns]\n if columns\n else list(table.columns)\n )\n if index not in columns:\n columns.append(index)\n\n if isinstance(index_col, str):\n kwargs[\"index_col\"] = index_col\n else:\n # function names get pandas auto-named\n kwargs[\"index_col\"] = index_col.name\n\n if head_rows > 0:\n # derive metadata from first few rows\n q = sql.select(columns).limit(head_rows).select_from(table)\n head = pd.read_sql(q, engine, **kwargs)\n\n if len(head) == 0:\n # no results at all\n name = table.name\n schema = table.schema\n head = pd.read_sql_table(name, uri, schema=schema, index_col=index_col)\n return from_pandas(head, npartitions=1)\n\n bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / head_rows\n if meta is None:\n meta = head.iloc[:0]\n elif meta is None:\n raise ValueError(\"Must provide meta if head_rows is 0\")\n else:\n if divisions is None and npartitions is None:\n raise ValueError(\n \"Must provide divisions or npartitions when using explicit meta.\"\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table.if_divisions_is_None___old_read_sql_table.return.from_delayed_parts_meta_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/sql.py__old_read_sql_table.if_divisions_is_None___old_read_sql_table.return.from_delayed_parts_meta_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/sql.py", "file_name": "sql.py", "file_type": "text/x-python", "category": "implementation", "start_line": 622, "end_line": 677, "span_ids": ["_old_read_sql_table"], "tokens": 526}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _old_read_sql_table(\n table,\n uri,\n index_col,\n divisions=None,\n npartitions=None,\n limits=None,\n columns=None,\n bytes_per_chunk=\"256 MiB\",\n head_rows=5,\n schema=None,\n meta=None,\n engine_kwargs=None,\n **kwargs,\n):\n # ... 
other code\n\n if divisions is None:\n if limits is None:\n # calculate max and min for given index\n q = sql.select([sql.func.max(index), sql.func.min(index)]).select_from(\n table\n )\n minmax = pd.read_sql(q, engine)\n maxi, mini = minmax.iloc[0]\n dtype = minmax.dtypes[\"max_1\"]\n else:\n mini, maxi = limits\n dtype = pd.Series(limits).dtype\n\n if npartitions is None:\n q = sql.select([sql.func.count(index)]).select_from(table)\n count = pd.read_sql(q, engine)[\"count_1\"][0]\n npartitions = (\n int(\n round(\n count * bytes_per_row / dask.utils.parse_bytes(bytes_per_chunk)\n )\n )\n or 1\n )\n if dtype.kind == \"M\":\n divisions = methods.tolist(\n pd.date_range(\n start=mini,\n end=maxi,\n freq=\"%iS\" % ((maxi - mini).total_seconds() / npartitions),\n )\n )\n divisions[0] = mini\n divisions[-1] = maxi\n elif dtype.kind in [\"i\", \"u\", \"f\"]:\n divisions = np.linspace(mini, maxi, npartitions + 1).tolist()\n else:\n raise TypeError(\n 'Provided index column is of type \"{}\". If divisions is not provided the '\n \"index column type must be numeric or datetime.\".format(dtype)\n )\n\n parts = []\n lowers, uppers = divisions[:-1], divisions[1:]\n for i, (lower, upper) in enumerate(zip(lowers, uppers)):\n cond = index <= upper if i == len(lowers) - 1 else index < upper\n q = sql.select(columns).where(sql.and_(index >= lower, cond)).select_from(table)\n parts.append(\n delayed(_read_sql_chunk)(\n q, uri, meta, engine_kwargs=engine_kwargs, **kwargs\n )\n )\n\n engine.dispose()\n\n return from_delayed(parts, meta, divisions=divisions)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_nodes_test_to_hdf_modes_multiple_nodes.None_4.assert_eq_dd_concat_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_nodes_test_to_hdf_modes_multiple_nodes.None_4.assert_eq_dd_concat_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 232, "end_line": 277, "span_ids": ["test_to_hdf_modes_multiple_nodes"], "tokens": 498}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_modes_multiple_nodes():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n # appending a single partition to existing data\n a = dd.from_pandas(df, 1)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"a\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(dd.concat([df, df]), out)\n\n # overwriting a file with a single partition\n a = dd.from_pandas(df, 1)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"w\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)\n\n # appending two partitions to existing data\n a = 
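Editorial note: both `read_sql_query` and the legacy path above size partitions the same way, estimating bytes per row from the sampled head, multiplying by the row count, and dividing by `bytes_per_chunk`. A worked sketch of that arithmetic (the numbers are illustrative, the helper name is hypothetical):

    from dask.utils import parse_bytes

    def estimate_npartitions(row_count, bytes_per_row, bytes_per_chunk="256 MiB"):
        # Round to the nearest whole partition, but never return 0.
        return int(round(row_count * bytes_per_row / parse_bytes(bytes_per_chunk))) or 1

    # e.g. 10M rows at ~200 bytes/row is ~2 GB, i.e. about 7 chunks of 256 MiB
    assert estimate_npartitions(10_000_000, 200) == 7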
dd.from_pandas(df, 2)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"a\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(dd.concat([df, df]), out)\n\n # overwriting a file with two partitions\n a = dd.from_pandas(df, 2)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"w\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(df, out)\n\n # overwriting a single partition, keeping other partitions\n a = dd.from_pandas(df, 2)\n with tmpfile(\"h5\") as fn:\n a.to_hdf(fn, \"/data1\")\n a.to_hdf(fn, \"/data2\")\n a.to_hdf(fn, \"/data*\", mode=\"a\", append=False)\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(dd.concat([df, df]), out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_files_test_to_hdf_modes_multiple_files.None_3.assert_eq_dd_concat_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_hdf_modes_multiple_files_test_to_hdf_modes_multiple_files.None_3.assert_eq_dd_concat_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 280, "end_line": 320, "span_ids": ["test_to_hdf_modes_multiple_files"], "tokens": 473}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_hdf_modes_multiple_files():\n pytest.importorskip(\"tables\")\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]\n )\n\n # appending a single partition to existing data\n a = dd.from_pandas(df, 1)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data2\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"a\")\n out = dd.read_hdf(fn, \"/data*\")\n assert_eq(dd.concat([df, df]), out)\n\n # appending two partitions to existing data\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data2\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"a\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(dd.concat([df, df]), out)\n\n # overwriting a file with two partitions\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data1\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"w\")\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(df, out)\n\n # overwriting a single partition, keeping other partitions\n a = dd.from_pandas(df, 2)\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data*\")\n a.to_hdf(os.path.join(dn, \"data1\"), \"/data\")\n a.to_hdf(fn, \"/data\", mode=\"a\", append=False)\n out = dd.read_hdf(fn, \"/data\")\n assert_eq(dd.concat([df, df]), out)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
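Editorial note: the tests above exercise `to_hdf` with a glob-style key, where each partition expands `*` to its own node, and `mode="a"` versus `mode="w"` controls whether earlier nodes survive. A hedged usage sketch (assumes pytables is installed, as the tests require):

    import pandas as pd
    import dask.dataframe as dd
    from dask.utils import tmpfile

    df = pd.DataFrame({"x": list("abcd"), "y": [1, 2, 3, 4]})
    a = dd.from_pandas(df, npartitions=2)

    with tmpfile("h5") as fn:
        # '*' is replaced per partition: /data0, /data1, ...
        a.to_hdf(fn, "/data*")
        out = dd.read_hdf(fn, "/data*")
        assert len(out) == len(df)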
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_contextlib_test_meta_from_array._Should_be_5_partitions_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_contextlib_test_meta_from_array._Should_be_5_partitions_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 43, "span_ids": ["imports", "test_meta_from_array"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Lock\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport dask.array as da\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.io.io import _meta_from_array\nfrom dask.dataframe.utils import assert_eq, is_categorical_dtype\nfrom dask.delayed import Delayed, delayed\nfrom dask.utils import tmpfile\n\n####################\n# Arrays and BColz #\n####################\n\n\ndef test_meta_from_array():\n x = np.array([[1, 2], [3, 4]], dtype=np.int64)\n res = _meta_from_array(x)\n assert isinstance(res, pd.DataFrame)\n assert res[0].dtype == np.int64\n assert res[1].dtype == np.int64\n tm.assert_index_equal(res.columns, pd.Index([0, 1]))\n\n x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)\n res = _meta_from_array(x, columns=[\"a\", \"b\"])\n assert isinstance(res, pd.DataFrame)\n assert res[\"a\"].dtype == np.float64\n assert res[\"b\"].dtype == np.float64\n tm.assert_index_equal(res.columns, pd.Index([\"a\", \"b\"]))\n\n with pytest.raises(ValueError):\n _meta_from_array(x, columns=[\"a\", \"b\", \"c\"])\n\n np.random.seed(42)\n x = np.random.rand(201, 2)\n x = dd.from_array(x, chunksize=50, columns=[\"a\", \"b\"])\n assert len(x.divisions) == 6 # Should be 5 partitions and the end", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_with_record_dtype_check_bcolz_deprecation_warning.with_pytest_warns_FutureW.yield": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_array_with_record_dtype_check_bcolz_deprecation_warning.with_pytest_warns_FutureW.yield", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 119, "span_ids": ["check_bcolz_deprecation_warning", "test_from_array_with_record_dtype"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_array_with_record_dtype():\n x = np.array([(i, i * 10) for i in range(10)], dtype=[(\"a\", \"i4\"), (\"b\", \"i4\")])\n d = dd.from_array(x, chunksize=4)\n assert isinstance(d, dd.DataFrame)\n assert list(d.columns) == [\"a\", \"b\"]\n assert d.divisions == (0, 4, 8, 9)\n\n assert (d.compute().to_records(index=False) == x).all()\n\n\n@contextlib.contextmanager\ndef check_bcolz_deprecation_warning():\n with pytest.warns(FutureWarning, match=\"bcolz was deprecated\"):\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_multiple_threads_test_from_bcolz_multiple_threads.with_check_bcolz_deprecat.with_ThreadPoolExecutor_5.list_pool_map_check_rang": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_multiple_threads_test_from_bcolz_multiple_threads.with_check_bcolz_deprecat.with_ThreadPoolExecutor_5.list_pool_map_check_rang", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", "file_type": "text/x-python", "category": "test", "start_line": 122, "end_line": 152, "span_ids": ["test_from_bcolz_multiple_threads"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz_multiple_threads():\n bcolz = pytest.importorskip(\"bcolz\")\n\n def check(i):\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n\n d = dd.from_bcolz(t, chunksize=2)\n\n assert d.npartitions == 2\n assert is_categorical_dtype(d.dtypes[\"a\"])\n assert list(d.x.compute(scheduler=\"sync\")) == [1, 2, 3]\n assert list(d.a.compute(scheduler=\"sync\")) == [\"a\", \"b\", \"a\"]\n\n d = dd.from_bcolz(t, chunksize=2, index=\"x\")\n\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [1, 2, 3] or L == [1, 3, 2]\n\n # Names\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(\n dd.from_bcolz(t, chunksize=2).dask\n )\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(\n dd.from_bcolz(t, chunksize=3).dask\n )\n\n with check_bcolz_deprecation_warning():\n with ThreadPoolExecutor(5) as pool:\n list(pool.map(check, range(5)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_test_from_bcolz.with_check_bcolz_deprecat.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_io.py_test_from_bcolz_test_from_bcolz.with_check_bcolz_deprecat.None_8", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_io.py", "file_name": "test_io.py", 
"file_type": "text/x-python", "category": "test", "start_line": 155, "end_line": 188, "span_ids": ["test_from_bcolz"], "tokens": 388}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_from_bcolz():\n bcolz = pytest.importorskip(\"bcolz\")\n\n t = bcolz.ctable(\n [[1, 2, 3], [1.0, 2.0, 3.0], [\"a\", \"b\", \"a\"]], names=[\"x\", \"y\", \"a\"]\n )\n\n with check_bcolz_deprecation_warning():\n d = dd.from_bcolz(t, chunksize=2)\n assert d.npartitions == 2\n assert is_categorical_dtype(d.dtypes[\"a\"])\n assert list(d.x.compute(scheduler=\"sync\")) == [1, 2, 3]\n assert list(d.a.compute(scheduler=\"sync\")) == [\"a\", \"b\", \"a\"]\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [0, 1, 2]\n\n d = dd.from_bcolz(t, chunksize=2, index=\"x\")\n L = list(d.index.compute(scheduler=\"sync\"))\n assert L == [1, 2, 3] or L == [1, 3, 2]\n\n # Names\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(\n dd.from_bcolz(t, chunksize=2).dask\n )\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(\n dd.from_bcolz(t, chunksize=3).dask\n )\n\n dsk = dd.from_bcolz(t, chunksize=3).dask\n\n t.append((4, 4.0, \"b\"))\n t.flush()\n\n assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(dsk)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_json_test_read_json_with_path_column.with_tmpfile_json_as_f.assert_eq_actual_actual_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_json_test_read_json_with_path_column.with_tmpfile_json_as_f.assert_eq_actual_actual_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 29, "span_ids": ["imports", "test_read_json_with_path_column"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import json\nimport os\n\nimport pandas as pd\nimport pytest\n\nimport dask\nimport dask.dataframe as dd\nfrom dask.dataframe.utils import assert_eq\nfrom dask.utils import tmpdir, tmpfile\n\ndf = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [1, 2, 3, 4]})\nddf = dd.from_pandas(df, npartitions=2)\n\n\n@pytest.mark.parametrize(\"orient\", [\"split\", \"records\", \"index\", \"columns\", \"values\"])\ndef test_read_json_with_path_column(orient):\n with tmpfile(\"json\") as f:\n df.to_json(f, orient=orient, lines=False)\n actual = dd.read_json(f, orient=orient, lines=False, include_path_column=True)\n actual_pd = pd.read_json(f, orient=orient, lines=False)\n # The default column name when include_path_colum is True is \"path\"\n # The paths on Windows are converted to 
forward slash somewhere in the file\n # reading chain in Dask, so we have to do the same here.\n actual_pd[\"path\"] = pd.Series(\n (f.replace(os.sep, \"/\"),) * len(actual_pd), dtype=\"category\"\n )\n assert actual.path.dtype == \"category\"\n assert_eq(actual, actual_pd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_path_column_with_duplicate_name_is_error_test_write_orient_not_records_and_lines.with_tmpfile_json_as_f.with_pytest_raises_ValueE.dd_to_json_ddf_f_orient": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_path_column_with_duplicate_name_is_error_test_write_orient_not_records_and_lines.with_tmpfile_json_as_f.with_pytest_raises_ValueE.dd_to_json_ddf_f_orient", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 32, "end_line": 69, "span_ids": ["test_write_orient_not_records_and_lines", "test_read_json_path_column_with_duplicate_name_is_error", "test_read_orient_not_records_and_lines", "test_read_json_with_path_converter"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_read_json_path_column_with_duplicate_name_is_error():\n with tmpfile(\"json\") as f:\n df.to_json(f, orient=\"records\", lines=False)\n with pytest.raises(ValueError, match=\"Files already contain\"):\n dd.read_json(f, orient=\"records\", lines=False, include_path_column=\"x\")\n\n\ndef test_read_json_with_path_converter():\n path_column_name = \"filenames\"\n\n def path_converter(x):\n return \"asdf.json\"\n\n with tmpfile(\"json\") as f:\n df.to_json(f, orient=\"records\", lines=False)\n actual = dd.read_json(\n f,\n orient=\"records\",\n lines=False,\n include_path_column=path_column_name,\n path_converter=path_converter,\n )\n actual_pd = pd.read_json(f, orient=\"records\", lines=False)\n actual_pd[path_column_name] = pd.Series(\n (path_converter(f),) * len(actual_pd), dtype=\"category\"\n )\n assert_eq(actual, actual_pd)\n\n\ndef test_read_orient_not_records_and_lines():\n with pytest.raises(ValueError, match=\"Line-delimited JSON\"):\n dd.read_json(\"nofile.json\", orient=\"split\", lines=True)\n\n\ndef test_write_orient_not_records_and_lines():\n with tmpfile(\"json\") as f:\n with pytest.raises(ValueError, match=\"Line-delimited JSON\"):\n dd.to_json(ddf, f, orient=\"split\", lines=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_multiple_files_with_path_column_test_read_json_multiple_files_with_path_column.assert_eq_res_sol_check": {"__data__": {"id_": 
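Editorial note: as the tests above show, `include_path_column` appends a categorical column holding each row's originating file path, defaulting to the name "path"; a custom name is allowed unless it collides with an existing column, which raises. A short usage sketch (the column name "source" is illustrative):

    import pandas as pd
    import dask.dataframe as dd
    from dask.utils import tmpfile

    df = pd.DataFrame({"x": ["a", "b"], "y": [1, 2]})

    with tmpfile("json") as f:
        df.to_json(f, orient="records", lines=True)
        got = dd.read_json(f, orient="records", lines=True,
                           include_path_column="source")
        # The new column is categorical and holds the originating file path
        assert got["source"].dtype == "category"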
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_multiple_files_with_path_column_test_read_json_multiple_files_with_path_column.assert_eq_res_sol_check", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 72, "end_line": 93, "span_ids": ["test_read_json_multiple_files_with_path_column"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"blocksize\", [5, 15, 33, 200, 90000])\ndef test_read_json_multiple_files_with_path_column(blocksize, tmpdir):\n fil1 = str(tmpdir.join(\"fil1.json\")).replace(os.sep, \"/\")\n fil2 = str(tmpdir.join(\"fil2.json\")).replace(os.sep, \"/\")\n df = pd.DataFrame({\"x\": range(5), \"y\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n df2 = df.assign(x=df.x + 0.5)\n orient = \"records\"\n lines = True\n df.to_json(fil1, orient=orient, lines=lines)\n df2.to_json(fil2, orient=orient, lines=lines)\n path_dtype = pd.CategoricalDtype((fil1, fil2))\n df[\"path\"] = pd.Series((fil1,) * len(df), dtype=path_dtype)\n df2[\"path\"] = pd.Series((fil2,) * len(df2), dtype=path_dtype)\n sol = pd.concat([df, df2])\n res = dd.read_json(\n str(tmpdir.join(\"fil*.json\")),\n orient=orient,\n lines=lines,\n include_path_column=True,\n blocksize=blocksize,\n )\n assert_eq(res, sol, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_basic_test_read_json_basic.with_tmpfile_json_as_f.assert_eq_actual_df_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_read_json_basic_test_read_json_basic.with_tmpfile_json_as_f.assert_eq_actual_df_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 96, "end_line": 106, "span_ids": ["test_read_json_basic"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"orient\", [\"split\", \"records\", \"index\", \"columns\", \"values\"])\ndef test_read_json_basic(orient):\n with tmpfile(\"json\") as f:\n df.to_json(f, orient=orient, lines=False)\n actual = dd.read_json(f, orient=orient, lines=False)\n actual_pd = pd.read_json(f, orient=orient, lines=False)\n\n assert_eq(actual, actual_pd)\n if orient == \"values\":\n actual.columns = list(df.columns)\n assert_eq(actual, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_json_compressed_test_read_json_inferred_compression.with_tmpdir_as_path_.assert_eq_df_actual_che": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_json.py_test_json_compressed_test_read_json_inferred_compression.with_tmpdir_as_path_.assert_eq_df_actual_che", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_json.py", "file_name": "test_json.py", "file_type": "text/x-python", "category": "test", "start_line": 203, "end_line": 216, "span_ids": ["test_read_json_inferred_compression", "test_json_compressed"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"compression\", [None, \"gzip\", \"xz\"])\ndef test_json_compressed(compression):\n with tmpdir() as path:\n dd.to_json(ddf, path, compression=compression)\n actual = dd.read_json(os.path.join(path, \"*\"), compression=compression)\n assert_eq(df, actual, check_index=False)\n\n\ndef test_read_json_inferred_compression():\n with tmpdir() as path:\n fn = os.path.join(path, \"*.json.gz\")\n dd.to_json(ddf, fn, compression=\"gzip\")\n actual = dd.read_json(fn)\n assert_eq(df, actual, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_layer_creation_info_test_layer_creation_info.assert_eq_ddf1_ddf3_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_layer_creation_info_test_layer_creation_info.assert_eq_ddf1_ddf3_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2507, "end_line": 2530, "span_ids": ["test_layer_creation_info"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_layer_creation_info(tmpdir, engine):\n df = pd.DataFrame({\"a\": range(10), \"b\": [\"cat\", \"dog\"] * 5})\n dd.from_pandas(df, npartitions=1).to_parquet(\n tmpdir, engine=engine, partition_on=[\"b\"]\n )\n\n # Apply filters directly in dd.read_parquet\n filters = [(\"b\", \"==\", \"cat\")]\n ddf1 = dd.read_parquet(tmpdir, engine=engine, filters=filters)\n assert \"dog\" not in ddf1[\"b\"].compute()\n\n # Results will not match if we use dd.read_parquet\n # without filters\n ddf2 = dd.read_parquet(tmpdir, engine=engine)\n with pytest.raises(AssertionError):\n assert_eq(ddf1, ddf2)\n\n # However, we can use 
`creation_info` to regenerate\n # the same collection with `filters` defined\n info = ddf2.dask.layers[ddf2._name].creation_info\n kwargs = info.get(\"kwargs\", {})\n kwargs[\"filters\"] = filters\n ddf3 = info[\"func\"](*info.get(\"args\", []), **kwargs)\n assert_eq(ddf1, ddf3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_extra_connection_engine_keywords_test_extra_connection_engine_keywords.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_extra_connection_engine_keywords_test_extra_connection_engine_keywords.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 364, "end_line": 384, "span_ids": ["test_extra_connection_engine_keywords"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_extra_connection_engine_keywords(caplog, db):\n data = read_sql_table(\n \"test\", db, npartitions=2, index_col=\"number\", engine_kwargs={\"echo\": False}\n ).compute()\n # no captured message from the stdout with the echo=False parameter (this is the default)\n out = \"\\n\".join(r.message for r in caplog.records)\n assert out == \"\"\n assert_eq(data, df)\n # with the echo=True sqlalchemy parameter, you should get all SQL queries in the stdout\n data = read_sql_table(\n \"test\", db, npartitions=2, index_col=\"number\", engine_kwargs={\"echo\": True}\n ).compute()\n out = \"\\n\".join(r.message for r in caplog.records)\n assert \"WHERE\" in out\n assert \"FROM\" in out\n assert \"SELECT\" in out\n assert \"AND\" in out\n assert \">= ?\" in out\n assert \"< ?\" in out\n assert \"<= ?\" in out\n assert_eq(data, df)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_test_query.assert_eq_out_df_loc_5_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_test_query.assert_eq_out_df_loc_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 387, "end_line": 409, "span_ids": ["test_query"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_query(db):\n import sqlalchemy as sa\n from sqlalchemy import 
sql\n\n s1 = sql.select([sql.column(\"number\"), sql.column(\"name\")]).select_from(\n sql.table(\"test\")\n )\n out = read_sql_query(s1, db, npartitions=2, index_col=\"number\")\n assert_eq(out, df[[\"name\"]])\n\n s2 = (\n sql.select(\n [\n sa.cast(sql.column(\"number\"), sa.types.BigInteger).label(\"number\"),\n sql.column(\"name\"),\n ]\n )\n .where(sql.column(\"number\") >= 5)\n .select_from(sql.table(\"test\"))\n )\n\n out = read_sql_query(s2, db, npartitions=2, index_col=\"number\")\n assert_eq(out, df.loc[5:, [\"name\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_index_from_query_test_query_index_from_query.assert_eq_out_lenname_df": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_index_from_query_test_query_index_from_query.assert_eq_out_lenname_df", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 412, "end_line": 425, "span_ids": ["test_query_index_from_query"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_query_index_from_query(db):\n from sqlalchemy import sql\n\n number = sql.column(\"number\")\n name = sql.column(\"name\")\n s1 = sql.select([number, name, sql.func.length(name).label(\"lenname\")]).select_from(\n sql.table(\"test\")\n )\n out = read_sql_query(s1, db, npartitions=2, index_col=\"lenname\")\n\n lenname_df = df.copy()\n lenname_df[\"lenname\"] = lenname_df[\"name\"].str.len()\n lenname_df = lenname_df.reset_index().set_index(\"lenname\")\n assert_eq(out, lenname_df.loc[:, [\"number\", \"name\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_with_meta_test_query_with_meta.assert_eq_out_df_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_query_with_meta_test_query_with_meta.assert_eq_out_df_name_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 428, "end_line": 443, "span_ids": ["test_query_with_meta"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_query_with_meta(db):\n from sqlalchemy import sql\n\n data = {\n \"name\": 
pd.Series([], name=\"name\", dtype=\"str\"),\n \"age\": pd.Series([], name=\"age\", dtype=\"int\"),\n }\n index = pd.Index([], name=\"number\", dtype=\"int\")\n meta = pd.DataFrame(data, index=index)\n\n s1 = sql.select(\n [sql.column(\"number\"), sql.column(\"name\"), sql.column(\"age\")]\n ).select_from(sql.table(\"test\"))\n out = read_sql_query(s1, db, npartitions=2, index_col=\"number\", meta=meta)\n # Don't check dtype for windows https://github.com/dask/dask/issues/8620\n assert_eq(out, df[[\"name\", \"age\"]], check_dtype=sys.platform != \"win32\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_character_index_without_divisions_tmp_db_uri.with_tmpfile_as_f_.yield_sqlite_s_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_no_character_index_without_divisions_tmp_db_uri.with_tmpfile_as_f_.yield_sqlite_s_f", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 446, "end_line": 471, "span_ids": ["test_read_sql", "tmp_db_uri", "test_no_character_index_without_divisions"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_character_index_without_divisions(db):\n\n # attempt to read the sql table with a character index and no divisions\n with pytest.raises(TypeError):\n read_sql_table(\"test\", db, npartitions=2, index_col=\"name\", divisions=None)\n\n\ndef test_read_sql(db):\n from sqlalchemy import sql\n\n s = sql.select([sql.column(\"number\"), sql.column(\"name\")]).select_from(\n sql.table(\"test\")\n )\n out = read_sql(s, db, npartitions=2, index_col=\"number\")\n assert_eq(out, df[[\"name\"]])\n\n data = read_sql_table(\"test\", db, npartitions=2, index_col=\"number\").compute()\n assert (data.name == df.name).all()\n assert data.index.name == \"number\"\n assert_eq(data, df)\n\n\n@contextmanager\ndef tmp_db_uri():\n with tmpfile() as f:\n yield \"sqlite:///%s\" % f", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_test_to_sql.None_5.assert_actual_npartiti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_test_to_sql.None_5.assert_actual_npartiti", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 474, "end_line": 544, "span_ids": ["test_to_sql"], "tokens": 633}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", (1, 2))\n@pytest.mark.parametrize(\"parallel\", (False, True))\ndef test_to_sql(npartitions, parallel):\n df_by_age = df.set_index(\"age\")\n df_appended = pd.concat(\n [\n df,\n df,\n ]\n )\n\n ddf = dd.from_pandas(df, npartitions)\n ddf_by_age = ddf.set_index(\"age\")\n\n # Simple round trip test: use existing \"number\" index_col\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, parallel=parallel)\n result = read_sql_table(\"test\", uri, \"number\")\n assert_eq(df, result)\n\n # Test writing no index, and reading back in with one of the other columns as index (`read_sql_table` requires\n # an index_col)\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, parallel=parallel, index=False)\n\n result = read_sql_table(\"test\", uri, \"negish\")\n assert_eq(df.set_index(\"negish\"), result)\n\n result = read_sql_table(\"test\", uri, \"age\")\n assert_eq(df_by_age, result)\n\n # Index by \"age\" instead\n with tmp_db_uri() as uri:\n ddf_by_age.to_sql(\"test\", uri, parallel=parallel)\n result = read_sql_table(\"test\", uri, \"age\")\n assert_eq(df_by_age, result)\n\n # Index column can't have \"object\" dtype if no partitions are provided\n with tmp_db_uri() as uri:\n ddf.set_index(\"name\").to_sql(\"test\", uri)\n with pytest.raises(\n TypeError,\n match='Provided index column is of type \"object\". If divisions is not provided the index column type must be numeric or datetime.', # noqa: E501\n ):\n read_sql_table(\"test\", uri, \"name\")\n\n # Test various \"if_exists\" values\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri)\n\n # Writing a table that already exists fails\n with pytest.raises(ValueError, match=\"Table 'test' already exists\"):\n ddf.to_sql(\"test\", uri)\n\n ddf.to_sql(\"test\", uri, parallel=parallel, if_exists=\"append\")\n result = read_sql_table(\"test\", uri, \"number\")\n\n assert_eq(df_appended, result)\n\n ddf_by_age.to_sql(\"test\", uri, parallel=parallel, if_exists=\"replace\")\n result = read_sql_table(\"test\", uri, \"age\")\n assert_eq(df_by_age, result)\n\n # Verify number of partitions returned, when compute=False\n with tmp_db_uri() as uri:\n result = ddf.to_sql(\"test\", uri, parallel=parallel, compute=False)\n\n # the first result is from the \"meta\" insert\n actual = len(result.compute())\n\n assert actual == npartitions", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_kwargs_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_sql.py_test_to_sql_kwargs_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "file_type": "text/x-python", "category": "test", "start_line": 547, "end_line": 572, "span_ids": ["test_to_sql_engine_kwargs", "test_to_sql_kwargs"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_sql_kwargs():\n ddf = dd.from_pandas(df, 2)\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, method=\"multi\")\n with pytest.raises(\n TypeError, match=\"to_sql\\\\(\\\\) got an unexpected keyword argument 'unknown'\"\n ):\n ddf.to_sql(\"test\", uri, unknown=None)\n\n\ndef test_to_sql_engine_kwargs(caplog):\n ddf = dd.from_pandas(df, 2)\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, engine_kwargs={\"echo\": False})\n logs = \"\\n\".join(r.message for r in caplog.records)\n assert logs == \"\"\n assert_eq(df, read_sql_table(\"test\", uri, \"number\"))\n\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, engine_kwargs={\"echo\": True})\n logs = \"\\n\".join(r.message for r in caplog.records)\n assert \"CREATE\" in logs\n assert \"INSERT\" in logs\n\n assert_eq(df, read_sql_table(\"test\", uri, \"number\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__meta_from_dtypes__set_context.return.stack_enter_context_obj_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__meta_from_dtypes__set_context.return.stack_enter_context_obj_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 68, "end_line": 119, "span_ids": ["_guid", "_set_context", "_meta_from_dtypes"], "tokens": 391}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _meta_from_dtypes(to_read_columns, file_dtypes, index_cols, column_index_names):\n \"\"\"Get the final metadata for the dask.dataframe\n\n Parameters\n ----------\n to_read_columns : list\n All the columns to end up with, including index names\n file_dtypes : dict\n Mapping from column name to dtype for every element\n of ``to_read_columns``\n index_cols : list\n Subset of ``to_read_columns`` that should move to the\n index\n column_index_names : list\n The values for df.columns.name for a MultiIndex in the\n columns, or df.index.name for a regular Index in the columns\n\n Returns\n -------\n meta : DataFrame\n \"\"\"\n data = {\n c: pd.Series([], dtype=file_dtypes.get(c, \"int64\")) for c in to_read_columns\n }\n indexes = [data.pop(c) for c in index_cols or []]\n if len(indexes) == 0:\n index = None\n elif len(index_cols) == 1:\n index = indexes[0]\n # XXX: this means we can't roundtrip dataframes where the index names\n # is actually __index_level_0__\n if index_cols[0] != \"__index_level_0__\":\n index.name = index_cols[0]\n else:\n index = pd.MultiIndex.from_arrays(indexes, names=index_cols)\n df = pd.DataFrame(data, index=index)\n\n if column_index_names:\n df.columns.names = column_index_names\n return df\n\n\ndef _guid():\n \"\"\"Simple utility function to get random hex string\"\"\"\n return uuid4().hex\n\n\ndef _set_context(obj, 
stack):\n \"\"\"Helper function to place an object on a context stack\"\"\"\n if stack is None:\n return obj\n return stack.enter_context(obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__open_input_files_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/utils.py__open_input_files_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 122, "end_line": 198, "span_ids": ["_open_input_files"], "tokens": 644}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _open_input_files(\n paths,\n fs=None,\n context_stack=None,\n open_file_func=None,\n precache_options=None,\n **kwargs,\n):\n \"\"\"Return a list of open-file objects given\n a list of input-file paths.\n\n WARNING: This utility is experimental, and is meant\n for internal ``dask.dataframe`` use only.\n\n Parameters\n ----------\n paths : list(str)\n Remote or local paths of the input files\n fs : fsspec object, optional\n File-system instance to use for file handling\n context_stack : contextlib.ExitStack, optional\n Context manager to use for open files.\n open_file_func : callable, optional\n Callable function to use for file opening. If this argument\n is specified, ``open_file_func(path, **kwargs)`` will be used\n to open each file in ``paths``. Default is ``fs.open``.\n precache_options : dict, optional\n Dictionary of keyword arguments to use for precaching.\n If ``precache_options`` contains ``{\"method\": \"parquet\"}``,\n ``fsspec.parquet.open_parquet_file`` will be used for remote\n storage.\n **kwargs :\n Keyword arguments to pass to the appropriate open function\n \"\"\"\n # Use callback function if specified\n if open_file_func is not None:\n return [\n _set_context(open_file_func(path, **kwargs), context_stack)\n for path in paths\n ]\n\n # Check if we are using `fsspec.parquet`.\n # In the future, fsspec should be able to handle\n # `{\"method\": \"parquet\"}`. 
However, for now we\n # will redirect to `open_parquet_file` manually\n precache_options = (precache_options or {}).copy()\n precache = precache_options.pop(\"method\", None)\n if (\n precache == \"parquet\"\n and fs is not None\n and not _is_local_fs(fs)\n and parse_version(fsspec.__version__) > parse_version(\"2021.11.0\")\n ):\n kwargs.update(precache_options)\n row_groups = kwargs.pop(\"row_groups\", None) or ([None] * len(paths))\n cache_type = kwargs.pop(\"cache_type\", \"parts\")\n if cache_type != \"parts\":\n raise ValueError(\n f\"'parts' `cache_type` required for 'parquet' precaching,\"\n f\" got {cache_type}.\"\n )\n return [\n _set_context(\n fsspec_parquet.open_parquet_file(\n path,\n fs=fs,\n row_groups=rgs,\n **kwargs,\n ),\n context_stack,\n )\n for path, rgs in zip(paths, row_groups)\n ]\n elif fs is not None:\n return [_set_context(fs.open(path, **kwargs), context_stack) for path in paths]\n return [_set_context(open(path, **kwargs), context_stack) for path in paths]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py__cum_aggregate_apply_pivot_agg_last.return.df_groupby_level_0_last_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py__cum_aggregate_apply_pivot_agg_last.return.df_groupby_level_0_last_", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 277, "end_line": 410, "span_ids": ["cummax_aggregate", "cumsum_aggregate", "fillna_check", "unique", "pivot_agg_last", "_cum_aggregate_apply", "value_counts_aggregate", "values", "cummin_aggregate", "pivot_agg_first", "assign", "size", "value_counts_combine", "sample", "drop_columns", "pivot_agg", "cumprod_aggregate", "nbytes"], "tokens": 820}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _cum_aggregate_apply(aggregate, x, y):\n \"\"\"Apply aggregation function within a cumulative aggregation\n\n Parameters\n ----------\n aggregate: function (a, a) -> a\n The aggregation function, like add, which is used to combine subsequent\n results\n x:\n y:\n \"\"\"\n if y is None:\n return x\n else:\n return aggregate(x, y)\n\n\ndef cumsum_aggregate(x, y):\n if x is None:\n return y\n elif y is None:\n return x\n else:\n return x + y\n\n\ndef cumprod_aggregate(x, y):\n if x is None:\n return y\n elif y is None:\n return x\n else:\n return x * y\n\n\ndef cummin_aggregate(x, y):\n if is_series_like(x) or is_dataframe_like(x):\n return x.where((x < y) | x.isnull(), y, axis=x.ndim - 1)\n else: # scalar\n return x if x < y else y\n\n\ndef cummax_aggregate(x, y):\n if is_series_like(x) or is_dataframe_like(x):\n return x.where((x > y) | x.isnull(), y, axis=x.ndim - 1)\n else: # scalar\n return x if x > y else y\n\n\ndef assign(df, *pairs):\n # Only deep copy when updating an element\n # (to avoid modifying the original)\n pairs = dict(partition(2, pairs))\n deep = bool(set(pairs) & set(df.columns))\n 
df = df.copy(deep=bool(deep))\n for name, val in pairs.items():\n df[name] = val\n return df\n\n\ndef unique(x, series_name=None):\n out = x.unique()\n # out can be either an np.ndarray or may already be a series\n # like object. When out is an np.ndarray, it must be wrapped.\n if not (is_series_like(out) or is_index_like(out)):\n out = pd.Series(out, name=series_name)\n return out\n\n\ndef value_counts_combine(x, sort=True, ascending=False, **groupby_kwargs):\n # sort and ascending don't actually matter until the agg step\n return x.groupby(level=0, **groupby_kwargs).sum()\n\n\ndef value_counts_aggregate(\n x, sort=True, ascending=False, normalize=False, total_length=None, **groupby_kwargs\n):\n out = value_counts_combine(x, **groupby_kwargs)\n if normalize:\n out /= total_length if total_length is not None else out.sum()\n if sort:\n return out.sort_values(ascending=ascending)\n return out\n\n\ndef nbytes(x):\n return x.nbytes\n\n\ndef size(x):\n return x.size\n\n\ndef values(df):\n return df.values\n\n\ndef sample(df, state, frac, replace):\n rs = np.random.RandomState(state)\n return df.sample(random_state=rs, frac=frac, replace=replace) if len(df) > 0 else df\n\n\ndef drop_columns(df, columns, dtype):\n df = df.drop(columns, axis=1)\n df.columns = df.columns.astype(dtype)\n return df\n\n\ndef fillna_check(df, method, check=True):\n out = df.fillna(method=method)\n if check and out.isnull().values.all(axis=0).any():\n raise ValueError(\n \"All NaN partition encountered in `fillna`. Try \"\n \"using ``df.repartition`` to increase the partition \"\n \"size, or specify `limit` in `fillna`.\"\n )\n return out\n\n\n# ---------------------------------\n# reshape\n# ---------------------------------\n\n\ndef pivot_agg(df):\n return df.groupby(level=0).sum()\n\n\ndef pivot_agg_first(df):\n return df.groupby(level=0).first()\n\n\ndef pivot_agg_last(df):\n return df.groupby(level=0).last()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_pivot_sum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/methods.py_pivot_sum_", "embedding": null, "metadata": {"file_path": "dask/dataframe/methods.py", "file_name": "methods.py", "file_type": "text/x-python", "category": "implementation", "start_line": 413, "end_line": 473, "span_ids": ["monotonic_decreasing_chunk", "pivot_last", "pivot_sum", "monotonic_decreasing_aggregate", "pivot_count", "pivot_first", "assign_index", "monotonic_increasing_chunk", "monotonic_increasing_aggregate"], "tokens": 471}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pivot_sum(df, index, columns, values):\n return pd.pivot_table(\n df, index=index, columns=columns, values=values, aggfunc=\"sum\", dropna=False\n )\n\n\ndef pivot_count(df, index, columns, values):\n # we cannot determine dtype until concatenating all partitions.\n # make dtype deterministic, always coerce to np.float64\n return pd.pivot_table(\n df, index=index, columns=columns, values=values, 
aggfunc=\"count\", dropna=False\n ).astype(np.float64)\n\n\ndef pivot_first(df, index, columns, values):\n return pd.pivot_table(\n df, index=index, columns=columns, values=values, aggfunc=\"first\", dropna=False\n )\n\n\ndef pivot_last(df, index, columns, values):\n return pd.pivot_table(\n df, index=index, columns=columns, values=values, aggfunc=\"last\", dropna=False\n )\n\n\ndef assign_index(df, ind):\n df = df.copy()\n df.index = ind\n return df\n\n\ndef monotonic_increasing_chunk(x):\n data = x if is_index_like(x) else x.iloc\n return pd.DataFrame(\n data=[[x.is_monotonic_increasing, data[0], data[-1]]],\n columns=[\"monotonic\", \"first\", \"last\"],\n )\n\n\ndef monotonic_increasing_aggregate(concatenated):\n bounds_are_monotonic = pd.Series(\n concatenated[[\"first\", \"last\"]].to_numpy().ravel()\n ).is_monotonic_increasing\n return concatenated[\"monotonic\"].all() and bounds_are_monotonic\n\n\ndef monotonic_decreasing_chunk(x):\n data = x if is_index_like(x) else x.iloc\n return pd.DataFrame(\n data=[[x.is_monotonic_decreasing, data[0], data[-1]]],\n columns=[\"monotonic\", \"first\", \"last\"],\n )\n\n\ndef monotonic_decreasing_aggregate(concatenated):\n bounds_are_monotonic = pd.Series(\n concatenated[[\"first\", \"last\"]].to_numpy().ravel()\n ).is_monotonic_decreasing\n return concatenated[\"monotonic\"].all() and bounds_are_monotonic", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_3_pivot_table.if_aggfunc_in_count_.pv_count.apply_concat_apply_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_None_3_pivot_table.if_aggfunc_in_count_.pv_count.apply_concat_apply_", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 181, "end_line": 291, "span_ids": ["get_dummies", "pivot_table"], "tokens": 793}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "###############################################################\n# Pivot table\n###############################################################\n\n\ndef pivot_table(df, index=None, columns=None, values=None, aggfunc=\"mean\"):\n \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame. 
Target ``columns``\n must have category dtype to infer result's ``columns``.\n ``index``, ``columns``, and ``aggfunc`` must be all scalar.\n ``values`` can be scalar or list-like.\n\n Parameters\n ----------\n df : DataFrame\n index : scalar\n column to be index\n columns : scalar\n column to be columns\n values : scalar or list(scalar)\n column(s) to aggregate\n aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'\n\n Returns\n -------\n table : DataFrame\n\n See Also\n --------\n pandas.DataFrame.pivot_table\n \"\"\"\n\n if not is_scalar(index) or index is None:\n raise ValueError(\"'index' must be the name of an existing column\")\n if not is_scalar(columns) or columns is None:\n raise ValueError(\"'columns' must be the name of an existing column\")\n if not methods.is_categorical_dtype(df[columns]):\n raise ValueError(\"'columns' must be category dtype\")\n if not has_known_categories(df[columns]):\n raise ValueError(\n \"'columns' must have known categories. Please use \"\n \"`df[columns].cat.as_known()` beforehand to ensure \"\n \"known categories\"\n )\n if not (\n is_list_like(values)\n and all([is_scalar(v) for v in values])\n or is_scalar(values)\n ):\n raise ValueError(\"'values' must refer to an existing column or columns\")\n\n available_aggfuncs = [\"mean\", \"sum\", \"count\", \"first\", \"last\"]\n\n if not is_scalar(aggfunc) or aggfunc not in available_aggfuncs:\n raise ValueError(\n \"aggfunc must be either \" + \", \".join(f\"'{x}'\" for x in available_aggfuncs)\n )\n\n # _emulate can't work for empty data\n # the result must have CategoricalIndex columns\n\n columns_contents = pd.CategoricalIndex(df[columns].cat.categories, name=columns)\n if is_scalar(values):\n new_columns = columns_contents\n else:\n new_columns = pd.MultiIndex.from_product(\n (sorted(values), columns_contents), names=[None, columns]\n )\n\n if aggfunc in [\"first\", \"last\"]:\n # Infer datatype as non-numeric values are allowed\n if is_scalar(values):\n meta = pd.DataFrame(\n columns=new_columns,\n dtype=df[values].dtype,\n index=pd.Index(df._meta[index]),\n )\n else:\n meta = pd.DataFrame(\n columns=new_columns,\n index=pd.Index(df._meta[index]),\n )\n for value_col in values:\n meta[value_col] = meta[value_col].astype(df[values].dtypes[value_col])\n else:\n # Use float64 as other aggregate functions require numerical data\n meta = pd.DataFrame(\n columns=new_columns, dtype=np.float64, index=pd.Index(df._meta[index])\n )\n\n kwargs = {\"index\": index, \"columns\": columns, \"values\": values}\n\n if aggfunc in [\"sum\", \"mean\"]:\n pv_sum = apply_concat_apply(\n [df],\n chunk=methods.pivot_sum,\n aggregate=methods.pivot_agg,\n meta=meta,\n token=\"pivot_table_sum\",\n chunk_kwargs=kwargs,\n )\n\n if aggfunc in [\"count\", \"mean\"]:\n pv_count = apply_concat_apply(\n [df],\n chunk=methods.pivot_count,\n aggregate=methods.pivot_agg,\n meta=meta,\n token=\"pivot_table_count\",\n chunk_kwargs=kwargs,\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_pivot_table.if_aggfunc_sum__pivot_table.if_aggfunc_sum_.else_.raise_ValueError": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/reshape.py_pivot_table.if_aggfunc_sum__pivot_table.if_aggfunc_sum_.else_.raise_ValueError", "embedding": null, "metadata": {"file_path": "dask/dataframe/reshape.py", "file_name": "reshape.py", "file_type": "text/x-python", "category": "implementation", "start_line": 293, "end_line": 318, "span_ids": ["pivot_table"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pivot_table(df, index=None, columns=None, values=None, aggfunc=\"mean\"):\n # ... other code\n\n if aggfunc == \"sum\":\n return pv_sum\n elif aggfunc == \"count\":\n return pv_count\n elif aggfunc == \"mean\":\n return pv_sum / pv_count\n elif aggfunc == \"first\":\n return apply_concat_apply(\n [df],\n chunk=methods.pivot_first,\n aggregate=methods.pivot_agg_first,\n meta=meta,\n token=\"pivot_table_first\",\n chunk_kwargs=kwargs,\n )\n elif aggfunc == \"last\":\n return apply_concat_apply(\n [df],\n chunk=methods.pivot_last,\n aggregate=methods.pivot_agg_last,\n meta=meta,\n token=\"pivot_table_last\",\n chunk_kwargs=kwargs,\n )\n else:\n raise ValueError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py__compute_partition_stats__compute_partition_stats.if_not_allow_overlap_.else_.return._non_empty_mins_non_empt": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py__compute_partition_stats__compute_partition_stats.if_not_allow_overlap_.else_.return._non_empty_mins_non_empt", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1009, "end_line": 1045, "span_ids": ["_compute_partition_stats"], "tokens": 457}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compute_partition_stats(column, allow_overlap=False, **kwargs) -> tuple:\n \"\"\"For a given column, compute the min, max, and len of each partition.\n\n And make sure that the partitions are sorted relative to each other.\n NOTE: this does not guarantee that every partition is internally sorted.\n \"\"\"\n mins = column.map_partitions(M.min, meta=column)\n maxes = column.map_partitions(M.max, meta=column)\n lens = 
column.map_partitions(len, meta=column)\n mins, maxes, lens = compute(mins, maxes, lens, **kwargs)\n mins = remove_nans(mins)\n maxes = remove_nans(maxes)\n non_empty_mins = [m for m, l in zip(mins, lens) if l != 0]\n non_empty_maxes = [m for m, l in zip(maxes, lens) if l != 0]\n if (\n sorted(non_empty_mins) != non_empty_mins\n or sorted(non_empty_maxes) != non_empty_maxes\n ):\n raise ValueError(\n f\"Partitions are not sorted ascending by {column.name or 'the index'}. \"\n f\"In your dataset the (min, max, len) values of {column.name or 'the index'} \"\n f\"for each partition are: {list(zip(mins, maxes, lens))}\",\n )\n if not allow_overlap and any(\n a <= b for a, b in zip(non_empty_mins[1:], non_empty_maxes[:-1])\n ):\n warnings.warn(\n \"Partitions have overlapping values, so divisions are non-unique. \"\n \"Use `set_index(sorted=True)` with no `divisions` to allow dask to fix the overlap. \"\n f\"In your dataset the (min, max, len) values of {column.name or 'the index'} \"\n f\"for each partition are: {list(zip(mins, maxes, lens))}\",\n UserWarning,\n )\n if not allow_overlap:\n return (mins, maxes, lens)\n else:\n return (non_empty_mins, non_empty_maxes, lens)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_compute_divisions_compute_and_set_divisions.return.fix_overlap_df_mins_max": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/shuffle.py_compute_divisions_compute_and_set_divisions.return.fix_overlap_df_mins_max", "embedding": null, "metadata": {"file_path": "dask/dataframe/shuffle.py", "file_name": "shuffle.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1048, "end_line": 1062, "span_ids": ["compute_divisions", "compute_and_set_divisions"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def compute_divisions(df, col=None, **kwargs) -> tuple:\n column = df.index if col is None else df[col]\n mins, maxes, _ = _compute_partition_stats(column, allow_overlap=False, **kwargs)\n\n return tuple(mins) + (maxes[-1],)\n\n\ndef compute_and_set_divisions(df, **kwargs):\n mins, maxes, lens = _compute_partition_stats(df.index, allow_overlap=True, **kwargs)\n if len(mins) == len(df.divisions) - 1:\n df.divisions = tuple(mins) + (maxes[-1],)\n if not any(mins[i] >= maxes[i - 1] for i in range(1, len(mins))):\n return df\n\n return fix_overlap(df, mins, maxes, lens)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_agg_with_min_count_assert_near_timedeltas.assert_eq_pd_to_numeric_t": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_series_agg_with_min_count_assert_near_timedeltas.assert_eq_pd_to_numeric_t", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1457, "end_line": 1478, "span_ids": ["assert_near_timedeltas", "test_series_agg_with_min_count"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"sum\", \"prod\", \"product\"])\n@pytest.mark.parametrize(\"min_count\", [0, 9])\ndef test_series_agg_with_min_count(method, min_count):\n df = pd.DataFrame([[1]], columns=[\"a\"])\n ddf = dd.from_pandas(df, npartitions=1)\n func = getattr(ddf[\"a\"], method)\n result = func(min_count=min_count).compute()\n if min_count == 0:\n assert result == 1\n else:\n assert result is np.nan\n\n\n# Default absolute tolerance of 2000 nanoseconds\ndef assert_near_timedeltas(t1, t2, atol=2000):\n if is_scalar(t1):\n t1 = pd.Series([t1])\n if is_scalar(t2):\n t2 = pd.Series([t2])\n\n assert t1.dtype == t2.dtype\n assert_eq(pd.to_numeric(t1), pd.to_numeric(t2), atol=atol)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_creates_copy_cols_test_datetime_std_creates_copy_cols.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_creates_copy_cols_test_datetime_std_creates_copy_cols.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1481, "end_line": 1505, "span_ids": ["test_datetime_std_creates_copy_cols"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not PANDAS_GT_120, reason=\"std() for datetime only added in pandas>=1.2\"\n)\n@pytest.mark.parametrize(\"axis\", [0, 1])\ndef test_datetime_std_creates_copy_cols(axis):\n pdf = pd.DataFrame(\n {\n \"dt1\": [\n datetime.fromtimestamp(1636426700 + (i * 250000)) for i in range(10)\n ],\n \"dt2\": [\n datetime.fromtimestamp(1636426700 + (i * 300000)) for i in range(10)\n ],\n }\n )\n\n ddf = dd.from_pandas(pdf, 3)\n\n # Series test (same line twice to make sure data structure wasn't mutated)\n assert_eq(ddf[\"dt1\"].std(), pdf[\"dt1\"].std())\n assert_eq(ddf[\"dt1\"].std(), pdf[\"dt1\"].std())\n\n # DataFrame test (same line twice to make sure data structure wasn't 
mutated)\n assert_near_timedeltas(ddf.std(axis=axis).compute(), pdf.std(axis=axis))\n assert_near_timedeltas(ddf.std(axis=axis).compute(), pdf.std(axis=axis))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_with_larger_dataset_test_datetime_std_with_larger_dataset.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_with_larger_dataset_test_datetime_std_with_larger_dataset.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1508, "end_line": 1560, "span_ids": ["test_datetime_std_with_larger_dataset"], "tokens": 444}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not PANDAS_GT_120, reason=\"std() for datetime only added in pandas>=1.2\"\n)\n@pytest.mark.parametrize(\"axis\", [0, 1])\n@pytest.mark.parametrize(\"skipna\", [False, True])\ndef test_datetime_std_with_larger_dataset(axis, skipna):\n num_rows = 250\n\n dt1 = pd.concat(\n [\n pd.Series([pd.NaT] * 15, index=range(15)),\n pd.to_datetime(\n pd.Series(\n [\n datetime.fromtimestamp(1636426704 + (i * 250000))\n for i in range(num_rows - 15)\n ],\n index=range(15, 250),\n )\n ),\n ],\n ignore_index=False,\n )\n\n base_numbers = [\n (1638290040706793300 + (i * 69527182702409)) for i in range(num_rows)\n ]\n\n pdf = pd.DataFrame(\n {\"dt1\": dt1, \"dt2\": pd.to_datetime(pd.Series(base_numbers))}, index=range(250)\n )\n\n for i in range(3, 8):\n pdf[f\"dt{i}\"] = pd.to_datetime(\n pd.Series([int(x + (0.12 * i)) for x in base_numbers])\n )\n\n ddf = dd.from_pandas(pdf, 8)\n\n assert_near_timedeltas(\n ddf[[\"dt1\"]].std(axis=axis, skipna=skipna).compute(),\n pdf[[\"dt1\"]].std(axis=axis, skipna=skipna),\n )\n\n # Same thing but as Series. 
No axis, since axis=1 raises error\n assert_near_timedeltas(\n ddf[\"dt1\"].std(skipna=skipna).compute(), pdf[\"dt1\"].std(skipna=skipna)\n )\n\n # Computation on full dataset\n assert_near_timedeltas(\n ddf.std(axis=axis, skipna=skipna).compute(), pdf.std(axis=axis, skipna=skipna)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_across_axis1_null_results_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_datetime_std_across_axis1_null_results_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 1563, "end_line": 1619, "span_ids": ["test_std_raises_on_index", "test_datetime_std_across_axis1_null_results"], "tokens": 527}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not PANDAS_GT_120, reason=\"std() for datetime only added in pandas>=1.2\"\n)\n@pytest.mark.filterwarnings(\n \"ignore:Dropping of nuisance columns:FutureWarning\"\n) # https://github.com/dask/dask/issues/7714\n@pytest.mark.parametrize(\"skipna\", [False, True])\ndef test_datetime_std_across_axis1_null_results(skipna):\n pdf = pd.DataFrame(\n {\n \"dt1\": [\n datetime.fromtimestamp(1636426704 + (i * 250000)) for i in range(10)\n ],\n \"dt2\": [\n datetime.fromtimestamp(1636426704 + (i * 217790)) for i in range(10)\n ],\n \"nums\": [i for i in range(10)],\n }\n )\n\n ddf = dd.from_pandas(pdf, 3)\n\n # Single column always results in NaT\n assert_eq(\n ddf[[\"dt1\"]].std(axis=1, skipna=skipna), pdf[[\"dt1\"]].std(axis=1, skipna=skipna)\n )\n\n # Mix of datetimes with other numeric types produces NaNs\n assert_eq(ddf.std(axis=1, skipna=skipna), pdf.std(axis=1, skipna=skipna))\n\n # Test with mix of na and truthy datetimes\n pdf2 = pd.DataFrame(\n {\n \"dt1\": [pd.NaT]\n + [datetime.fromtimestamp(1636426704 + (i * 250000)) for i in range(10)]\n + [pd.NaT],\n \"dt2\": [\n datetime.fromtimestamp(1636426704 + (i * 250000)) for i in range(12)\n ],\n \"dt3\": [\n datetime.fromtimestamp(1636426704 + (i * 282616)) for i in range(12)\n ],\n }\n )\n\n ddf2 = dd.from_pandas(pdf2, 3)\n\n assert_eq(ddf2.std(axis=1, skipna=skipna), pdf2.std(axis=1, skipna=skipna))\n\n\ndef test_std_raises_on_index():\n with pytest.raises(\n NotImplementedError,\n match=\"`std` is only supported with objects that are Dataframes or Series\",\n ):\n dd.from_pandas(pd.DataFrame({\"test\": [1, 2]}), npartitions=2).index.std()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_test_column_assignment.assert_z_not_in_orig_co": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_astype_categoricals_test_column_assignment.assert_z_not_in_orig_co", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3450, "end_line": 3664, "span_ids": ["test_groupby_callable", "test_categorize_info", "test_gh_1301", "test_timeseries_sorted", "test_astype_categoricals_known", "test_astype_categoricals", "_assert_info", "test_info", "test_column_assignment", "test_groupby_multilevel_info", "test_methods_tokenize_differently"], "tokens": 1977}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_astype_categoricals():\n df = pd.DataFrame(\n {\n \"x\": [\"a\", \"b\", \"c\", \"b\", \"c\"],\n \"y\": [\"x\", \"y\", \"z\", \"x\", \"y\"],\n \"z\": [1, 2, 3, 4, 5],\n }\n )\n df = df.astype({\"y\": \"category\"})\n ddf = dd.from_pandas(df, 2)\n assert ddf.y.cat.known\n\n ddf2 = ddf.astype({\"x\": \"category\"})\n assert not ddf2.x.cat.known\n assert ddf2.y.cat.known\n assert ddf2.x.dtype == \"category\"\n assert ddf2.compute().x.dtype == \"category\"\n\n dx = ddf.x.astype(\"category\")\n assert not dx.cat.known\n assert dx.dtype == \"category\"\n assert dx.compute().dtype == \"category\"\n\n\ndef test_astype_categoricals_known():\n df = pd.DataFrame(\n {\n \"x\": [\"a\", \"b\", \"c\", \"b\", \"c\"],\n \"y\": [\"x\", \"y\", \"z\", \"y\", \"z\"],\n \"z\": [\"b\", \"b\", \"b\", \"c\", \"b\"],\n \"other\": [1, 2, 3, 4, 5],\n }\n )\n ddf = dd.from_pandas(df, 2)\n\n abc = pd.api.types.CategoricalDtype([\"a\", \"b\", \"c\"], ordered=False)\n category = pd.api.types.CategoricalDtype(ordered=False)\n\n # DataFrame\n ddf2 = ddf.astype({\"x\": abc, \"y\": category, \"z\": \"category\", \"other\": \"f8\"})\n\n for col, known in [(\"x\", True), (\"y\", False), (\"z\", False)]:\n x = getattr(ddf2, col)\n assert pd.api.types.is_categorical_dtype(x.dtype)\n assert x.cat.known == known\n\n # Series\n for dtype, known in [(\"category\", False), (category, False), (abc, True)]:\n dx2 = ddf.x.astype(dtype)\n assert pd.api.types.is_categorical_dtype(dx2.dtype)\n assert dx2.cat.known == known\n\n\ndef test_groupby_callable():\n a = pd.DataFrame({\"x\": [1, 2, 3, None], \"y\": [10, 20, 30, 40]}, index=[1, 2, 3, 4])\n b = dd.from_pandas(a, 2)\n\n def iseven(x):\n return x % 2 == 0\n\n assert_eq(a.groupby(iseven).y.sum(), b.groupby(iseven).y.sum())\n assert_eq(a.y.groupby(iseven).sum(), b.y.groupby(iseven).sum())\n\n\ndef test_methods_tokenize_differently():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n df = dd.from_pandas(df, npartitions=1)\n assert (\n df.x.map_partitions(lambda x: pd.Series(x.min()))._name\n != df.x.map_partitions(lambda x: pd.Series(x.max()))._name\n )\n\n\ndef _assert_info(df, ddf, memory_usage=True):\n from io import StringIO\n\n assert isinstance(df, pd.DataFrame)\n assert isinstance(ddf, dd.DataFrame)\n\n buf_pd, buf_da = StringIO(), 
StringIO()\n\n df.info(buf=buf_pd, memory_usage=memory_usage)\n ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)\n\n stdout_pd = buf_pd.getvalue()\n stdout_da = buf_da.getvalue()\n stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))\n # TODO\n assert stdout_pd == stdout_da\n\n\ndef test_info():\n from io import StringIO\n\n pandas_format._put_lines = put_lines\n\n test_frames = [\n pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]}, index=[0, 1, 2, 3]),\n pd.DataFrame(),\n ]\n\n for df in test_frames:\n ddf = dd.from_pandas(df, npartitions=4)\n _assert_info(df, ddf)\n\n buf = StringIO()\n ddf = dd.from_pandas(\n pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]}, index=range(4)),\n npartitions=4,\n )\n\n # Verbose=False\n ddf.info(buf=buf, verbose=False)\n assert buf.getvalue() == (\n \"\\n\"\n \"Columns: 2 entries, x to y\\n\"\n \"dtypes: int64(2)\"\n )\n\n # buf=None\n assert ddf.info(buf=None) is None\n\n\ndef test_groupby_multilevel_info():\n # GH 1844\n from io import StringIO\n\n pandas_format._put_lines = put_lines\n\n df = pd.DataFrame({\"A\": [1, 1, 2, 2], \"B\": [1, 2, 3, 4], \"C\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n g = ddf.groupby([\"A\", \"B\"]).sum()\n # slight difference between memory repr (single additional space)\n _assert_info(g.compute(), g, memory_usage=True)\n\n buf = StringIO()\n g.info(buf, verbose=False)\n assert buf.getvalue() == (\n \"\\n\"\n \"Columns: 1 entries, C to C\\n\"\n \"dtypes: int64(1)\"\n )\n\n # multilevel\n g = ddf.groupby([\"A\", \"B\"]).agg([\"count\", \"sum\"])\n _assert_info(g.compute(), g, memory_usage=True)\n\n buf = StringIO()\n g.info(buf, verbose=False)\n expected = (\n \"\\n\"\n \"Columns: 2 entries, ('C', 'count') to ('C', 'sum')\\n\"\n \"dtypes: int64(2)\"\n )\n assert buf.getvalue() == expected\n\n\n@pytest.mark.skipif(not PANDAS_GT_120, reason=\"need newer version of Pandas\")\ndef test_categorize_info():\n # assert that we can call info after categorize\n # workaround for: https://github.com/pydata/pandas/issues/14368\n from io import StringIO\n\n pandas_format._put_lines = put_lines\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4], \"y\": pd.Series(list(\"aabc\")), \"z\": pd.Series(list(\"aabc\"))},\n index=[0, 1, 2, 3],\n )\n ddf = dd.from_pandas(df, npartitions=4).categorize([\"y\"])\n\n # Verbose=False\n buf = StringIO()\n ddf.info(buf=buf, verbose=True)\n expected = (\n \"\\n\"\n \"Int64Index: 4 entries, 0 to 3\\n\"\n \"Data columns (total 3 columns):\\n\"\n \" # Column Non-Null Count Dtype\\n\"\n \"--- ------ -------------- -----\\n\"\n \" 0 x 4 non-null int64\\n\"\n \" 1 y 4 non-null category\\n\"\n \" 2 z 4 non-null object\\n\"\n \"dtypes: category(1), object(1), int64(1)\\n\"\n \"memory usage: 496.0 bytes\\n\"\n )\n assert buf.getvalue() == expected\n\n\ndef test_gh_1301():\n df = pd.DataFrame([[\"1\", \"2\"], [\"3\", \"4\"]])\n ddf = dd.from_pandas(df, npartitions=2)\n ddf2 = ddf.assign(y=ddf[1].astype(int))\n assert_eq(ddf2, df.assign(y=df[1].astype(int)))\n\n assert ddf2.dtypes[\"y\"] == np.dtype(int)\n\n\ndef test_timeseries_sorted():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df.reset_index(), npartitions=2)\n df.index.name = \"index\"\n assert_eq(ddf.set_index(\"index\", sorted=True, drop=True), df)\n\n\ndef test_column_assignment():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]})\n ddf = dd.from_pandas(df, npartitions=2)\n orig = ddf.copy()\n ddf[\"z\"] = ddf.x + ddf.y\n df[\"z\"] = df.x + df.y\n\n assert_eq(df, ddf)\n assert \"z\" 
not in orig.columns", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_array_assignment_test_inplace_operators.assert_eq_ddf_df_assign_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_array_assignment_test_inplace_operators.assert_eq_ddf_df_assign_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3667, "end_line": 3728, "span_ids": ["test_columns_assignment", "test_setitem_triggering_realign", "test_array_assignment", "test_inplace_operators", "test_attribute_assignment"], "tokens": 651}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_assignment():\n df = pd.DataFrame({\"x\": np.random.normal(size=50), \"y\": np.random.normal(size=50)})\n ddf = dd.from_pandas(df, npartitions=2)\n orig = ddf.copy()\n\n arr = np.array(np.random.normal(size=50))\n darr = da.from_array(arr, chunks=25)\n\n df[\"z\"] = arr\n ddf[\"z\"] = darr\n assert_eq(df, ddf)\n assert \"z\" not in orig.columns\n\n arr = np.array(np.random.normal(size=(50, 50)))\n darr = da.from_array(arr, chunks=25)\n msg = \"Array assignment only supports 1-D arrays\"\n with pytest.raises(ValueError, match=msg):\n ddf[\"z\"] = darr\n\n arr = np.array(np.random.normal(size=50))\n darr = da.from_array(arr, chunks=10)\n msg = \"Number of partitions do not match\"\n with pytest.raises(ValueError, match=msg):\n ddf[\"z\"] = darr\n\n\ndef test_columns_assignment():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n df2 = df.assign(y=df.x + 1, z=df.x - 1)\n df[[\"a\", \"b\"]] = df2[[\"y\", \"z\"]]\n\n ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)\n ddf[[\"a\", \"b\"]] = ddf2[[\"y\", \"z\"]]\n\n assert_eq(df, ddf)\n\n\ndef test_attribute_assignment():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [1.0, 2.0, 3.0, 4.0, 5.0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.y = ddf.x + ddf.y\n assert_eq(ddf, df.assign(y=df.x + df.y))\n\n\ndef test_setitem_triggering_realign():\n a = dd.from_pandas(pd.DataFrame({\"A\": range(12)}), npartitions=3)\n b = dd.from_pandas(pd.Series(range(12), name=\"B\"), npartitions=4)\n a[\"C\"] = b\n assert len(a) == 12\n\n\ndef test_inplace_operators():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [1.0, 2.0, 3.0, 4.0, 5.0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n ddf.y **= 0.5\n\n assert_eq(ddf.y, df.y**0.5)\n assert_eq(ddf, df.assign(y=df.y**0.5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_test_shift.with_pytest_raises_TypeEr.ddf_shift_1_5_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_idxmaxmin_test_shift.with_pytest_raises_TypeEr.ddf_shift_1_5_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3731, "end_line": 3930, "span_ids": ["test_getitem_meta", "test_getitem_column_types", "test_getitem_with_bool_dataframe_as_key", "test_diff", "test_idxmaxmin_empty_partitions", "test_getitem_string_subclass", "test_shift", "test_getitem_multilevel", "test_ipython_completion", "test_idxmaxmin", "test_getitem_with_non_series"], "tokens": 1937}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"skipna\", [True, False])\n@pytest.mark.parametrize(\n \"idx\",\n [\n np.arange(100),\n sorted(np.random.random(size=100)),\n pd.date_range(\"20150101\", periods=100),\n ],\n)\ndef test_idxmaxmin(idx, skipna):\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"), index=idx)\n df.b.iloc[31] = np.nan\n df.d.iloc[78] = np.nan\n ddf = dd.from_pandas(df, npartitions=3)\n\n # https://github.com/pandas-dev/pandas/issues/43587\n check_dtype = not all(\n (_compat.PANDAS_GT_133, skipna is False, isinstance(idx, pd.DatetimeIndex))\n )\n\n with warnings.catch_warnings(record=True):\n assert_eq(df.idxmax(axis=1, skipna=skipna), ddf.idxmax(axis=1, skipna=skipna))\n assert_eq(df.idxmin(axis=1, skipna=skipna), ddf.idxmin(axis=1, skipna=skipna))\n\n assert_eq(\n df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna), check_dtype=check_dtype\n )\n assert_eq(\n df.idxmax(skipna=skipna),\n ddf.idxmax(skipna=skipna, split_every=2),\n check_dtype=check_dtype,\n )\n assert (\n ddf.idxmax(skipna=skipna)._name\n != ddf.idxmax(skipna=skipna, split_every=2)._name\n )\n\n assert_eq(\n df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna), check_dtype=check_dtype\n )\n assert_eq(\n df.idxmin(skipna=skipna),\n ddf.idxmin(skipna=skipna, split_every=2),\n check_dtype=check_dtype,\n )\n assert (\n ddf.idxmin(skipna=skipna)._name\n != ddf.idxmin(skipna=skipna, split_every=2)._name\n )\n\n assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))\n assert_eq(\n df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna, split_every=2)\n )\n assert (\n ddf.a.idxmax(skipna=skipna)._name\n != ddf.a.idxmax(skipna=skipna, split_every=2)._name\n )\n\n assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))\n assert_eq(\n df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna, split_every=2)\n )\n assert (\n ddf.a.idxmin(skipna=skipna)._name\n != ddf.a.idxmin(skipna=skipna, split_every=2)._name\n )\n\n\ndef test_idxmaxmin_empty_partitions():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [1.5, 2, 3], \"c\": [np.NaN] * 3, \"d\": [1, 2, np.NaN]}\n )\n empty = df.iloc[:0]\n\n ddf = dd.concat(\n [dd.from_pandas(df, npartitions=1)]\n + [dd.from_pandas(empty, npartitions=1)] * 10\n )\n\n for skipna in [True, False]:\n assert_eq(ddf.idxmin(skipna=skipna, split_every=3), df.idxmin(skipna=skipna))\n\n assert_eq(\n ddf[[\"a\", \"b\", \"d\"]].idxmin(skipna=skipna, split_every=3),\n df[[\"a\", \"b\", \"d\"]].idxmin(skipna=skipna),\n )\n\n 
assert_eq(ddf.b.idxmax(split_every=3), df.b.idxmax())\n\n # Completely empty raises\n ddf = dd.concat([dd.from_pandas(empty, npartitions=1)] * 10)\n with pytest.raises(ValueError):\n ddf.idxmax().compute()\n with pytest.raises(ValueError):\n ddf.b.idxmax().compute()\n\n\ndef test_getitem_meta():\n data = {\"col1\": [\"a\", \"a\", \"b\"], \"col2\": [0, 1, 0]}\n\n df = pd.DataFrame(data=data, columns=[\"col1\", \"col2\"])\n ddf = dd.from_pandas(df, npartitions=1)\n\n assert_eq(df.col2[df.col1 == \"a\"], ddf.col2[ddf.col1 == \"a\"])\n\n\ndef test_getitem_multilevel():\n pdf = pd.DataFrame({(\"A\", \"0\"): [1, 2, 2], (\"B\", \"1\"): [1, 2, 3]})\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n assert_eq(pdf[\"A\", \"0\"], ddf[\"A\", \"0\"])\n assert_eq(pdf[[(\"A\", \"0\"), (\"B\", \"1\")]], ddf[[(\"A\", \"0\"), (\"B\", \"1\")]])\n\n\ndef test_getitem_string_subclass():\n df = pd.DataFrame({\"column_1\": list(range(10))})\n ddf = dd.from_pandas(df, npartitions=3)\n\n class string_subclass(str):\n pass\n\n column_1 = string_subclass(\"column_1\")\n\n assert_eq(df[column_1], ddf[column_1])\n\n\n@pytest.mark.parametrize(\"col_type\", [list, np.array, pd.Series, pd.Index])\ndef test_getitem_column_types(col_type):\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n cols = col_type([\"C\", \"A\", \"B\"])\n\n assert_eq(df[cols], ddf[cols])\n\n\ndef test_getitem_with_bool_dataframe_as_key():\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4], \"C\": [5, 6]})\n ddf = dd.from_pandas(df, 2)\n assert_eq(df[df > 3], ddf[ddf > 3])\n\n\ndef test_getitem_with_non_series():\n s = pd.Series(list(range(10)), index=list(\"abcdefghij\"))\n ds = dd.from_pandas(s, npartitions=3)\n\n assert_eq(s[[\"a\", \"b\"]], ds[[\"a\", \"b\"]])\n\n\ndef test_ipython_completion():\n df = pd.DataFrame({\"a\": [1], \"b\": [2]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n completions = ddf._ipython_key_completions_()\n assert \"a\" in completions\n assert \"b\" in completions\n assert \"c\" not in completions\n\n\ndef test_diff():\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"))\n ddf = dd.from_pandas(df, 5)\n\n assert_eq(ddf.diff(), df.diff())\n assert_eq(ddf.diff(0), df.diff(0))\n assert_eq(ddf.diff(2), df.diff(2))\n assert_eq(ddf.diff(-2), df.diff(-2))\n\n assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))\n\n assert_eq(ddf.a.diff(), df.a.diff())\n assert_eq(ddf.a.diff(0), df.a.diff(0))\n assert_eq(ddf.a.diff(2), df.a.diff(2))\n assert_eq(ddf.a.diff(-2), df.a.diff(-2))\n\n assert ddf.diff(2)._name == ddf.diff(2)._name\n assert ddf.diff(2)._name != ddf.diff(3)._name\n pytest.raises(TypeError, lambda: ddf.diff(1.5))\n\n\ndef test_shift():\n df = _compat.makeTimeDataFrame()\n ddf = dd.from_pandas(df, npartitions=4)\n\n # DataFrame\n assert_eq(ddf.shift(), df.shift())\n assert_eq(ddf.shift(0), df.shift(0))\n assert_eq(ddf.shift(2), df.shift(2))\n assert_eq(ddf.shift(-2), df.shift(-2))\n\n assert_eq(ddf.shift(2, axis=1), df.shift(2, axis=1))\n\n # Series\n assert_eq(ddf.A.shift(), df.A.shift())\n assert_eq(ddf.A.shift(0), df.A.shift(0))\n assert_eq(ddf.A.shift(2), df.A.shift(2))\n assert_eq(ddf.A.shift(-2), df.A.shift(-2))\n\n with pytest.raises(TypeError):\n ddf.shift(1.5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_DatetimeIndex_test_memory_usage.assert_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_shift_with_freq_DatetimeIndex_test_memory_usage.assert_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3933, "end_line": 4128, "span_ids": ["test_hash_split_unique", "test_split_out_value_counts", "test_first_and_last", "test_values", "test_shift_with_freq_errors", "test_copy", "test_del", "test_shift_with_freq_DatetimeIndex", "test_split_out_drop_duplicates", "test_memory_usage", "test_shift_with_freq_PeriodIndex", "test_shift_with_freq_TimedeltaIndex"], "tokens": 2046}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"data_freq,divs1\", [(\"B\", False), (\"D\", True), (\"H\", True)])\ndef test_shift_with_freq_DatetimeIndex(data_freq, divs1):\n df = _compat.makeTimeDataFrame()\n df = df.set_index(_compat.makeDateIndex(30, freq=data_freq))\n ddf = dd.from_pandas(df, npartitions=4)\n for freq, divs2 in [(\"S\", True), (\"W\", False), (pd.Timedelta(10, unit=\"h\"), True)]:\n for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:\n res = d.shift(2, freq=freq)\n assert_eq(res, p.shift(2, freq=freq))\n assert res.known_divisions == divs2\n # Index shifts also work with freq=None\n res = ddf.index.shift(2)\n assert_eq(res, df.index.shift(2))\n assert res.known_divisions == divs1\n\n\n@pytest.mark.parametrize(\"data_freq,divs\", [(\"B\", False), (\"D\", True), (\"H\", True)])\ndef test_shift_with_freq_PeriodIndex(data_freq, divs):\n df = _compat.makeTimeDataFrame()\n # PeriodIndex\n df = df.set_index(pd.period_range(\"2000-01-01\", periods=30, freq=data_freq))\n ddf = dd.from_pandas(df, npartitions=4)\n for d, p in [(ddf, df), (ddf.A, df.A)]:\n res = d.shift(2, freq=data_freq)\n assert_eq(res, p.shift(2, freq=data_freq))\n assert res.known_divisions == divs\n # PeriodIndex.shift doesn't have `freq` parameter\n res = ddf.index.shift(2)\n assert_eq(res, df.index.shift(2))\n assert res.known_divisions == divs\n\n df = _compat.makeTimeDataFrame()\n with pytest.raises(ValueError):\n ddf.index.shift(2, freq=\"D\") # freq keyword not supported\n\n\ndef test_shift_with_freq_TimedeltaIndex():\n df = _compat.makeTimeDataFrame()\n # TimedeltaIndex\n for data_freq in [\"T\", \"D\", \"H\"]:\n df = df.set_index(_compat.makeTimedeltaIndex(30, freq=data_freq))\n ddf = dd.from_pandas(df, npartitions=4)\n for freq in [\"S\", pd.Timedelta(10, unit=\"h\")]:\n for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:\n res = d.shift(2, freq=freq)\n assert_eq(res, p.shift(2, freq=freq))\n assert res.known_divisions\n # Index shifts also work with freq=None\n res = ddf.index.shift(2)\n assert_eq(res, df.index.shift(2))\n assert res.known_divisions\n\n\ndef test_shift_with_freq_errors():\n # Other index types error\n df = _compat.makeDataFrame()\n ddf = dd.from_pandas(df, npartitions=4)\n pytest.raises(NotImplementedError, lambda: ddf.shift(2, 
freq=\"S\"))\n pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq=\"S\"))\n pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))\n\n\n@pytest.mark.parametrize(\"method\", [\"first\", \"last\"])\ndef test_first_and_last(method):\n f = lambda x, offset: getattr(x, method)(offset)\n freqs = [\"12h\", \"D\"]\n offsets = [\"0d\", \"100h\", \"20d\", \"20B\", \"3W\", \"3M\", \"400d\", \"13M\"]\n for freq in freqs:\n index = pd.date_range(\"1/1/2000\", \"1/1/2001\", freq=freq)[::4]\n df = pd.DataFrame(\n np.random.random((len(index), 4)), index=index, columns=[\"A\", \"B\", \"C\", \"D\"]\n )\n ddf = dd.from_pandas(df, npartitions=10)\n for offset in offsets:\n assert_eq(f(ddf, offset), f(df, offset))\n assert_eq(f(ddf.A, offset), f(df.A, offset))\n\n\n@pytest.mark.parametrize(\"npartitions\", [1, 4, 20])\n@pytest.mark.parametrize(\"split_every\", [2, 5])\n@pytest.mark.parametrize(\"split_out\", [None, 1, 5, 20])\ndef test_hash_split_unique(npartitions, split_every, split_out):\n from string import ascii_lowercase\n\n s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))\n ds = dd.from_pandas(s, npartitions=npartitions)\n\n dropped = ds.unique(split_every=split_every, split_out=split_out)\n\n dsk = dropped.__dask_optimize__(dropped.dask, dropped.__dask_keys__())\n from dask.core import get_deps\n\n dependencies, dependents = get_deps(dsk)\n\n assert len([k for k, v in dependencies.items() if not v]) == npartitions\n assert dropped.npartitions == (split_out or 1)\n assert sorted(dropped.compute(scheduler=\"sync\")) == sorted(s.unique())\n\n\n@pytest.mark.parametrize(\"split_every\", [None, 2])\ndef test_split_out_drop_duplicates(split_every):\n x = np.concatenate([np.arange(10)] * 100)[:, None]\n y = x.copy()\n z = np.concatenate([np.arange(20)] * 50)[:, None]\n rs = np.random.RandomState(1)\n rs.shuffle(x)\n rs.shuffle(y)\n rs.shuffle(z)\n df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=[\"x\", \"y\", \"z\"])\n ddf = dd.from_pandas(df, npartitions=20)\n\n for subset, keep in product([None, [\"x\", \"z\"]], [\"first\", \"last\"]):\n sol = df.drop_duplicates(subset=subset, keep=keep)\n res = ddf.drop_duplicates(\n subset=subset, keep=keep, split_every=split_every, split_out=10\n )\n assert res.npartitions == 10\n assert_eq(sol, res)\n\n\n@pytest.mark.parametrize(\"split_every\", [None, 2])\ndef test_split_out_value_counts(split_every):\n df = pd.DataFrame({\"x\": [1, 2, 3] * 100})\n ddf = dd.from_pandas(df, npartitions=5)\n\n assert ddf.x.value_counts(split_out=10, split_every=split_every).npartitions == 10\n assert_eq(\n ddf.x.value_counts(split_out=10, split_every=split_every), df.x.value_counts()\n )\n\n\ndef test_values():\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n ddf = dd.from_pandas(df, 2)\n\n assert_eq(df.values, ddf.values)\n assert_eq(df.x.values, ddf.x.values)\n assert_eq(df.y.values, ddf.y.values)\n assert_eq(df.index.values, ddf.index.values)\n\n\ndef test_copy():\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n\n a = dd.from_pandas(df, npartitions=2)\n b = a.copy()\n c = a.copy(deep=False)\n\n a[\"y\"] = a.x * 2\n\n assert_eq(b, df)\n assert_eq(c, df)\n\n deep_err = (\n \"The `deep` value must be False. 
This is strictly a shallow copy \"\n \"of the underlying computational graph.\"\n )\n for deep in [True, None, \"\"]:\n with pytest.raises(ValueError, match=deep_err):\n a.copy(deep=deep)\n\n\ndef test_del():\n df = pd.DataFrame(\n {\"x\": [\"a\", \"b\", \"c\", \"d\"], \"y\": [2, 3, 4, 5]},\n index=pd.Index([1.0, 2.0, 3.0, 4.0], name=\"ind\"),\n )\n a = dd.from_pandas(df, 2)\n b = a.copy()\n\n del a[\"x\"]\n assert_eq(b, df)\n\n del df[\"x\"]\n assert_eq(a, df)\n\n\n@pytest.mark.parametrize(\"index\", [True, False])\n@pytest.mark.parametrize(\"deep\", [True, False])\ndef test_memory_usage(index, deep):\n df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [1.0, 2.0, 3.0], \"z\": [\"a\", \"b\", \"c\"]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(\n df.memory_usage(index=index, deep=deep),\n ddf.memory_usage(index=index, deep=deep),\n )\n assert (\n df.x.memory_usage(index=index, deep=deep)\n == ddf.x.memory_usage(index=index, deep=deep).compute()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_per_partition_test_with_boundary.tm_assert_frame_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_memory_usage_per_partition_test_with_boundary.tm_assert_frame_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4131, "end_line": 4328, "span_ids": ["test_boundary_slice_empty", "test_with_boundary", "test_memory_usage_per_partition", "test_datetime_loc_open_slicing", "test_boundary_slice_nonmonotonic", "test_to_datetime", "test_dataframe_reductions_arithmetic", "test_isna", "test_to_timedelta", "test_slice_on_filtered_boundary", "test_dataframe_mode"], "tokens": 1942}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"index\", [True, False])\n@pytest.mark.parametrize(\"deep\", [True, False])\ndef test_memory_usage_per_partition(index, deep):\n df = pd.DataFrame(\n {\n \"x\": [1, 2, 3, 4, 5],\n \"y\": [1.0, 2.0, 3.0, 4.0, 5.0],\n \"z\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n # DataFrame.memory_usage_per_partition\n expected = pd.Series(\n part.compute().memory_usage(index=index, deep=deep).sum()\n for part in ddf.partitions\n )\n result = ddf.memory_usage_per_partition(index=index, deep=deep)\n assert_eq(expected, result)\n\n # Series.memory_usage_per_partition\n expected = pd.Series(\n part.x.compute().memory_usage(index=index, deep=deep) for part in ddf.partitions\n )\n result = ddf.x.memory_usage_per_partition(index=index, deep=deep)\n assert_eq(expected, result)\n\n\n@pytest.mark.parametrize(\n \"reduction\",\n [\n \"sum\",\n \"mean\",\n \"std\",\n \"var\",\n \"count\",\n \"min\",\n \"max\",\n \"idxmin\",\n \"idxmax\",\n \"prod\",\n \"all\",\n \"sem\",\n ],\n)\ndef 
test_dataframe_reductions_arithmetic(reduction):\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [1.1, 2.2, 3.3, 4.4, 5.5]})\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(\n ddf - (getattr(ddf, reduction)() + 1), df - (getattr(df, reduction)() + 1)\n )\n\n\ndef test_dataframe_mode():\n data = [[\"Tom\", 10, 7], [\"Farahn\", 14, 7], [\"Julie\", 14, 5], [\"Nick\", 10, 10]]\n\n df = pd.DataFrame(data, columns=[\"Name\", \"Num\", \"Num\"])\n ddf = dd.from_pandas(df, npartitions=3)\n\n assert_eq(ddf.mode(), df.mode())\n # name is not preserved in older pandas\n assert_eq(ddf.Name.mode(), df.Name.mode(), check_names=PANDAS_GT_140)\n\n # test empty\n df = pd.DataFrame(columns=[\"a\", \"b\"])\n ddf = dd.from_pandas(df, npartitions=1)\n # check_index=False should be removed once https://github.com/pandas-dev/pandas/issues/33321 is resolved.\n assert_eq(ddf.mode(), df.mode(), check_index=False)\n\n\ndef test_datetime_loc_open_slicing():\n dtRange = pd.date_range(\"01.01.2015\", \"05.05.2015\")\n df = pd.DataFrame(np.random.random((len(dtRange), 2)), index=dtRange)\n ddf = dd.from_pandas(df, npartitions=5)\n assert_eq(df.loc[:\"02.02.2015\"], ddf.loc[:\"02.02.2015\"])\n assert_eq(df.loc[\"02.02.2015\":], ddf.loc[\"02.02.2015\":])\n assert_eq(df[0].loc[:\"02.02.2015\"], ddf[0].loc[:\"02.02.2015\"])\n assert_eq(df[0].loc[\"02.02.2015\":], ddf[0].loc[\"02.02.2015\":])\n\n\ndef test_to_datetime():\n df = pd.DataFrame({\"year\": [2015, 2016], \"month\": [2, 3], \"day\": [4, 5]})\n df.index.name = \"ix\"\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(pd.to_datetime(df), dd.to_datetime(ddf))\n\n s = pd.Series([\"3/11/2000\", \"3/12/2000\", \"3/13/2000\"] * 100)\n s.index = s.values\n ds = dd.from_pandas(s, npartitions=10, sort=False)\n\n assert_eq(\n pd.to_datetime(s, infer_datetime_format=True),\n dd.to_datetime(ds, infer_datetime_format=True),\n )\n assert_eq(\n pd.to_datetime(s.index, infer_datetime_format=True),\n dd.to_datetime(ds.index, infer_datetime_format=True),\n check_divisions=False,\n )\n assert_eq(\n pd.to_datetime(s, utc=True),\n dd.to_datetime(ds, utc=True),\n )\n\n for arg in (\"2021-08-03\", 2021):\n with pytest.raises(NotImplementedError, match=\"non-index-able arguments\"):\n dd.to_datetime(arg)\n\n\ndef test_to_timedelta():\n s = pd.Series(range(10))\n ds = dd.from_pandas(s, npartitions=2)\n\n assert_eq(pd.to_timedelta(s), dd.to_timedelta(ds))\n assert_eq(pd.to_timedelta(s, unit=\"h\"), dd.to_timedelta(ds, unit=\"h\"))\n\n s = pd.Series([1, 2, \"this will error\"])\n ds = dd.from_pandas(s, npartitions=2)\n assert_eq(pd.to_timedelta(s, errors=\"coerce\"), dd.to_timedelta(ds, errors=\"coerce\"))\n\n\n@pytest.mark.parametrize(\"values\", [[np.NaN, 0], [1, 1]])\ndef test_isna(values):\n s = pd.Series(values)\n ds = dd.from_pandas(s, npartitions=2)\n\n assert_eq(pd.isna(s), dd.isna(ds))\n\n\n@pytest.mark.parametrize(\"drop\", [0, 9])\ndef test_slice_on_filtered_boundary(drop):\n # https://github.com/dask/dask/issues/2211\n x = np.arange(10)\n x[[5, 6]] -= 2\n df = pd.DataFrame({\"A\": x, \"B\": np.arange(len(x))})\n pdf = df.set_index(\"A\").query(f\"B != {drop}\")\n ddf = dd.from_pandas(df, 1).set_index(\"A\").query(f\"B != {drop}\")\n\n result = dd.concat([ddf, ddf.rename(columns={\"B\": \"C\"})], axis=1)\n expected = pd.concat([pdf, pdf.rename(columns={\"B\": \"C\"})], axis=1)\n assert_eq(result, expected)\n\n\ndef test_boundary_slice_nonmonotonic():\n x = np.array([-1, -2, 2, 4, 3])\n df = pd.DataFrame({\"B\": range(len(x))}, index=x)\n result = 
methods.boundary_slice(df, 0, 4)\n expected = df.iloc[2:]\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -1, 4)\n expected = df.drop(-2)\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -2, 3)\n expected = df.drop(4)\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -2, 3.5)\n expected = df.drop(4)\n tm.assert_frame_equal(result, expected)\n\n result = methods.boundary_slice(df, -2, 4)\n expected = df\n tm.assert_frame_equal(result, expected)\n\n\ndef test_boundary_slice_empty():\n df = pd.DataFrame()\n result = methods.boundary_slice(df, 1, 4)\n expected = pd.DataFrame()\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n \"start, stop, right_boundary, left_boundary, drop\",\n [\n (-1, None, False, False, [-1, -2]),\n (-1, None, False, True, [-2]),\n (None, 3, False, False, [3, 4]),\n (None, 3, True, False, [4]),\n # Missing keys\n (-0.5, None, False, False, [-1, -2]),\n (-0.5, None, False, True, [-1, -2]),\n (-1.5, None, False, True, [-2]),\n (None, 3.5, False, False, [4]),\n (None, 3.5, True, False, [4]),\n (None, 2.5, False, False, [3, 4]),\n ],\n)\ndef test_with_boundary(start, stop, right_boundary, left_boundary, drop):\n x = np.array([-1, -2, 2, 4, 3])\n df = pd.DataFrame({\"B\": range(len(x))}, index=x)\n result = methods.boundary_slice(df, start, stop, right_boundary, left_boundary)\n expected = df.drop(drop)\n tm.assert_frame_equal(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_error_test_groupby_error.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_error_test_groupby_error.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 103, "end_line": 133, "span_ids": ["test_groupby_error"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_error():\n pdf = pd.DataFrame({\"x\": [0, 1, 2, 3, 4, 6, 7, 8, 9, 10], \"y\": list(\"abcbabbcda\")})\n ddf = dd.from_pandas(pdf, 3)\n\n with pytest.raises(KeyError):\n ddf.groupby(\"A\")\n\n with pytest.raises(KeyError):\n ddf.groupby([\"x\", \"A\"])\n\n dp = ddf.groupby(\"y\")\n\n msg = \"Column not found: \"\n with pytest.raises(KeyError) as err:\n dp[\"A\"]\n assert msg in str(err.value)\n\n msg = \"Columns not found: \"\n with pytest.raises(KeyError) as err:\n dp[[\"x\", \"A\"]]\n assert msg in str(err.value)\n\n msg = (\n \"DataFrameGroupBy does not allow compute method.\"\n \"Please chain it with an aggregation method (like ``.mean()``) or get a \"\n \"specific group using ``.get_group()`` before calling ``compute()``\"\n )\n\n with pytest.raises(NotImplementedError) as err:\n dp.compute()\n assert msg in str(err.value)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_cumfunc_with_named_index_test_series_groupby_cumfunc_with_named_index.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_series_groupby_cumfunc_with_named_index_test_series_groupby_cumfunc_with_named_index.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 444, "end_line": 454, "span_ids": ["test_series_groupby_cumfunc_with_named_index"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"npartitions\", (1, 2))\n@pytest.mark.parametrize(\"func\", (\"cumsum\", \"cumprod\", \"cumcount\"))\ndef test_series_groupby_cumfunc_with_named_index(npartitions, func):\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5, 6, 7], \"y\": [8, 9, 6, 2, 3, 5, 6]}\n ).set_index(\"x\")\n ddf = dd.from_pandas(df, npartitions)\n assert ddf.npartitions == npartitions\n expected = getattr(df[\"y\"].groupby(\"x\"), func)()\n result = getattr(ddf[\"y\"].groupby(\"x\"), func)()\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_dask_test_aggregate_dask.for_spec_in_specs_.for_other_spec_in_specs_.if_isinstance_spec_list_.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_aggregate_dask_test_aggregate_dask.for_spec_in_specs_.for_other_spec_in_specs_.if_isinstance_spec_list_.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 1010, "end_line": 1090, "span_ids": ["test_aggregate_dask"], "tokens": 758}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_aggregate_dask():\n dask_holder = collections.namedtuple(\"dask_holder\", [\"dask\"])\n get_agg_dask = lambda obj: dask_holder(\n {\n k: v\n for (k, v) in obj.dask.items()\n # Skip \"chunk\" tasks, because they include\n # SubgraphCallable object with non-deterministic\n # (uuid-based) function names\n if (k[0].startswith(\"aggregate\") and \"-chunk-\" not in k[0])\n }\n )\n\n specs = [\n {\"b\": {\"c\": \"mean\"}, \"c\": {\"a\": \"max\", \"b\": \"min\"}},\n 
{\"b\": \"mean\", \"c\": [\"min\", \"max\"]},\n [\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n \"count\",\n \"size\",\n ],\n [\n \"std\",\n \"var\",\n \"first\",\n \"last\",\n \"prod\",\n ],\n \"sum\",\n \"mean\",\n \"min\",\n \"max\",\n \"count\",\n \"std\",\n \"var\",\n \"first\",\n \"last\",\n \"prod\"\n # NOTE: the 'size' spec is special since it bypasses aggregate\n # 'size'\n ]\n\n pdf = pd.DataFrame(\n {\n \"a\": [1, 2, 3, 1, 1, 2, 4, 3, 7] * 100,\n \"b\": [4, 2, 7, 3, 3, 1, 1, 1, 2] * 100,\n \"c\": [0, 1, 2, 3, 4, 5, 6, 7, 8] * 100,\n \"d\": [3, 2, 1, 3, 2, 1, 2, 6, 4] * 100,\n },\n columns=[\"c\", \"b\", \"a\", \"d\"],\n )\n ddf = dd.from_pandas(pdf, npartitions=100)\n\n for spec in specs:\n result1 = ddf.groupby([\"a\", \"b\"]).agg(spec, split_every=2)\n result2 = ddf.groupby([\"a\", \"b\"]).agg(spec, split_every=2)\n\n agg_dask1 = get_agg_dask(result1)\n agg_dask2 = get_agg_dask(result2)\n\n # check that the number of partitions used is fixed by split_every\n assert_max_deps(agg_dask1, 2)\n assert_max_deps(agg_dask2, 2)\n\n # check for deterministic key names and values.\n # Require pickle since \"partial\" concat functions\n # used in tree-reduction cannot be compared\n assert pickle.dumps(agg_dask1[0]) == pickle.dumps(agg_dask2[0])\n\n # the length of the dask does not depend on the passed spec\n for other_spec in specs:\n # Note: List-based aggregation specs may result in\n # an extra delayed layer. This is because a \"long\" list\n # arg will be detected in `dask.array.core.normalize_arg`.\n if isinstance(spec, list) == isinstance(other_spec, list):\n other = ddf.groupby([\"a\", \"b\"]).agg(other_spec, split_every=2)\n assert len(other.dask) == len(result1.dask)\n assert len(other.dask) == len(result2.dask)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_within_partition_sorting_test_groupby_shift_within_partition_sorting.for___in_range_10_.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_shift_within_partition_sorting_test_groupby_shift_within_partition_sorting.for___in_range_10_.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2146, "end_line": 2164, "span_ids": ["test_groupby_shift_within_partition_sorting"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"ignore:`meta` is not specified\")\ndef test_groupby_shift_within_partition_sorting():\n # Result is non-deterministic. 
We run the assertion a few times to keep\n # the probability of false pass low.\n for _ in range(10):\n df = pd.DataFrame(\n {\n \"a\": range(60),\n \"b\": [2, 4, 3, 1] * 15,\n \"c\": [None, 10, 20, None, 30, 40] * 10,\n }\n )\n df = df.set_index(\"a\").sort_index()\n ddf = dd.from_pandas(df, npartitions=6)\n assert_eq(\n df.groupby(\"b\")[\"c\"].shift(1),\n ddf.groupby(\"b\")[\"c\"].shift(1),\n scheduler=\"threads\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multi_index_with_row_operations_test_groupby_multi_index_with_row_operations.assert_eq_expected_actua": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_multi_index_with_row_operations_test_groupby_multi_index_with_row_operations.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2621, "end_line": 2642, "span_ids": ["test_groupby_multi_index_with_row_operations"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"operation\", [\"head\", \"tail\"])\ndef test_groupby_multi_index_with_row_operations(operation):\n df = pd.DataFrame(\n data=[\n [\"a0\", \"b1\"],\n [\"a0\", \"b2\"],\n [\"a1\", \"b1\"],\n [\"a3\", \"b3\"],\n [\"a3\", \"b3\"],\n [\"a5\", \"b5\"],\n [\"a1\", \"b1\"],\n [\"a1\", \"b1\"],\n [\"a1\", \"b1\"],\n ],\n columns=[\"A\", \"B\"],\n )\n\n caller = operator.methodcaller(operation)\n expected = caller(df.groupby([\"A\", df[\"A\"].eq(\"a1\")])[\"B\"])\n ddf = dd.from_pandas(df, npartitions=3)\n actual = caller(ddf.groupby([\"A\", ddf[\"A\"].eq(\"a1\")])[\"B\"])\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_iter_fails_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_iter_fails_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 2645, "end_line": 2658, "span_ids": ["test_groupby_iter_fails"], "tokens": 105}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_groupby_iter_fails():\n df = pd.DataFrame(\n data=[\n [\"a0\", 
\"b1\"],\n [\"a1\", \"b1\"],\n [\"a3\", \"b3\"],\n [\"a5\", \"b5\"],\n ],\n columns=[\"A\", \"B\"],\n )\n ddf = dd.from_pandas(df, npartitions=1)\n with pytest.raises(NotImplementedError, match=\"computing the groups\"):\n list(ddf.groupby(\"A\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_kwargs_test_get_dummies_kwargs.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_get_dummies_kwargs_test_get_dummies_kwargs.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 56, "end_line": 79, "span_ids": ["test_get_dummies_kwargs"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_dummies_kwargs():\n s = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype=\"category\")\n exp = pd.get_dummies(s, prefix=\"X\", prefix_sep=\"-\")\n\n ds = dd.from_pandas(s, 2)\n res = dd.get_dummies(ds, prefix=\"X\", prefix_sep=\"-\")\n assert_eq(res, exp)\n\n exp = pd.get_dummies(s, drop_first=True)\n res = dd.get_dummies(ds, drop_first=True)\n assert_eq(res, exp)\n\n # nan\n s = pd.Series([1, 1, 1, 2, np.nan, 3, np.nan, 5], dtype=\"category\")\n exp = pd.get_dummies(s)\n\n ds = dd.from_pandas(s, 2)\n res = dd.get_dummies(ds)\n assert_eq(res, exp)\n\n # dummy_na\n exp = pd.get_dummies(s, dummy_na=True)\n res = dd.get_dummies(ds, dummy_na=True)\n assert_eq(res, exp)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_check_pandas_issue_45618_warning_check_pandas_issue_45618_warning.return.decorator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_check_pandas_issue_45618_warning_check_pandas_issue_45618_warning.return.decorator", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 100, "span_ids": ["check_pandas_issue_45618_warning"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_pandas_issue_45618_warning(test_func):\n # Check for FutureWarning raised in `pandas=1.4.0`-only.\n # This can be removed when `pandas=1.4.0` is no longer supported (PANDAS_GT_140).\n # See 
https://github.com/pandas-dev/pandas/issues/45618 for more details.\n\n def decorator():\n with warnings.catch_warnings(record=True) as record:\n test_func()\n if PANDAS_VERSION == parse_version(\"1.4.0\"):\n assert all(\n \"In a future version, passing a SparseArray\" in str(r.message)\n for r in record\n )\n else:\n assert not record\n\n return decorator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_firstlast_test_pivot_table_firstlast.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_test_pivot_table_firstlast_test_pivot_table_firstlast.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 213, "end_line": 236, "span_ids": ["test_pivot_table_firstlast"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"values\", [\"B\", [\"D\"], [\"B\", \"D\"]])\n@pytest.mark.parametrize(\"aggfunc\", [\"first\", \"last\"])\ndef test_pivot_table_firstlast(values, aggfunc):\n\n df = pd.DataFrame(\n {\n \"A\": np.random.choice(list(\"XYZ\"), size=100),\n \"B\": np.random.randn(100),\n \"C\": pd.Categorical(np.random.choice(list(\"abc\"), size=100)),\n \"D\": np.random.choice(list(\"abc\"), size=100),\n }\n )\n ddf = dd.from_pandas(df, 5).repartition((0, 20, 40, 60, 80, 98, 99))\n\n res = dd.pivot_table(ddf, index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n exp = pd.pivot_table(df, index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n\n assert_eq(exp, res)\n\n # method\n res = ddf.pivot_table(index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n exp = df.pivot_table(index=\"A\", columns=\"C\", values=values, aggfunc=aggfunc)\n\n assert_eq(exp, res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_by_column_with_narrow_divisions_test_rearrange_by_column_with_narrow_divisions.list_eq_df_a_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_rearrange_by_column_with_narrow_divisions_test_rearrange_by_column_with_narrow_divisions.list_eq_df_a_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 338, "end_line": 345, "span_ids": ["test_rearrange_by_column_with_narrow_divisions"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_rearrange_by_column_with_narrow_divisions():\n from dask.dataframe.tests.test_multi import list_eq\n\n A = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": [1, 1, 2, 2, 3, 4]})\n a = dd.repartition(A, [0, 4, 5])\n\n df = rearrange_by_divisions(a, \"x\", (0, 2, 5))\n list_eq(df, a)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_maybe_buffered_partd_test_maybe_buffered_partd.assert_f4_tempdir_tmp_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_maybe_buffered_partd_test_maybe_buffered_partd.assert_f4_tempdir_tmp_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 348, "end_line": 368, "span_ids": ["test_maybe_buffered_partd"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_maybe_buffered_partd(tmp_path):\n import partd\n\n f = maybe_buffered_partd()\n p1 = f()\n assert isinstance(p1.partd, partd.Buffer)\n f2 = pickle.loads(pickle.dumps(f))\n assert not f2.buffer\n p2 = f2()\n assert isinstance(p2.partd, partd.File)\n\n f3 = maybe_buffered_partd(tempdir=tmp_path)\n p3 = f3()\n assert isinstance(p3.partd, partd.Buffer)\n contents = list(tmp_path.iterdir())\n assert len(contents) == 1\n assert contents[0].suffix == \".partd\"\n assert contents[0].parent == tmp_path\n f4 = pickle.loads(pickle.dumps(f3))\n assert not f4.buffer\n assert f4.tempdir == tmp_path", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_empty_divisions_test_set_index_divisions_2.assert_list_result_comput": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_empty_divisions_test_set_index_divisions_2.assert_list_result_comput", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 391, "end_line": 408, "span_ids": ["test_set_index_with_empty_divisions", "test_set_index_divisions_2"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_set_index_with_empty_divisions():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n\n ddf = dd.from_pandas(df, npartitions=2)\n\n # Divisions must not be empty\n with pytest.raises(ValueError):\n ddf.set_index(\"x\", divisions=[])\n\n\ndef test_set_index_divisions_2():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5, 6], \"y\": list(\"abdabd\")})\n ddf = dd.from_pandas(df, 2)\n\n result = ddf.set_index(\"y\", divisions=[\"a\", \"c\", \"d\"])\n assert result.divisions == (\"a\", \"c\", \"d\")\n\n assert list(result.compute(scheduler=\"sync\").index[-2:]) == [\"d\", \"d\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_empty_and_overlap_test_set_index_with_empty_and_overlap.assert_eq_result_expecte": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_set_index_with_empty_and_overlap_test_set_index_with_empty_and_overlap.assert_eq_result_expecte", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 938, "end_line": 952, "span_ids": ["test_set_index_with_empty_and_overlap"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_index_with_empty_and_overlap():\n # https://github.com/dask/dask/issues/8735\n df = pd.DataFrame(\n index=list(range(8)),\n data={\n \"a\": [1, 2, 2, 3, 3, 3, 4, 5],\n \"b\": [1, 1, 0, 0, 0, 1, 0, 0],\n },\n )\n ddf = dd.from_pandas(df, 4)\n result = ddf[ddf.b == 1].set_index(\"a\", sorted=True)\n expected = df[df.b == 1].set_index(\"a\")\n\n assert result.divisions == (1.0, 3.0, 3.0)\n assert_eq(result, expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_nan_partition_test_compute_current_divisions_nan_partition.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_nan_partition_test_compute_current_divisions_nan_partition.None_1", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1186, "end_line": 1199, "span_ids": ["test_compute_current_divisions_nan_partition"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_compute_current_divisions_nan_partition():\n # Compute divisions 1 null partition\n a = d[d.a > 3].sort_values(\"a\")\n divisions = a.compute_current_divisions(\"a\")\n assert divisions == (4, 5, 8, 9)\n a.divisions = divisions\n assert_eq(a, a, check_divisions=False)\n\n # Compute divisions with 0 null partitions\n a = d[d.a > 1].sort_values(\"a\")\n divisions = a.compute_current_divisions(\"a\")\n assert divisions == (2, 4, 7, 9)\n a.divisions = divisions\n assert_eq(a, a, check_divisions=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_overlap_test_compute_current_divisions_overlap.with_pytest_warns_UserWar.assert_len_p_for_p_in_b": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_overlap_test_compute_current_divisions_overlap.with_pytest_warns_UserWar.assert_len_p_for_p_in_b", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1202, "end_line": 1209, "span_ids": ["test_compute_current_divisions_overlap"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_current_divisions_overlap():\n A = pd.DataFrame({\"key\": [1, 2, 3, 4, 4, 5, 6, 7], \"value\": list(\"abcd\" * 2)})\n a = dd.from_pandas(A, npartitions=2)\n with pytest.warns(UserWarning, match=\"Partitions have overlapping values\"):\n divisions = a.compute_current_divisions(\"key\")\n b = a.set_index(\"key\", divisions=divisions)\n assert b.divisions == (1, 4, 7)\n assert [len(p) for p in b.partitions] == [3, 5]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_overlap_2_test_compute_current_divisions_overlap_2.with_pytest_warns_UserWar.ddf2_compute_current_divi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_compute_current_divisions_overlap_2_test_compute_current_divisions_overlap_2.with_pytest_warns_UserWar.ddf2_compute_current_divi", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1212, "end_line": 1222, "span_ids": ["test_compute_current_divisions_overlap_2"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_current_divisions_overlap_2():\n data = pd.DataFrame(\n index=pd.Index(\n [\"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"C\"],\n name=\"index\",\n )\n )\n ddf1 = dd.from_pandas(data, npartitions=2)\n ddf2 = ddf1.clear_divisions().repartition(8)\n with pytest.warns(UserWarning, match=\"Partitions have overlapping values\"):\n ddf2.compute_current_divisions()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_single_partition_test_sort_values_single_partition.dd_assert_eq_got_expect_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_sort_values_single_partition_test_sort_values_single_partition.dd_assert_eq_got_expect_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1316, "end_line": 1330, "span_ids": ["test_sort_values_single_partition"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ascending\", [True, False, [False, True], [True, False]])\n@pytest.mark.parametrize(\"by\", [[\"a\", \"b\"], [\"b\", \"a\"]])\n@pytest.mark.parametrize(\"nelem\", [10, 500])\ndef test_sort_values_single_partition(nelem, by, ascending):\n np.random.seed(0)\n df = pd.DataFrame()\n df[\"a\"] = np.ascontiguousarray(np.arange(nelem)[::-1])\n df[\"b\"] = np.arange(100, nelem + 100)\n ddf = dd.from_pandas(df, npartitions=1)\n\n # run on single-threaded scheduler for debugging purposes\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=by, ascending=ascending)\n expect = df.sort_values(by=by, ascending=ascending)\n dd.assert_eq(got, expect, check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_values_raises_test_noop.assert__noop_test_None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_shuffle.py_test_shuffle_values_raises_test_noop.assert__noop_test_None", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "file_type": "text/x-python", "category": "test", "start_line": 1361, "end_line": 1381, "span_ids": ["test_noop", "test_shuffle_values_raises", "test_shuffle_by_as_list"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_shuffle_values_raises():\n df = pd.DataFrame({\"a\": [1, 3, 2]})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(\n ValueError, match=\"na_position must be either 'first' or 'last'\"\n ):\n ddf.sort_values(by=\"a\", na_position=\"invalid\")\n\n\ndef test_shuffle_by_as_list():\n df = pd.DataFrame({\"a\": [1, 3, 2]})\n ddf = dd.from_pandas(df, npartitions=3)\n with dask.config.set(scheduler=\"single-threaded\"):\n got = ddf.sort_values(by=[\"a\"], npartitions=\"auto\", ascending=True)\n expect = pd.DataFrame({\"a\": [1, 2, 3]})\n dd.assert_eq(got, expect, check_index=False)\n\n\ndef test_noop():\n assert _noop(1, None) == 1\n assert _noop(\"test\", None) == \"test\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta_test_make_meta.meta_15.make_meta_1_0_parent_met": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta_test_make_meta.meta_15.make_meta_1_0_parent_met", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 40, "end_line": 140, "span_ids": ["test_make_meta"], "tokens": 864}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_meta():\n df = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": list(\"abc\"), \"c\": [1.0, 2.0, 3.0]}, index=[10, 20, 30]\n )\n\n # Pandas dataframe\n meta = make_meta(df)\n assert len(meta) == 0\n assert (meta.dtypes == df.dtypes).all()\n assert isinstance(meta.index, type(df.index))\n\n # Pandas series\n meta = make_meta(df.a)\n assert len(meta) == 0\n assert meta.dtype == df.a.dtype\n assert isinstance(meta.index, type(df.index))\n\n # Pandas index\n meta = make_meta(df.index)\n assert isinstance(meta, type(df.index))\n assert len(meta) == 0\n\n # Dask object\n ddf = dd.from_pandas(df, npartitions=2)\n assert make_meta(ddf) is ddf._meta\n\n # Dict\n meta = make_meta({\"a\": \"i8\", \"b\": \"O\", \"c\": \"f8\"})\n assert isinstance(meta, pd.DataFrame)\n assert len(meta) == 0\n assert (meta.dtypes == df.dtypes).all()\n assert isinstance(meta.index, pd.RangeIndex)\n\n # List\n meta = make_meta([(\"a\", \"i8\"), (\"c\", \"f8\"), (\"b\", \"O\")])\n assert (meta.columns == [\"a\", \"c\", \"b\"]).all()\n assert len(meta) == 0\n assert (meta.dtypes == df.dtypes[meta.dtypes.index]).all()\n assert isinstance(meta.index, pd.RangeIndex)\n\n # Tuple\n meta = make_meta((\"a\", \"i8\"))\n assert isinstance(meta, pd.Series)\n assert len(meta) == 0\n assert meta.dtype == \"i8\"\n assert meta.name == \"a\"\n\n # Iterable\n class CustomMetadata(Iterable):\n \"\"\"Custom class iterator returning pandas types.\"\"\"\n\n def 
__init__(self, max=0):\n self.types = [(\"a\", \"i8\"), (\"c\", \"f8\"), (\"b\", \"O\")]\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self.types):\n ret = self.types[self.n]\n self.n += 1\n return ret\n else:\n raise StopIteration\n\n meta = make_meta(CustomMetadata())\n assert (meta.columns == [\"a\", \"c\", \"b\"]).all()\n assert len(meta) == 0\n assert (meta.dtypes == df.dtypes[meta.dtypes.index]).all()\n assert isinstance(meta.index, pd.RangeIndex)\n\n # With index\n idx = pd.Index([1, 2], name=\"foo\")\n meta = make_meta(\n {\"a\": \"i8\", \"b\": \"i4\"},\n index=idx,\n )\n assert type(meta.index) is type(idx)\n assert meta.index.dtype == \"int64\"\n assert len(meta.index) == 0\n\n meta = make_meta((\"a\", \"i8\"), index=idx)\n assert type(meta.index) is type(idx)\n assert meta.index.dtype == \"int64\"\n assert len(meta.index) == 0\n\n # Categoricals\n meta = make_meta({\"a\": \"category\"}, parent_meta=df)\n assert len(meta.a.cat.categories) == 1\n assert meta.a.cat.categories[0] == UNKNOWN_CATEGORIES\n meta = make_meta((\"a\", \"category\"), parent_meta=df)\n assert len(meta.cat.categories) == 1\n assert meta.cat.categories[0] == UNKNOWN_CATEGORIES\n\n # Numpy scalar\n meta = make_meta(np.float64(1.0), parent_meta=df)\n assert isinstance(meta, np.float64)\n\n # Python scalar\n meta = make_meta(1.0, parent_meta=df)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta.None_36_test_make_meta.assert_pytest_raises_Type": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_utils_dataframe.py_test_make_meta.None_36_test_make_meta.assert_pytest_raises_Type", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_utils_dataframe.py", "file_name": "test_utils_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 141, "end_line": 160, "span_ids": ["test_make_meta"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_meta():\n # ... 
other code\n assert isinstance(meta, np.float64)\n\n # Timestamp\n x = pd.Timestamp(2000, 1, 1)\n meta = make_meta(x, parent_meta=df)\n assert meta is x\n\n # DatetimeTZDtype\n x = pd.DatetimeTZDtype(tz=\"UTC\")\n meta = make_meta(x)\n assert meta == pd.Timestamp(1, tz=x.tz, unit=x.unit)\n\n # Dtype expressions\n meta = make_meta(\"i8\", parent_meta=df)\n assert isinstance(meta, np.int64)\n meta = make_meta(float, parent_meta=df)\n assert isinstance(meta, np.dtype(float).type)\n meta = make_meta(np.dtype(\"bool\"), parent_meta=df)\n assert isinstance(meta, np.bool_)\n assert pytest.raises(TypeError, lambda: make_meta(None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_np_getnanos.try_.except_ValueError_.return.None": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py_np_getnanos.try_.except_ValueError_.return.None", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 18, "span_ids": ["imports", "getnanos"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import numpy as np\nimport pandas as pd\nfrom pandas.core.resample import Resampler as pd_Resampler\n\nfrom ...base import tokenize\nfrom ...highlevelgraph import HighLevelGraph\nfrom ...utils import _deprecated, derived_from\nfrom .. 
import methods\nfrom .._compat import PANDAS_GT_140\nfrom ..core import DataFrame, Series\n\n\n@_deprecated(after_version=\"2022.02.0\")\ndef getnanos(rule):\n try:\n return getattr(rule, \"nanos\", None)\n except ValueError:\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_series__resample_series.return.out_reindex_new_index_fi": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tseries/resample.py__resample_series__resample_series.return.out_reindex_new_index_fi", "embedding": null, "metadata": {"file_path": "dask/dataframe/tseries/resample.py", "file_name": "resample.py", "file_type": "text/x-python", "category": "implementation", "start_line": 21, "end_line": 61, "span_ids": ["_resample_series"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _resample_series(\n series,\n start,\n end,\n reindex_closed,\n rule,\n resample_kwargs,\n how,\n fill_value,\n how_args,\n how_kwargs,\n):\n out = getattr(series.resample(rule, **resample_kwargs), how)(\n *how_args, **how_kwargs\n )\n\n if PANDAS_GT_140:\n if reindex_closed is None:\n inclusive = \"both\"\n else:\n inclusive = reindex_closed\n closed_kwargs = {\"inclusive\": inclusive}\n else:\n closed_kwargs = {\"closed\": reindex_closed}\n\n new_index = pd.date_range(\n start.tz_localize(None),\n end.tz_localize(None),\n freq=rule,\n **closed_kwargs,\n name=out.index.name,\n ).tz_localize(start.tz, nonexistent=\"shift_forward\")\n\n if not out.index.isin(new_index).all():\n raise ValueError(\n \"Index is not contained within new index. 
This can often be \"\n \"resolved by using larger partitions, or unambiguous \"\n \"frequencies: 'Q', 'A'...\"\n )\n\n return out.reindex(new_index, fill_value=fill_value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_from___future___import_an_is_float_na_dtype.return.isinstance_dtype_types_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/utils.py_from___future___import_an_is_float_na_dtype.return.isinstance_dtype_types_", "embedding": null, "metadata": {"file_path": "dask/dataframe/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 65, "span_ids": ["imports", "is_float_na_dtype", "is_integer_na_dtype"], "tokens": 452}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport math\nimport re\nimport sys\nimport textwrap\nimport traceback\nfrom collections.abc import Iterator, Mapping\nfrom contextlib import contextmanager\nfrom numbers import Number\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_scalar # noqa: F401\nfrom pandas.api.types import is_categorical_dtype, is_dtype_equal\n\nfrom ..base import get_scheduler, is_dask_collection\nfrom ..core import get_deps\nfrom ..utils import is_arraylike # noqa: F401\nfrom ..utils import asciitable\nfrom ..utils import is_dataframe_like as dask_is_dataframe_like\nfrom ..utils import is_index_like as dask_is_index_like\nfrom ..utils import is_series_like as dask_is_series_like\nfrom ..utils import typename\nfrom . import _dtypes # noqa: F401 register pandas extension types\nfrom . import methods\nfrom ._compat import PANDAS_GT_110, PANDAS_GT_120, tm # noqa: F401\nfrom .dispatch import make_meta # noqa : F401\nfrom .dispatch import make_meta_obj, meta_nonempty # noqa : F401\nfrom .extensions import make_scalar\n\nmeta_object_types: tuple[type, ...] 
= (pd.Series, pd.DataFrame, pd.Index, pd.MultiIndex)\ntry:\n import scipy.sparse as sp\n\n meta_object_types += (sp.spmatrix,)\nexcept ImportError:\n pass\n\n\ndef is_integer_na_dtype(t):\n dtype = getattr(t, \"dtype\", t)\n types = (\n pd.Int8Dtype,\n pd.Int16Dtype,\n pd.Int32Dtype,\n pd.Int64Dtype,\n pd.UInt8Dtype,\n pd.UInt16Dtype,\n pd.UInt32Dtype,\n pd.UInt64Dtype,\n )\n return isinstance(dtype, types)\n\n\ndef is_float_na_dtype(t):\n if not PANDAS_GT_120:\n return False\n\n dtype = getattr(t, \"dtype\", t)\n types = (\n pd.Float32Dtype,\n pd.Float64Dtype,\n )\n return isinstance(dtype, types)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_contextlib_dsk2._a_1_b_2_c_sl": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_contextlib_dsk2._a_1_b_2_c_sl", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 27, "span_ids": ["imports"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import contextlib\nimport os\nimport warnings\nfrom operator import add, mul\n\nimport pytest\n\nfrom dask.diagnostics import CacheProfiler, Profiler, ResourceProfiler\nfrom dask.diagnostics.profile_visualize import BOKEH_VERSION\nfrom dask.threaded import get\nfrom dask.utils import apply, tmpfile\nfrom dask.utils_test import slowadd\n\ntry:\n import bokeh\nexcept ImportError:\n bokeh = None\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\n\nprof = Profiler()\n\ndsk = {\"a\": 1, \"b\": 2, \"c\": (add, \"a\", \"b\"), \"d\": (mul, \"a\", \"b\"), \"e\": (mul, \"c\", \"d\")}\ndsk2 = {\"a\": 1, \"b\": 2, \"c\": (slowadd, \"a\", \"b\")}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__Tools_to_modify_alread_T.TypeVar_T_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py__Tools_to_modify_alread_T.TypeVar_T_", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 26, "span_ids": ["docstring"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Tools to modify already existing dask graphs. 
Unlike in :mod:`dask.optimization`, the\noutput collections produced by this module are typically not functionally equivalent to\ntheir inputs.\n\"\"\"\nfrom __future__ import annotations\n\nimport uuid\nfrom collections.abc import Callable, Hashable\nfrom typing import Literal, TypeVar\n\nfrom .base import (\n clone_key,\n get_collection_names,\n get_name_from_key,\n replace_name_in_key,\n tokenize,\n unpack_collections,\n)\nfrom .blockwise import blockwise\nfrom .core import flatten\nfrom .delayed import Delayed, delayed\nfrom .highlevelgraph import HighLevelGraph, Layer, MaterializedLayer\n\n__all__ = (\"bind\", \"checkpoint\", \"clone\", \"wait_on\")\n\nT = TypeVar(\"T\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_bind_bind._parents_None_is_a_speci": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_bind_bind._parents_None_is_a_speci", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 204, "end_line": 283, "span_ids": ["bind"], "tokens": 736}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bind(\n children: T,\n parents,\n *,\n omit=None,\n seed: Hashable = None,\n assume_layers: bool = True,\n split_every: float | Literal[False] | None = None,\n) -> T:\n \"\"\"\n Make ``children`` collection(s), optionally omitting sub-collections, dependent on\n ``parents`` collection(s). Two examples follow.\n\n The first example creates an array ``b2`` whose computation first computes an array\n ``a`` completely and then computes ``b`` completely, recomputing ``a`` in the\n process:\n\n >>> import dask\n >>> import dask.array as da\n >>> a = da.ones(4, chunks=2)\n >>> b = a + 1\n >>> b2 = bind(b, a)\n >>> len(b2.dask)\n 9\n >>> b2.compute()\n array([2., 2., 2., 2.])\n\n The second example creates arrays ``b3`` and ``c3``, whose computation first\n computes an array ``a`` and then computes the additions, this time not\n recomputing ``a`` in the process:\n\n >>> c = a + 2\n >>> b3, c3 = bind((b, c), a, omit=a)\n >>> len(b3.dask), len(c3.dask)\n (7, 7)\n >>> dask.compute(b3, c3)\n (array([2., 2., 2., 2.]), array([3., 3., 3., 3.]))\n\n Parameters\n ----------\n children\n Dask collection or nested structure of Dask collections\n parents\n Dask collection or nested structure of Dask collections\n omit\n Dask collection or nested structure of Dask collections\n seed\n Hashable used to seed the key regeneration. Omit to default to a random number\n that will produce different keys at every call.\n assume_layers\n True\n Use a fast algorithm that works at layer level, which assumes that all\n collections in ``children`` and ``omit``\n\n #. use :class:`~dask.highlevelgraph.HighLevelGraph`,\n #. define the ``__dask_layers__()`` method, and\n #. 
never had their graphs squashed and rebuilt between the creation of the\n ``omit`` collections and the ``children`` collections; in other words if\n the keys of the ``omit`` collections can be found among the keys of the\n ``children`` collections, then the same must also hold true for the\n layers.\n False\n Use a slower algorithm that works at keys level, which makes none of the\n above assumptions.\n split_every\n See :func:`checkpoint`\n\n Returns\n -------\n Same as ``children``\n Dask collection or structure of dask collection equivalent to ``children``,\n which compute to the same values. All keys of ``children`` will be regenerated,\n up to and excluding the keys of ``omit``. Nodes immediately above ``omit``, or\n the leaf nodes if the collections in ``omit`` are not found, are prevented from\n computing until all collections in ``parents`` have been fully computed.\n \"\"\"\n if seed is None:\n seed = uuid.uuid4().bytes\n\n # parents=None is a special case invoked by the one-liner wrapper clone() below\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_bind.blocker_bind.return.repack_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/graph_manipulation.py_bind.blocker_bind.return.repack_", "embedding": null, "metadata": {"file_path": "dask/graph_manipulation.py", "file_name": "graph_manipulation.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 304, "span_ids": ["bind"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def bind(\n children: T,\n parents,\n *,\n omit=None,\n seed: Hashable = None,\n assume_layers: bool = True,\n split_every: float | Literal[False] | None = None,\n) -> T:\n # ... 
other code\n blocker = (\n checkpoint(parents, split_every=split_every) if parents is not None else None\n )\n\n omit, _ = unpack_collections(omit)\n if assume_layers:\n # Set of all the top-level layers of the collections in omit\n omit_layers = {layer for coll in omit for layer in coll.__dask_layers__()}\n omit_keys = set()\n else:\n omit_layers = set()\n # Set of *all* the keys, not just the top-level ones, of the collections in omit\n omit_keys = {key for coll in omit for key in coll.__dask_graph__()}\n\n unpacked_children, repack = unpack_collections(children)\n return repack(\n [\n _bind_one(child, blocker, omit_layers, omit_keys, seed)\n for child in unpacked_children\n ]\n )[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_from___future___import_an_compute_layer_dependencies.return.ret": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_from___future___import_an_compute_layer_dependencies.return.ret", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 33, "span_ids": ["imports", "compute_layer_dependencies"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport abc\nimport copy\nimport html\nfrom collections.abc import Hashable, Iterable, Mapping, MutableMapping, Set\nfrom typing import Any\n\nimport tlz as toolz\n\nfrom . 
import config\nfrom .base import clone_key, flatten, is_dask_collection\nfrom .core import keys_in_tasks, reverse_dict\nfrom .utils import ensure_dict, key_split, stringify\nfrom .utils_test import add, inc # noqa: F401\nfrom .widgets import get_template\n\n\ndef compute_layer_dependencies(layers):\n \"\"\"Returns the dependencies between layers\"\"\"\n\n def _find_layer_containing_key(key):\n for k, v in layers.items():\n if key in v:\n return k\n raise RuntimeError(f\"{repr(key)} not found\")\n\n all_keys = {key for layer in layers.values() for key in layer}\n ret = {k: set() for k in layers}\n for k, v in layers.items():\n for key in keys_in_tasks(all_keys - v.keys(), v.values()):\n ret[k].add(_find_layer_containing_key(key))\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack___Layer.__dask_distributed_pack__.dependencies_update_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack___Layer.__dask_distributed_pack__.dependencies_update_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 317, "end_line": 409, "span_ids": ["Layer.__dask_distributed_pack__"], "tokens": 808}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def __dask_distributed_pack__(\n self,\n all_hlg_keys: Iterable[Hashable],\n known_key_dependencies: Mapping[Hashable, set],\n client,\n client_keys: Iterable[Hashable],\n ) -> Any:\n \"\"\"Pack the layer for scheduler communication in Distributed\n\n This method should pack its current state and is called by the Client when\n communicating with the Scheduler.\n The Scheduler will then use .__dask_distributed_unpack__(data, ...) 
to unpack\n the state, materialize the layer, and merge it into the global task graph.\n\n The returned state must be compatible with Distributed's scheduler, which\n means it must obey the following:\n - Serializable by msgpack (notice, msgpack converts lists to tuples)\n - All remote data must be unpacked (see unpack_remotedata())\n - All keys must be converted to strings now or when unpacking\n - All tasks must be serialized (see dumps_task())\n\n The default implementation materializes the layer, thus layers such as Blockwise\n and ShuffleLayer should implement a specialized pack and unpack function in\n order to avoid materialization.\n\n Parameters\n ----------\n all_hlg_keys: Iterable[Hashable]\n All keys in the high level graph\n known_key_dependencies: Mapping[Hashable, set]\n Already known dependencies\n client: distributed.Client\n The client calling this function.\n client_keys : Iterable[Hashable]\n List of keys requested by the client.\n\n Returns\n -------\n state: Object serializable by msgpack\n Scheduler compatible state of the layer\n \"\"\"\n from distributed.client import Future\n from distributed.utils import CancelledError\n from distributed.utils_comm import subs_multiple, unpack_remotedata\n from distributed.worker import dumps_task\n\n dsk = dict(self)\n\n # Find aliases not in `client_keys` and substitute all matching keys\n # with its Future\n future_aliases = {\n k: v\n for k, v in dsk.items()\n if isinstance(v, Future) and k not in client_keys\n }\n if future_aliases:\n dsk = subs_multiple(dsk, future_aliases)\n\n # Remove `Future` objects from graph and note any future dependencies\n dsk2 = {}\n fut_deps = {}\n for k, v in dsk.items():\n dsk2[k], futs = unpack_remotedata(v, byte_keys=True)\n if futs:\n fut_deps[k] = futs\n dsk = dsk2\n\n # Check that any collected futures are valid\n unpacked_futures = set.union(*fut_deps.values()) if fut_deps else set()\n for future in unpacked_futures:\n if future.client is not client:\n raise ValueError(\n \"Inputs contain futures that were created by another client.\"\n )\n if stringify(future.key) not in client.futures:\n raise CancelledError(stringify(future.key))\n\n # Calculate dependencies without re-calculating already known dependencies\n # - Start with known dependencies\n dependencies = known_key_dependencies.copy()\n # - Remove aliases for any tasks that depend on both an alias and a future.\n # These can only be found in the known_key_dependencies cache, since\n # any dependencies computed in this method would have already had the\n # aliases removed.\n if future_aliases:\n alias_keys = set(future_aliases)\n dependencies = {k: v - alias_keys for k, v in dependencies.items()}\n # - Add in deps for any missing keys\n missing_keys = dsk.keys() - dependencies.keys()\n dependencies.update(\n (k, keys_in_tasks(all_hlg_keys, [dsk[k]], as_list=False))\n for k in missing_keys\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack__._Add_in_deps_for_any_t_Layer.__dask_distributed_pack__.return._dsk_dsk_dependencie": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_Layer.__dask_distributed_pack__._Add_in_deps_for_any_t_Layer.__dask_distributed_pack__.return._dsk_dsk_dependencie", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 410, "end_line": 426, "span_ids": ["Layer.__dask_distributed_pack__"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Layer(Mapping):\n\n def __dask_distributed_pack__(\n self,\n all_hlg_keys: Iterable[Hashable],\n known_key_dependencies: Mapping[Hashable, set],\n client,\n client_keys: Iterable[Hashable],\n ) -> Any:\n # - Add in deps for any tasks that depend on futures\n for k, futures in fut_deps.items():\n dependencies[k].update(f.key for f in futures)\n\n # The scheduler expect all keys to be strings\n dependencies = {\n stringify(k): {stringify(dep) for dep in deps}\n for k, deps in dependencies.items()\n }\n\n merged_hlg_keys = all_hlg_keys | dsk.keys()\n dsk = {\n stringify(k): stringify(v, exclusive=merged_hlg_keys)\n for k, v in dsk.items()\n }\n dsk = toolz.valmap(dumps_task, dsk)\n return {\"dsk\": dsk, \"dependencies\": dependencies}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__len___HighLevelGraph.keys.return.self_to_dict_keys_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/highlevelgraph.py_HighLevelGraph.__len___HighLevelGraph.keys.return.self_to_dict_keys_", "embedding": null, "metadata": {"file_path": "dask/highlevelgraph.py", "file_name": "highlevelgraph.py", "file_type": "text/x-python", "category": "implementation", "start_line": 734, "end_line": 759, "span_ids": ["HighLevelGraph.__len__", "HighLevelGraph.keys", "HighLevelGraph.__iter__", "HighLevelGraph.to_dict"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HighLevelGraph(Mapping):\n\n def __len__(self) -> int:\n # NOTE: this will double-count keys that are duplicated between layers, so it's\n # possible that `len(hlg) > len(hlg.to_dict())`. 
However, duplicate keys should\n # not occur through normal use, and their existence would usually be a bug.\n # So we ignore this case in favor of better performance.\n # https://github.com/dask/dask/issues/7271\n return sum(len(layer) for layer in self.layers.values())\n\n def __iter__(self):\n return iter(self.to_dict())\n\n def to_dict(self) -> dict:\n \"\"\"Efficiently convert to plain dict. This method is faster than dict(self).\"\"\"\n try:\n return self._to_dict\n except AttributeError:\n out = self._to_dict = ensure_dict(self)\n return out\n\n def keys(self) -> Set:\n \"\"\"Get all keys of all the layers.\n\n This will in many cases materialize layers, which makes it a relatively\n expensive operation. See :meth:`get_all_external_keys` for a faster alternative.\n \"\"\"\n return self.to_dict().keys()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_from___future___import_an_CallableLazyImport.__call__.return.import_term_self_function": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_from___future___import_an_CallableLazyImport.__call__.return.import_term_self_function", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 46, "span_ids": ["imports", "CallableLazyImport.__init__", "CallableLazyImport", "CallableLazyImport.__call__"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport functools\nimport math\nimport operator\nfrom collections import defaultdict\nfrom itertools import product\nfrom typing import Any\n\nimport tlz as toolz\nfrom tlz.curried import map\n\nfrom .base import tokenize\nfrom .blockwise import Blockwise, BlockwiseDep, BlockwiseDepDict, blockwise_token\nfrom .core import flatten, keys_in_tasks\nfrom .highlevelgraph import Layer\nfrom .utils import (\n apply,\n cached_cumsum,\n concrete,\n insert,\n stringify,\n stringify_collection_keys,\n)\n\n#\n##\n### General Utilities\n##\n#\n\n\nclass CallableLazyImport:\n \"\"\"Function Wrapper for Lazy Importing.\n\n This Class should only be used when materializing a graph\n on a distributed scheduler.\n \"\"\"\n\n def __init__(self, function_path):\n self.function_path = function_path\n\n def __call__(self, *args, **kwargs):\n from distributed.utils import import_term\n\n return import_term(self.function_path)(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_None_5_ArrayBlockwiseDep.__dask_distributed_unpack__.return.cls_state_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_None_5_ArrayBlockwiseDep.__dask_distributed_unpack__.return.cls_state_", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 49, "end_line": 81, "span_ids": ["ArrayBlockwiseDep.__dask_distributed_unpack__", "ArrayBlockwiseDep.__init__", "ArrayBlockwiseDep", "CallableLazyImport.__call__", "ArrayBlockwiseDep.__dask_distributed_pack__", "ArrayBlockwiseDep.__getitem__"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n##\n### Array Layers & Utilities\n##\n#\n\n\nclass ArrayBlockwiseDep(BlockwiseDep):\n \"\"\"\n Blockwise dep for array-likes, which only needs chunking\n information to compute its data.\n \"\"\"\n\n chunks: tuple[tuple[int, ...], ...]\n numblocks: tuple[int, ...]\n produces_tasks: bool = False\n\n def __init__(self, chunks: tuple[tuple[int, ...], ...]):\n self.chunks = chunks\n self.numblocks = tuple(len(chunk) for chunk in chunks)\n self.produces_tasks = False\n\n def __getitem__(self, idx: tuple[int, ...]):\n raise NotImplementedError(\"Subclasses must implement __getitem__\")\n\n def __dask_distributed_pack__(\n self, required_indices: list[tuple[int, ...]] | None = None\n ):\n return {\"chunks\": self.chunks}\n\n @classmethod\n def __dask_distributed_unpack__(cls, state):\n return cls(**state)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayChunkShapeDep_ArraySliceDep.__getitem__.return.tuple_slice_s_None_for": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_ArrayChunkShapeDep_ArraySliceDep.__getitem__.return.tuple_slice_s_None_for", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 84, "end_line": 102, "span_ids": ["ArraySliceDep.__init__", "ArraySliceDep", "ArraySliceDep.__getitem__", "ArrayChunkShapeDep", "ArrayChunkShapeDep.__getitem__"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrayChunkShapeDep(ArrayBlockwiseDep):\n \"\"\"Produce chunk shapes given a chunk index\"\"\"\n\n def __getitem__(self, idx: tuple[int, ...]):\n return tuple(chunk[i] for i, chunk in zip(idx, self.chunks))\n\n\nclass ArraySliceDep(ArrayBlockwiseDep):\n \"\"\"Produce slice(s) into the full-sized array given a chunk index\"\"\"\n\n starts: tuple[tuple[int, ...], ...]\n\n def __init__(self, chunks: tuple[tuple[int, ...], ...]):\n super().__init__(chunks)\n self.starts = tuple(cached_cumsum(c, initial_zero=True) for c in chunks)\n\n def __getitem__(self, idx: 
tuple):\n loc = tuple((start[i], start[i + 1]) for i, start in zip(idx, self.starts))\n return tuple(slice(*s, None) for s in loc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_None_10_SimpleShuffleLayer.__len__.return.len_self__dict_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_None_10_SimpleShuffleLayer.__len__.return.len_self__dict_", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 339, "end_line": 453, "span_ids": ["SimpleShuffleLayer._dict", "SimpleShuffleLayer.get_output_keys", "SimpleShuffleLayer.__iter__", "fractional_slice", "SimpleShuffleLayer.__getitem__", "SimpleShuffleLayer.__len__", "SimpleShuffleLayer.get_split_keys", "SimpleShuffleLayer.__repr__", "SimpleShuffleLayer", "SimpleShuffleLayer.__init__", "SimpleShuffleLayer.is_materialized"], "tokens": 829}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n##\n### DataFrame Layers & Utilities\n##\n#\n\n\nclass SimpleShuffleLayer(Layer):\n \"\"\"Simple HighLevelGraph Shuffle layer\n\n High-level graph layer for a simple shuffle operation in which\n each output partition depends on all input partitions.\n\n Parameters\n ----------\n name : str\n Name of new shuffled output collection.\n column : str or list of str\n Column(s) to be used to map rows to output partitions (by hashing).\n npartitions : int\n Number of output partitions.\n npartitions_input : int\n Number of partitions in the original (un-shuffled) DataFrame.\n ignore_index: bool, default False\n Ignore index during shuffle. If ``True``, performance may improve,\n but index values will not be preserved.\n name_input : str\n Name of input collection.\n meta_input : pd.DataFrame-like object\n Empty metadata of input collection.\n parts_out : list of int (optional)\n List of required output-partition indices.\n annotations : dict (optional)\n Layer annotations\n \"\"\"\n\n def __init__(\n self,\n name,\n column,\n npartitions,\n npartitions_input,\n ignore_index,\n name_input,\n meta_input,\n parts_out=None,\n annotations=None,\n ):\n super().__init__(annotations=annotations)\n self.name = name\n self.column = column\n self.npartitions = npartitions\n self.npartitions_input = npartitions_input\n self.ignore_index = ignore_index\n self.name_input = name_input\n self.meta_input = meta_input\n self.parts_out = parts_out or range(npartitions)\n self.split_name = \"split-\" + self.name\n\n # The scheduling policy of Dask is generally depth-first,\n # which works great in most cases. However, in case of shuffle,\n # it increases the memory usage significantly. This is because\n # depth-first delays the freeing of the result of `shuffle_group()`\n # until the end of the shuffling.\n #\n # We address this by manually setting a high \"priority\" to the\n # `getitem()` (\"split\") tasks, using annotations. 
This forces a\n # breadth-first scheduling of the tasks that directly depend on\n # the `shuffle_group()` output, allowing that data to be freed\n # much earlier.\n #\n # See https://github.com/dask/dask/pull/6051 for a detailed discussion.\n self.annotations = self.annotations or {}\n if \"priority\" not in self.annotations:\n self.annotations[\"priority\"] = {}\n self.annotations[\"priority\"][\"__expanded_annotations__\"] = None\n self.annotations[\"priority\"].update({_key: 1 for _key in self.get_split_keys()})\n\n def get_split_keys(self):\n # Return SimpleShuffleLayer \"split\" keys\n return [\n stringify((self.split_name, part_out, part_in))\n for part_in in range(self.npartitions_input)\n for part_out in self.parts_out\n ]\n\n def get_output_keys(self):\n return {(self.name, part) for part in self.parts_out}\n\n def __repr__(self):\n return \"SimpleShuffleLayer<name='{}', npartitions={}>\".format(\n self.name, self.npartitions\n )\n\n def is_materialized(self):\n return hasattr(self, \"_cached_dict\")\n\n @property\n def _dict(self):\n \"\"\"Materialize full dict representation\"\"\"\n if hasattr(self, \"_cached_dict\"):\n return self._cached_dict\n else:\n dsk = self._construct_graph()\n self._cached_dict = dsk\n return self._cached_dict\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n return len(self._dict)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameIOLayer.project_columns_DataFrameIOLayer.__repr__.return._DataFrameIOLayer_name_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameIOLayer.project_columns_DataFrameIOLayer.__repr__.return._DataFrameIOLayer_name_", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1204, "end_line": 1234, "span_ids": ["DataFrameIOLayer.__repr__", "DataFrameIOLayer.project_columns"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameIOLayer(Blockwise):\n\n def project_columns(self, columns):\n \"\"\"Produce a column projection for this IO layer.\n Given a list of required output columns, this method\n returns the projected layer.\n \"\"\"\n if columns and (self.columns is None or columns < set(self.columns)):\n\n # Apply column projection in IO function\n try:\n io_func = self.io_func.project_columns(list(columns))\n except AttributeError:\n io_func = self.io_func\n\n layer = DataFrameIOLayer(\n (self.label or \"subset-\") + tokenize(self.name, columns),\n list(columns),\n self.inputs,\n io_func,\n label=self.label,\n produces_tasks=self.produces_tasks,\n annotations=self.annotations,\n )\n return layer\n else:\n # Default behavior\n return self\n\n def __repr__(self):\n return \"DataFrameIOLayer<name='{}', n_parts={}, columns={}>\".format(\n self.name, len(self.inputs), self.columns\n )", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction_DataFrameTreeReduction._make_key.return.name_parts_split_if_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction_DataFrameTreeReduction._make_key.return.name_parts_split_if_", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1237, "end_line": 1331, "span_ids": ["DataFrameTreeReduction", "DataFrameTreeReduction._make_key", "DataFrameTreeReduction.__init__"], "tokens": 802}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameTreeReduction(Layer):\n \"\"\"DataFrame Tree-Reduction Layer\n\n Parameters\n ----------\n name : str\n Name to use for the constructed layer.\n name_input : str\n Name of the input layer that is being reduced.\n npartitions_input : str\n Number of partitions in the input layer.\n concat_func : callable\n Function used by each tree node to reduce a list of inputs\n into a single output value. This function must accept only\n a list as its first positional argument.\n tree_node_func : callable\n Function used on the output of ``concat_func`` in each tree\n node. This function must accept the output of ``concat_func``\n as its first positional argument.\n finalize_func : callable, optional\n Function used in place of ``tree_node_func`` on the final tree\n node(s) to produce the final output for each split. By default,\n ``tree_node_func`` will be used.\n split_every : int, optional\n This argument specifies the maximum number of input nodes\n to be handled by any one task in the tree. Defaults to 32.\n split_out : int, optional\n This argument specifies the number of output nodes in the\n reduction tree. If ``split_out`` is set to an integer >=1, the\n input tasks must contain data that can be indexed by a ``getitem``\n operation with a key in the range ``[0, split_out)``.\n output_partitions : list, optional\n List of required output partitions. 
This parameter is used\n internally by Dask for high-level culling.\n tree_node_name : str, optional\n Name to use for intermediate tree-node tasks.\n \"\"\"\n\n name: str\n name_input: str\n npartitions_input: str\n concat_func: callable\n tree_node_func: callable\n finalize_func: callable | None\n split_every: int\n split_out: int\n output_partitions: list[int]\n tree_node_name: str\n widths: list[int]\n height: int\n\n def __init__(\n self,\n name: str,\n name_input: str,\n npartitions_input: str,\n concat_func: callable,\n tree_node_func: callable,\n finalize_func: callable | None = None,\n split_every: int = 32,\n split_out: int | None = None,\n output_partitions: list[int] | None = None,\n tree_node_name: str | None = None,\n annotations: dict[str, Any] | None = None,\n ):\n super().__init__(annotations=annotations)\n self.name = name\n self.name_input = name_input\n self.npartitions_input = npartitions_input\n self.concat_func = concat_func\n self.tree_node_func = tree_node_func\n self.finalize_func = finalize_func\n self.split_every = split_every\n self.split_out = split_out\n self.output_partitions = (\n list(range(self.split_out or 1))\n if output_partitions is None\n else output_partitions\n )\n self.tree_node_name = tree_node_name or \"tree_node-\" + self.name\n\n # Calculate tree widths and height\n # (Used to get output keys without materializing)\n parts = self.npartitions_input\n self.widths = [parts]\n while parts > 1:\n parts = math.ceil(parts / self.split_every)\n self.widths.append(parts)\n self.height = len(self.widths)\n\n def _make_key(self, *name_parts, split=0):\n # Helper function construct a key\n # with a \"split\" element when\n # bool(split_out) is True\n return name_parts + (split,) if self.split_out else name_parts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction._define_task_DataFrameTreeReduction._construct_graph.return.dsk": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction._define_task_DataFrameTreeReduction._construct_graph.return.dsk", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1333, "end_line": 1413, "span_ids": ["DataFrameTreeReduction._define_task", "DataFrameTreeReduction._construct_graph"], "tokens": 658}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameTreeReduction(Layer):\n\n def _define_task(self, input_keys, final_task=False):\n # Define nested concatenation and func task\n if final_task and self.finalize_func:\n outer_func = self.finalize_func\n else:\n outer_func = self.tree_node_func\n return (toolz.pipe, input_keys, self.concat_func, outer_func)\n\n def _construct_graph(self):\n \"\"\"Construct graph for a tree reduction.\"\"\"\n\n dsk = {}\n if not self.output_partitions:\n return dsk\n\n # Deal with `bool(split_out) == True`.\n # These cases require that the input 
tasks\n # return a type that enables getitem operation\n # with indices: [0, split_out)\n # Therefore, we must add \"getitem\" tasks to\n # select the appropriate element for each split\n name_input_use = self.name_input\n if self.split_out:\n name_input_use += \"-split\"\n for s in self.output_partitions:\n for p in range(self.npartitions_input):\n dsk[self._make_key(name_input_use, p, split=s)] = (\n operator.getitem,\n (self.name_input, p),\n s,\n )\n\n if self.height >= 2:\n # Loop over output splits\n for s in self.output_partitions:\n # Loop over reduction levels\n for depth in range(1, self.height):\n # Loop over reduction groups\n for group in range(self.widths[depth]):\n # Calculate inputs for the current group\n p_max = self.widths[depth - 1]\n lstart = self.split_every * group\n lstop = min(lstart + self.split_every, p_max)\n if depth == 1:\n # Input nodes are from input layer\n input_keys = [\n self._make_key(name_input_use, p, split=s)\n for p in range(lstart, lstop)\n ]\n else:\n # Input nodes are tree-reduction nodes\n input_keys = [\n self._make_key(\n self.tree_node_name, p, depth - 1, split=s\n )\n for p in range(lstart, lstop)\n ]\n\n # Define task\n if depth == self.height - 1:\n # Final Node (Use fused `self.tree_finalize` task)\n assert (\n group == 0\n ), f\"group = {group}, not 0 for final tree reduction task\"\n dsk[(self.name, s)] = self._define_task(\n input_keys, final_task=True\n )\n else:\n # Intermediate Node\n dsk[\n self._make_key(\n self.tree_node_name, group, depth, split=s\n )\n ] = self._define_task(input_keys, final_task=False)\n else:\n # Deal with single-partition case\n for s in self.output_partitions:\n input_keys = [self._make_key(name_input_use, 0, split=s)]\n dsk[(self.name, s)] = self._define_task(input_keys, final_task=True)\n\n return dsk", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__repr___DataFrameTreeReduction.cull.if_output_partitions_s.else_.return.self_deps": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__repr___DataFrameTreeReduction.cull.if_output_partitions_s.else_.return.self_deps", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1415, "end_line": 1498, "span_ids": ["DataFrameTreeReduction.__repr__", "DataFrameTreeReduction.cull", "DataFrameTreeReduction.__iter__", "DataFrameTreeReduction.get_output_keys", "DataFrameTreeReduction.__getitem__", "DataFrameTreeReduction._dict", "DataFrameTreeReduction._keys_to_output_partitions", "DataFrameTreeReduction._cull", "DataFrameTreeReduction.is_materialized", "DataFrameTreeReduction.__len__", "DataFrameTreeReduction._output_keys"], "tokens": 595}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameTreeReduction(Layer):\n\n def __repr__(self):\n return \"DataFrameTreeReduction<name='{}', input_name={}, split_out={}>\".format(\n self.name, self.name_input, 
self.split_out\n )\n\n def _output_keys(self):\n return {(self.name, s) for s in self.output_partitions}\n\n def get_output_keys(self):\n if hasattr(self, \"_cached_output_keys\"):\n return self._cached_output_keys\n else:\n output_keys = self._output_keys()\n self._cached_output_keys = output_keys\n return self._cached_output_keys\n\n def is_materialized(self):\n return hasattr(self, \"_cached_dict\")\n\n @property\n def _dict(self):\n \"\"\"Materialize full dict representation\"\"\"\n if hasattr(self, \"_cached_dict\"):\n return self._cached_dict\n else:\n dsk = self._construct_graph()\n self._cached_dict = dsk\n return self._cached_dict\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __iter__(self):\n return iter(self._dict)\n\n def __len__(self):\n # Start with \"base\" tree-reduction size\n tree_size = (sum(self.widths[1:]) or 1) * (self.split_out or 1)\n if self.split_out:\n # Add on \"split-*\" tasks used for `getitem` ops\n return tree_size + self.npartitions_input * len(self.output_partitions)\n return tree_size\n\n def _keys_to_output_partitions(self, keys):\n \"\"\"Simple utility to convert keys to output partition indices.\"\"\"\n splits = set()\n for key in keys:\n try:\n _name, _split = key\n except ValueError:\n continue\n if _name != self.name:\n continue\n splits.add(_split)\n return splits\n\n def _cull(self, output_partitions):\n return DataFrameTreeReduction(\n self.name,\n self.name_input,\n self.npartitions_input,\n self.concat_func,\n self.tree_node_func,\n finalize_func=self.finalize_func,\n split_every=self.split_every,\n split_out=self.split_out,\n output_partitions=output_partitions,\n tree_node_name=self.tree_node_name,\n annotations=self.annotations,\n )\n\n def cull(self, keys, all_keys):\n \"\"\"Cull a DataFrameTreeReduction HighLevelGraph layer\"\"\"\n deps = {\n (self.name, 0): {\n (self.name_input, i) for i in range(self.npartitions_input)\n }\n }\n output_partitions = self._keys_to_output_partitions(keys)\n if output_partitions != set(self.output_partitions):\n culled_layer = self._cull(output_partitions)\n return culled_layer, deps\n else:\n return self, deps", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__dask_distributed_pack___DataFrameTreeReduction.__dask_distributed_pack__.return._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__dask_distributed_pack___DataFrameTreeReduction.__dask_distributed_pack__.return._", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1500, "end_line": 1522, "span_ids": ["DataFrameTreeReduction.__dask_distributed_pack__"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameTreeReduction(Layer):\n\n def __dask_distributed_pack__(self, *args, **kwargs):\n from distributed.protocol.serialize import to_serialize\n\n # Pickle 
the (possibly) user-defined functions here\n _concat_func = to_serialize(self.concat_func)\n _tree_node_func = to_serialize(self.tree_node_func)\n if self.finalize_func:\n _finalize_func = to_serialize(self.finalize_func)\n else:\n _finalize_func = None\n\n return {\n \"name\": self.name,\n \"name_input\": self.name_input,\n \"npartitions_input\": self.npartitions_input,\n \"concat_func\": _concat_func,\n \"tree_node_func\": _tree_node_func,\n \"finalize_func\": _finalize_func,\n \"split_every\": self.split_every,\n \"split_out\": self.split_out,\n \"output_partitions\": self.output_partitions,\n \"tree_node_name\": self.tree_node_name,\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__dask_distributed_unpack___": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/layers.py_DataFrameTreeReduction.__dask_distributed_unpack___", "embedding": null, "metadata": {"file_path": "dask/layers.py", "file_name": "layers.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1524, "end_line": 1545, "span_ids": ["DataFrameTreeReduction.__dask_distributed_unpack__"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DataFrameTreeReduction(Layer):\n\n @classmethod\n def __dask_distributed_unpack__(cls, state, dsk, dependencies):\n from distributed.protocol.serialize import to_serialize\n\n # Materialize the layer\n raw = cls(**state)._construct_graph()\n\n # Convert all keys to strings and dump tasks\n raw = {stringify(k): stringify_collection_keys(v) for k, v in raw.items()}\n keys = raw.keys() | dsk.keys()\n deps = {k: keys_in_tasks(keys, [v]) for k, v in raw.items()}\n\n # Must use `to_serialize` on the entire task.\n # This is required because the task-tuples contain `Serialized`\n # function objects instead of real functions. Using `dumps_task`\n # may or may not correctly wrap the entire tuple in `to_serialize`.\n # So we use `to_serialize` here to be explicit. 
When the task\n # arrives at a worker, both the `Serialized` task-tuples and the\n # `Serialized` functions nested within them should be deserialized\n # automatically by the comm.\n return {\"dsk\": toolz.valmap(to_serialize, raw), \"deps\": deps}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_from___future___import_an__process_get_id.return.multiprocessing_current_p": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/multiprocessing.py_from___future___import_an__process_get_id.return.multiprocessing_current_p", "embedding": null, "metadata": {"file_path": "dask/multiprocessing.py", "file_name": "multiprocessing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["imports", "_process_get_id", "_reduce_method_descriptor", "impl"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport copyreg\nimport multiprocessing\nimport multiprocessing.pool\nimport os\nimport pickle\nimport sys\nimport traceback\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nfrom warnings import warn\n\nimport cloudpickle\n\nfrom . import config\nfrom .local import MultiprocessingPoolExecutor, get_async, reraise\nfrom .optimization import cull, fuse\nfrom .system import CPU_COUNT\nfrom .utils import ensure_dict\n\n\ndef _reduce_method_descriptor(m):\n return getattr, (m.__objclass__, m.__name__)\n\n\n# type(set.union) is used as a proxy to <class 'method_descriptor'>\ncopyreg.pickle(type(set.union), _reduce_method_descriptor)\n\n_dumps = partial(cloudpickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)\n_loads = cloudpickle.loads\n\n\ndef _process_get_id():\n return multiprocessing.current_process().ident", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_dataclasses_f3.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_dataclasses_f3.pass", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 62, "span_ids": ["imports", "f1", "f2", "f3"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import dataclasses\nimport datetime\nimport os\nimport subprocess\nimport sys\nimport time\nfrom collections import OrderedDict\nfrom concurrent.futures import 
Executor\nfrom operator import add, mul\nfrom typing import Union\n\nimport pytest\nfrom tlz import compose, curry, merge, partial\n\nimport dask\nimport dask.bag as db\nfrom dask import delayed\nfrom dask.base import (\n DaskMethodsMixin,\n clone_key,\n collections_to_dsk,\n compute,\n compute_as_if_collection,\n function_cache,\n get_collection_names,\n get_name_from_key,\n get_scheduler,\n is_dask_collection,\n named_schedulers,\n normalize_function,\n normalize_token,\n optimize,\n persist,\n replace_name_in_key,\n tokenize,\n unpack_collections,\n visualize,\n)\nfrom dask.core import literal\nfrom dask.delayed import Delayed\nfrom dask.diagnostics import Profiler\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.utils import tmpdir, tmpfile\nfrom dask.utils_test import dec, import_or_none, inc\n\nda = import_or_none(\"dask.array\")\ndd = import_or_none(\"dask.dataframe\")\nnp = import_or_none(\"numpy\")\nsp = import_or_none(\"scipy.sparse\")\npd = import_or_none(\"pandas\")\n\n\ndef f1(a, b, c=1):\n pass\n\n\ndef f2(a, b=1, c=2):\n pass\n\n\ndef f3(a):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_partial_func_args_kwargs_consistent_test_tokenize_partial_func_args_kwargs_consistent.assert_res_sol": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_partial_func_args_kwargs_consistent_test_tokenize_partial_func_args_kwargs_consistent.assert_res_sol", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 241, "span_ids": ["test_tokenize_partial_func_args_kwargs_consistent"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize_partial_func_args_kwargs_consistent():\n f = partial(f3, f2, c=f1)\n res = normalize_token(f)\n sol = (\n b\"\\x80\\x04\\x95\\x1f\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x14dask.tests.test_base\\x94\\x8c\\x02f3\\x94\\x93\\x94.\",\n (\n b\"\\x80\\x04\\x95\\x1f\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x14dask.tests.test_base\\x94\\x8c\\x02f2\\x94\\x93\\x94.\",\n ),\n (\n (\n \"c\",\n b\"\\x80\\x04\\x95\\x1f\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x8c\\x14dask.tests.test_base\\x94\\x8c\\x02f1\\x94\\x93\\x94.\",\n ),\n ),\n )\n assert res == sol", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_base_test_tokenize_callable._Consistent_token": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_normalize_base_test_tokenize_callable._Consistent_token", "embedding": null, "metadata": {"file_path": 
"dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 244, "end_line": 263, "span_ids": ["test_tokenize_object", "test_tokenize_callable", "test_normalize_base"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_normalize_base():\n for i in [1, 1.1, \"1\", slice(1, 2, 3), datetime.date(2021, 6, 25)]:\n assert normalize_token(i) is i\n\n\ndef test_tokenize_object():\n o = object()\n # Defaults to non-deterministic tokenization\n assert normalize_token(o) != normalize_token(o)\n\n with dask.config.set({\"tokenize.ensure-deterministic\": True}):\n with pytest.raises(RuntimeError, match=\"cannot be deterministically hashed\"):\n normalize_token(o)\n\n\ndef test_tokenize_callable():\n def my_func(a, b, c=1):\n return a + b + c\n\n assert tokenize(my_func) == tokenize(my_func) # Consistent token", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_sequences_BDataClass.dataclasses_make_dataclas": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_sequences_BDataClass.dataclasses_make_dataclas", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 387, "end_line": 419, "span_ids": ["test_tokenize_dict", "impl:11", "test_tokenize_set", "test_tokenize_ordered_dict", "test_tokenize_sequences"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not np\")\ndef test_tokenize_sequences():\n assert tokenize([1]) != tokenize([2])\n assert tokenize([1]) != tokenize((1,))\n assert tokenize([1]) == tokenize([1])\n\n x = np.arange(2000) # long enough to drop information in repr\n y = np.arange(2000)\n y[1000] = 0 # middle isn't printed in repr\n assert tokenize([x]) != tokenize([y])\n\n\ndef test_tokenize_dict():\n assert tokenize({\"x\": 1, 1: \"x\"}) == tokenize({\"x\": 1, 1: \"x\"})\n\n\ndef test_tokenize_set():\n assert tokenize({1, 2, \"x\", (1, \"x\")}) == tokenize({1, 2, \"x\", (1, \"x\")})\n\n\ndef test_tokenize_ordered_dict():\n from collections import OrderedDict\n\n a = OrderedDict([(\"a\", 1), (\"b\", 2)])\n b = OrderedDict([(\"a\", 1), (\"b\", 2)])\n c = OrderedDict([(\"b\", 2), (\"a\", 1)])\n\n assert tokenize(a) == tokenize(b)\n assert tokenize(a) != tokenize(c)\n\n\nADataClass = dataclasses.make_dataclass(\"ADataClass\", [(\"a\", int)])\nBDataClass = dataclasses.make_dataclass(\"BDataClass\", [(\"a\", Union[int, float])])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dataclass_test_tokenize_dataclass.None_6": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dataclass_test_tokenize_dataclass.None_6", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 422, "end_line": 445, "span_ids": ["test_tokenize_dataclass"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tokenize_dataclass():\n a1 = ADataClass(1)\n a2 = ADataClass(2)\n assert tokenize(a1) == tokenize(a1)\n assert tokenize(a1) != tokenize(a2)\n\n # Same field names and values, but dataclass types are different\n b1 = BDataClass(1)\n assert tokenize(a1) != tokenize(b1)\n\n class SubA(ADataClass):\n pass\n\n assert dataclasses.is_dataclass(\n SubA\n ), \"Python regression: SubA should be considered a dataclass\"\n assert tokenize(SubA(1)) == tokenize(SubA(1))\n assert tokenize(SubA(1)) != tokenize(a1)\n\n # Same name, same values, new definition: tokenize differently\n ADataClassRedefinedDifferently = dataclasses.make_dataclass(\n \"ADataClass\", [(\"a\", Union[int, str])]\n )\n assert tokenize(a1) != tokenize(ADataClassRedefinedDifferently(1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_is_dask_collection_test_unpack_collections.build.return.t": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_is_dask_collection_test_unpack_collections.build.return.t", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 536, "end_line": 576, "span_ids": ["test_unpack_collections", "test_is_dask_collection"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_is_dask_collection():\n class DummyCollection:\n def __init__(self, dsk):\n self.dask = dsk\n\n def __dask_graph__(self):\n return self.dask\n\n x = delayed(1) + 2\n assert is_dask_collection(x)\n assert not is_dask_collection(2)\n assert is_dask_collection(DummyCollection({}))\n assert not is_dask_collection(DummyCollection(None))\n assert not is_dask_collection(DummyCollection)\n\n\ndef test_unpack_collections():\n a = delayed(1) + 5\n b = a + 1\n c = a + 2\n\n def build(a, b, c, iterator):\n t = (\n a,\n b, # Top-level collections\n {\n \"a\": a, # dict\n a: b, # collections as keys\n \"b\": [1, 2, 
[b]], # list\n \"c\": 10, # other builtins pass through unchanged\n \"d\": (c, 2), # tuple\n \"e\": {a, 2, 3}, # set\n \"f\": OrderedDict([(\"a\", a)]),\n }, # OrderedDict\n iterator,\n ) # Iterator\n\n t[2][\"f\"] = ADataClass(a=a)\n t[2][\"g\"] = (ADataClass, a)\n\n return t\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_collection_names_test_get_collection_names.None_1": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_collection_names_test_get_collection_names.None_1", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 609, "end_line": 644, "span_ids": ["test_get_collection_names"], "tokens": 320}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_collection_names():\n class DummyCollection:\n def __init__(self, dsk, keys):\n self.dask = dsk\n self.keys = keys\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_keys__(self):\n return self.keys\n\n with pytest.raises(TypeError):\n get_collection_names(object())\n with pytest.raises(TypeError):\n get_collection_names(DummyCollection(None, []))\n # Keys must either be a string or a tuple where the first element is a string\n with pytest.raises(TypeError):\n get_collection_names(DummyCollection({1: 2}, [1]))\n with pytest.raises(TypeError):\n get_collection_names(DummyCollection({(): 1}, [()]))\n with pytest.raises(TypeError):\n get_collection_names(DummyCollection({(1,): 1}, [(1,)]))\n\n assert get_collection_names(DummyCollection({}, [])) == set()\n\n # Arbitrary hashables\n h1 = object()\n h2 = object()\n # __dask_keys__() returns a nested list\n assert get_collection_names(\n DummyCollection(\n {(\"a-1\", h1): 1, (\"a-1\", h2): 2, \"b-2\": 3, \"c\": 4},\n [[[(\"a-1\", h1), (\"a-1\", h2), \"b-2\", \"c\"]]],\n )\n ) == {\"a-1\", \"b-2\", \"c\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_item_change_name_test_normalize_function_dataclass_field_no_repr.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_persist_item_change_name_test_normalize_function_dataclass_field_no_repr.None_2", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1313, "end_line": 1339, "span_ids": ["test_persist_item_change_name", "test_normalize_function_limited_size", "test_normalize_function_dataclass_field_no_repr"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_persist_item_change_name():\n a = db.from_sequence([1, 2, 3]).min()\n rebuild, args = a.__dask_postpersist__()\n b = rebuild({\"x\": 4}, *args, rename={a.name: \"x\"})\n assert isinstance(b, db.Item)\n assert b.__dask_keys__() == [\"x\"]\n db.utils.assert_eq(b, 4)\n\n\ndef test_normalize_function_limited_size():\n for i in range(1000):\n normalize_function(lambda x: x)\n\n assert 50 < len(function_cache) < 600\n\n\ndef test_normalize_function_dataclass_field_no_repr():\n A = dataclasses.make_dataclass(\n \"A\",\n [(\"param\", float, dataclasses.field(repr=False))],\n namespace={\"__dask_tokenize__\": lambda self: self.param},\n )\n\n a1, a2 = A(1), A(2)\n\n assert normalize_function(a1) == normalize_function(a1)\n assert normalize_function(a1) != normalize_function(a2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_scheduler_with_distributed_active_test_callable_scheduler.assert_called_0_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_get_scheduler_with_distributed_active_test_callable_scheduler.assert_called_0_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1427, "end_line": 1448, "span_ids": ["test_callable_scheduler", "test_get_scheduler_with_distributed_active"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_scheduler_with_distributed_active():\n\n with dask.config.set(scheduler=\"dask.distributed\"):\n warning_message = (\n \"Running on a single-machine scheduler when a distributed client \"\n \"is active might lead to unexpected results.\"\n )\n with pytest.warns(UserWarning, match=warning_message) as user_warnings_a:\n get_scheduler(scheduler=\"threads\")\n get_scheduler(scheduler=\"sync\")\n assert len(user_warnings_a) == 2\n\n\ndef test_callable_scheduler():\n called = [False]\n\n def get(dsk, keys, *args, **kwargs):\n called[0] = True\n return dask.get(dsk, keys)\n\n assert delayed(lambda: 1)().compute(scheduler=get) == 1\n assert called[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_num_workers_config_test_optimizations_ctd.assert_dsk1_dsk2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_num_workers_config_test_optimizations_ctd.assert_dsk1_dsk2", "embedding": null, "metadata": 
{"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1451, "end_line": 1477, "span_ids": ["test_optimizations_ctd", "test_num_workers_config"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.flaky(reruns=10, reruns_delay=5)\n@pytest.mark.slow\n@pytest.mark.parametrize(\"scheduler\", [\"threads\", \"processes\"])\ndef test_num_workers_config(scheduler):\n # Regression test for issue #4082\n\n f = delayed(pure=False)(time.sleep)\n # Be generous with the initial sleep times, as process have been observed\n # to take >0.5s to spin up\n num_workers = 3\n a = [f(1.0) for i in range(num_workers)]\n with dask.config.set(num_workers=num_workers, chunksize=1), Profiler() as prof:\n compute(*a, scheduler=scheduler)\n\n workers = {i.worker_id for i in prof.results}\n\n assert len(workers) == num_workers\n\n\ndef test_optimizations_ctd():\n da = pytest.importorskip(\"dask.array\")\n x = da.arange(2, chunks=1)[:1]\n dsk1 = collections_to_dsk([x])\n with dask.config.set({\"optimizations\": [lambda dsk, keys: dsk]}):\n dsk2 = collections_to_dsk([x])\n\n assert dsk1 == dsk2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_clone_key_test_clone_key.with_pytest_raises_TypeEr.clone_key_1_2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_clone_key_test_clone_key.with_pytest_raises_TypeEr.clone_key_1_2_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1482, "end_line": 1494, "span_ids": ["test_clone_key"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_clone_key():\n h = object() # arbitrary hashable\n assert clone_key(\"inc-1-2-3\", 123) == \"inc-4dfeea2f9300e67a75f30bf7d6182ea4\"\n assert clone_key(\"x\", 123) == \"x-dc2b8d1c184c72c19faa81c797f8c6b0\"\n assert clone_key(\"x\", 456) == \"x-b76f061b547b00d18b9c7a18ccc47e2d\"\n assert clone_key((\"x\", 1), 456) == (\"x-b76f061b547b00d18b9c7a18ccc47e2d\", 1)\n assert clone_key((\"sum-1-2-3\", h, 1), 123) == (\n \"sum-1efd41f02035dc802f4ebb9995d07e9d\",\n h,\n 1,\n )\n with pytest.raises(TypeError):\n clone_key(1, 2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_nested_test_set_hard_to_copyables.with_dask_config_set_x_th.with_dask_config_set_y_1_.pass": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_set_nested_test_set_hard_to_copyables.with_dask_config_set_x_th.with_dask_config_set_y_1_.pass", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 277, "end_line": 291, "span_ids": ["test_set_hard_to_copyables", "test_set_nested"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_set_nested():\n with dask.config.set({\"abc\": {\"x\": 123}}):\n assert config[\"abc\"] == {\"x\": 123}\n with dask.config.set({\"abc.y\": 456}):\n assert config[\"abc\"] == {\"x\": 123, \"y\": 456}\n assert config[\"abc\"] == {\"x\": 123}\n assert \"abc\" not in config\n\n\ndef test_set_hard_to_copyables():\n import threading\n\n with dask.config.set(x=threading.Lock()):\n with dask.config.set(y=1):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_config_serialization_test_config_inheritance.assert_dask_config_get_a": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test_config_serialization_test_config_inheritance.assert_dask_config_get_a", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 489, "end_line": 505, "span_ids": ["test_config_serialization", "test_config_inheritance"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_config_serialization():\n # Use context manager without changing the value to ensure test side effects are restored\n with dask.config.set({\"array.svg.size\": dask.config.get(\"array.svg.size\")}):\n\n # Take a round trip through the serialization\n serialized = serialize({\"array\": {\"svg\": {\"size\": 150}}})\n config = deserialize(serialized)\n\n dask.config.update(dask.config.global_config, config)\n assert dask.config.get(\"array.svg.size\") == 150\n\n\ndef test_config_inheritance():\n config = collect_env(\n {\"DASK_INTERNAL_INHERIT_CONFIG\": serialize({\"array\": {\"svg\": {\"size\": 150}}})}\n )\n assert dask.config.get(\"array.svg.size\", config=config) == 150", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test__get_paths_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_config.py_test__get_paths_", "embedding": null, "metadata": {"file_path": "dask/tests/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 508, "end_line": 548, "span_ids": ["test__get_paths", "test_default_search_paths"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test__get_paths(monkeypatch):\n # These settings are used by Dask's config system. We temporarily\n # remove them to avoid interference from the machine where tests\n # are being run.\n monkeypatch.delenv(\"DASK_CONFIG\", raising=False)\n monkeypatch.delenv(\"DASK_ROOT_CONFIG\", raising=False)\n monkeypatch.setattr(site, \"PREFIXES\", [])\n\n expected = [\n \"/etc/dask\",\n os.path.join(sys.prefix, \"etc\", \"dask\"),\n os.path.join(os.path.expanduser(\"~\"), \".config\", \"dask\"),\n ]\n paths = _get_paths()\n assert paths == expected\n assert len(paths) == len(set(paths)) # No duplicate paths\n\n with monkeypatch.context() as m:\n m.setenv(\"DASK_CONFIG\", \"foo-bar\")\n paths = _get_paths()\n assert paths == expected + [\"foo-bar\"]\n assert len(paths) == len(set(paths))\n\n with monkeypatch.context() as m:\n m.setenv(\"DASK_ROOT_CONFIG\", \"foo-bar\")\n paths = _get_paths()\n assert paths == [\"foo-bar\"] + expected[1:]\n assert len(paths) == len(set(paths))\n\n with monkeypatch.context() as m:\n prefix = os.path.join(\"include\", \"this\", \"path\")\n m.setattr(site, \"PREFIXES\", site.PREFIXES + [prefix])\n paths = _get_paths()\n assert os.path.join(prefix, \"etc\", \"dask\") in paths\n assert len(paths) == len(set(paths))\n\n\ndef test_default_search_paths():\n # Ensure _get_paths() is used for default paths\n assert dask.config.paths == _get_paths()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_serializable_groupby_agg_test_serializable_groupby_agg.dd_utils_assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_serializable_groupby_agg_test_serializable_groupby_agg.dd_utils_assert_eq_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 186, "end_line": 201, "span_ids": ["test_serializable_groupby_agg"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def 
test_serializable_groupby_agg(c, s, a, b):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [1, 0, 1, 0]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n result = ddf.groupby(\"y\").agg(\"count\", split_out=2)\n\n # Check Culling and Compute\n agg0 = await c.compute(result.partitions[0])\n agg1 = await c.compute(result.partitions[1])\n dd.utils.assert_eq(\n pd.concat([agg0, agg1]),\n pd.DataFrame({\"x\": [2, 2], \"y\": [0, 1]}).set_index(\"y\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_concatenate_test_blockwise_concatenate.da_assert_eq_z_x_schedu": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_blockwise_concatenate_test_blockwise_concatenate.da_assert_eq_z_x_schedu", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 494, "end_line": 516, "span_ids": ["test_blockwise_concatenate"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_blockwise_concatenate(c):\n \"\"\"Test a blockwise operation with concatenated axes\"\"\"\n da = pytest.importorskip(\"dask.array\")\n np = pytest.importorskip(\"numpy\")\n\n def f(x, y):\n da.assert_eq(y, [[0, 1, 2]])\n return x\n\n x = da.from_array(np.array([0, 1, 2]))\n y = da.from_array(np.array([[0, 1, 2]]))\n z = da.blockwise(\n f,\n (\"i\"),\n x,\n (\"i\"),\n y,\n (\"ij\"),\n dtype=x.dtype,\n concatenate=True,\n )\n c.compute(z, optimize_graph=False)\n da.assert_eq(z, x, scheduler=c)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_pack_MaterializedLayer_handles_futures_in_graph_properly_test_pack_MaterializedLayer_handles_futures_in_graph_properly.assert_unpacked_deps_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_pack_MaterializedLayer_handles_futures_in_graph_properly_test_pack_MaterializedLayer_handles_futures_in_graph_properly.assert_unpacked_deps_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 658, "end_line": 672, "span_ids": ["test_pack_MaterializedLayer_handles_futures_in_graph_properly"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_pack_MaterializedLayer_handles_futures_in_graph_properly(c, s, a, b):\n fut = c.submit(inc, 1)\n\n hlg = HighLevelGraph(\n {\"l1\": MaterializedLayer({\"x\": fut, \"y\": (inc, \"x\"), \"z\": (inc, \"y\")})},\n {\"l1\": set()},\n )\n # fill hlg.key_dependencies cache. This excludes known futures, so only\n # includes a subset of all dependencies. Previously if the cache was present\n # the future dependencies would be missing when packed.\n hlg.get_all_dependencies()\n packed = hlg.__dask_distributed_pack__(c, [\"z\"], {})\n unpacked = HighLevelGraph.__dask_distributed_unpack__(packed)\n assert unpacked[\"deps\"] == {\"x\": {fut.key}, \"y\": {fut.key}, \"z\": {\"y\"}}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_sql_engine_kwargs_test_to_sql_engine_kwargs.with_tmpfile_as_f_.dd_utils_assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_to_sql_engine_kwargs_test_to_sql_engine_kwargs.with_tmpfile_as_f_.dd_utils_assert_eq_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 700, "span_ids": ["test_to_sql_engine_kwargs"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\n \"ignore:Running on a single-machine scheduler when a distributed client \"\n \"is active might lead to unexpected results.\"\n)\n@gen_cluster(client=True)\nasync def test_to_sql_engine_kwargs(c, s, a, b):\n # https://github.com/dask/dask/issues/8738\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n pytest.importorskip(\"sqlalchemy\")\n\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=1)\n with tmpfile() as f:\n uri = f\"sqlite:///{f}\"\n result = ddf.to_sql(\n \"test\", uri, index=True, engine_kwargs={\"echo\": False}, compute=False\n )\n await c.compute(result)\n\n dd.utils.assert_eq(\n ddf,\n dd.read_sql_table(\"test\", uri, \"index\"),\n check_divisions=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_non_recursive_df_reduce_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_distributed.py_test_non_recursive_df_reduce_", "embedding": null, "metadata": {"file_path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "file_type": "text/x-python", "category": "test", "start_line": 703, 
"end_line": 726, "span_ids": ["test_non_recursive_df_reduce"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@gen_cluster(client=True)\nasync def test_non_recursive_df_reduce(c, s, a, b):\n # See https://github.com/dask/dask/issues/8773\n\n dd = pytest.importorskip(\"dask.dataframe\")\n pd = pytest.importorskip(\"pandas\")\n\n class SomeObject:\n def __init__(self, val):\n self.val = val\n\n N = 170\n series = pd.Series(data=[1] * N, index=range(2, N + 2))\n dask_series = dd.from_pandas(series, npartitions=34)\n result = dask_series.reduction(\n chunk=lambda x: x,\n aggregate=lambda x: SomeObject(x.sum().sum()),\n split_every=False,\n token=\"commit-dataset\",\n meta=object,\n )\n\n assert (await c.compute(result)).val == 170", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_len_does_not_materialize_test_len_does_not_materialize.None_4": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_highgraph.py_test_len_does_not_materialize_test_len_does_not_materialize.None_4", "embedding": null, "metadata": {"file_path": "dask/tests/test_highgraph.py", "file_name": "test_highgraph.py", "file_type": "text/x-python", "category": "test", "start_line": 237, "end_line": 258, "span_ids": ["test_len_does_not_materialize"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_len_does_not_materialize():\n a = {\"x\": 1}\n b = Blockwise(\n output=\"b\",\n output_indices=tuple(\"ij\"),\n dsk={\"b\": [[blockwise_token(0)]]},\n indices=(),\n numblocks={},\n new_axes={\"i\": (1, 1, 1), \"j\": (1, 1)},\n )\n assert len(b) == len(b.get_output_keys())\n\n layers = {\"a\": a, \"b\": b}\n dependencies = {\"a\": set(), \"b\": {\"a\"}}\n hg = HighLevelGraph(layers, dependencies)\n\n assert hg.layers[\"a\"].is_materialized()\n assert not hg.layers[\"b\"].is_materialized()\n\n assert len(hg) == len(a) + len(b) == 7\n\n assert not hg.layers[\"b\"].is_materialized()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_os_SchedulerImportCheck.start.for_mod_in_set_sys_module.if_not_mod_startswith_sel.else_.sys_modules_pop_mod_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_os_SchedulerImportCheck.start.for_mod_in_set_sys_module.if_not_mod_startswith_sel.else_.sys_modules_pop_mod_", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", 
"file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 32, "span_ids": ["SchedulerImportCheck.__init__", "imports", "SchedulerImportCheck.start", "SchedulerImportCheck"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\n\nimport pytest\n\ndistributed = pytest.importorskip(\"distributed\")\n\nimport sys\nfrom operator import getitem\n\nfrom distributed import Client, SchedulerPlugin\nfrom distributed.utils_test import cluster, loop # noqa F401\n\nfrom dask.layers import ArrayChunkShapeDep, ArraySliceDep, fractional_slice\n\n\nclass SchedulerImportCheck(SchedulerPlugin):\n \"\"\"Plugin to help record which modules are imported on the scheduler\"\"\"\n\n name = \"import-check\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n\n async def start(self, scheduler):\n # Record the modules that have been imported when the scheduler starts\n self.start_modules = set()\n for mod in set(sys.modules):\n if not mod.startswith(self.pattern):\n self.start_modules.add(mod)\n else:\n # Maually remove the target library\n sys.modules.pop(mod)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_array_chunk_shape_dep_test_array_chunk_shape_dep.for_i_in_range_d_.for_j_in_range_d_.check_i_j_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_array_chunk_shape_dep_test_array_chunk_shape_dep.for_i_in_range_d_.for_j_in_range_d_.check_i_j_", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 35, "end_line": 49, "span_ids": ["test_array_chunk_shape_dep"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_chunk_shape_dep():\n dac = pytest.importorskip(\"dask.array.core\")\n d = 2 # number of chunks in x,y\n chunk = (2, 3) # chunk shape\n shape = tuple(d * n for n in chunk) # array shape\n chunks = dac.normalize_chunks(chunk, shape)\n array_deps = ArrayChunkShapeDep(chunks)\n\n def check(i, j):\n chunk_shape = array_deps[(i, j)]\n assert chunk_shape == chunk\n\n for i in range(d):\n for j in range(d):\n check(i, j)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_array_slice_deps_test_array_slice_deps.for_i_in_range_d_.for_j_in_range_d_.check_i_j_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py_test_array_slice_deps_test_array_slice_deps.for_i_in_range_d_.for_j_in_range_d_.check_i_j_", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 69, "span_ids": ["test_array_slice_deps"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_array_slice_deps():\n dac = pytest.importorskip(\"dask.array.core\")\n d = 2 # number of chunks in x,y\n chunk = (2, 3) # chunk shape\n shape = tuple(d * n for n in chunk) # array shape\n chunks = dac.normalize_chunks(chunk, shape)\n array_deps = ArraySliceDep(chunks)\n\n def check(i, j):\n slices = array_deps[(i, j)]\n assert slices == (\n slice(chunk[0] * i, chunk[0] * (i + 1), None),\n slice(chunk[1] * j, chunk[1] * (j + 1), None),\n )\n\n for i in range(d):\n for j in range(d):\n check(i, j)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__dataframe_shuffle__dataframe_tree_reduction.return.dd_from_pandas_df_nparti": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_layers.py__dataframe_shuffle__dataframe_tree_reduction.return.dd_from_pandas_df_nparti", "embedding": null, "metadata": {"file_path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "file_type": "text/x-python", "category": "test", "start_line": 72, "end_line": 87, "span_ids": ["_dataframe_tree_reduction", "_dataframe_shuffle"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _dataframe_shuffle(tmpdir):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n # Perform a computation using an HLG-based shuffle\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10, 20)})\n return dd.from_pandas(df, npartitions=2).shuffle(\"a\", shuffle=\"tasks\")\n\n\ndef _dataframe_tree_reduction(tmpdir):\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n # Perform a computation using an HLG-based tree reduction\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10, 20)})\n return dd.from_pandas(df, npartitions=2).mean()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_func_test_derived_from_dask_dataframe.assert_Object_with_missi": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils.py_test_derived_from_func_test_derived_from_dask_dataframe.assert_Object_with_missi", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils.py", "file_name": "test_utils.py", "file_type": "text/x-python", "category": "test", "start_line": 536, "end_line": 560, "span_ids": ["test_derived_from_func", "test_derived_from_dask_dataframe"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_derived_from_func():\n import builtins\n\n @derived_from(builtins)\n def sum():\n \"extra docstring\"\n pass\n\n assert \"extra docstring\\n\\n\" in sum.__doc__\n assert \"Return the sum of\" in sum.__doc__\n assert \"This docstring was copied from builtins.sum\" in sum.__doc__\n\n\ndef test_derived_from_dask_dataframe():\n dd = pytest.importorskip(\"dask.dataframe\")\n\n assert \"inconsistencies\" in dd.DataFrame.dropna.__doc__\n\n [axis_arg] = [\n line for line in dd.DataFrame.dropna.__doc__.split(\"\\n\") if \"axis :\" in line\n ]\n assert \"not supported\" in axis_arg.lower()\n assert \"dask\" in axis_arg.lower()\n\n assert \"Object with missing values filled\" in dd.DataFrame.ffill.__doc__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils_test.py_warnings_test_hlg_layer.with_pytest_raises_KeyErr.utils_test_hlg_layer_hg_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_utils_test.py_warnings_test_hlg_layer.with_pytest_raises_KeyErr.utils_test_hlg_layer_hg_", "embedding": null, "metadata": {"file_path": "dask/tests/test_utils_test.py", "file_name": "test_utils_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports", "test_hlg_layer"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport pytest\n\nfrom dask import utils_test\nfrom dask.highlevelgraph import HighLevelGraph\nfrom dask.utils_test import _check_warning\n\n\ndef test_hlg_layer():\n a = {\"x\": 1}\n b = {\"y\": (utils_test.inc, \"x\")}\n layers = {\"a-layer\": a, \"bee-layer\": b}\n dependencies = {\"a-layer\": set(), \"bee-layer\": {\"a-layer\"}}\n hg = HighLevelGraph(layers, dependencies)\n\n assert utils_test.hlg_layer(hg, \"a\") is hg.layers[\"a-layer\"]\n assert utils_test.hlg_layer(hg, \"b\") is hg.layers[\"bee-layer\"]\n with pytest.raises(KeyError, match=\"No layer starts with\"):\n utils_test.hlg_layer(hg, \"foo\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_cached_property__cumsum.if_initial_zero_.else_.return.tuple_toolz_accumulate_ad": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_cached_property__cumsum.if_initial_zero_.else_.return.tuple_toolz_accumulate_ad", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1810, "end_line": 1845, "span_ids": ["_HashIdWrapper.__eq__", "_HashIdWrapper", "_HashIdWrapper.__ne__", "_cumsum", "_HashIdWrapper.__hash__", "cached_property.__set__", "_HashIdWrapper.__init__", "cached_property"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class cached_property(functools.cached_property):\n \"\"\"Read only version of functools.cached_property.\"\"\"\n\n def __set__(self, instance, val):\n \"\"\"Raise an error when attempting to set a cached property.\"\"\"\n raise AttributeError(\"Can't set attribute\")\n\n\nclass _HashIdWrapper:\n \"\"\"Hash and compare a wrapped object by identity instead of value\"\"\"\n\n def __init__(self, wrapped):\n self.wrapped = wrapped\n\n def __eq__(self, other):\n if not isinstance(other, _HashIdWrapper):\n return NotImplemented\n return self.wrapped is other.wrapped\n\n def __ne__(self, other):\n if not isinstance(other, _HashIdWrapper):\n return NotImplemented\n return self.wrapped is not other.wrapped\n\n def __hash__(self):\n return id(self.wrapped)\n\n\n@functools.lru_cache\ndef _cumsum(seq, initial_zero):\n if isinstance(seq, _HashIdWrapper):\n seq = seq.wrapped\n if initial_zero:\n return tuple(toolz.accumulate(add, seq, 0))\n else:\n return tuple(toolz.accumulate(add, seq))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_cached_cumsum_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/utils.py_cached_cumsum_", "embedding": null, "metadata": {"file_path": "dask/utils.py", "file_name": "utils.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1848, "end_line": 1875, "span_ids": ["cached_cumsum"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cached_cumsum(seq, initial_zero=False):\n \"\"\"Compute :meth:`toolz.accumulate` with caching.\n\n Caching is by the identify of `seq` rather than the value. 
It is thus\n important that `seq` is a tuple of immutable objects, and this function\n is intended for use where `seq` is a value that will persist (generally\n block sizes).\n\n Parameters\n ----------\n seq : tuple\n Values to cumulatively sum.\n initial_zero : bool, optional\n If true, the return value is prefixed with a zero.\n\n Returns\n -------\n tuple\n \"\"\"\n if isinstance(seq, tuple):\n # Look up by identity first, to avoid a linear-time __hash__\n # if we've seen this tuple object before.\n result = _cumsum(_HashIdWrapper(seq), initial_zero)\n else:\n # Construct a temporary tuple, and look up by value.\n result = _cumsum(tuple(seq), initial_zero)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_redirect_files_redirect_files._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_redirect_files_redirect_files._", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 338, "end_line": 394, "span_ids": ["docstring:137"], "tokens": 811}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "redirect_files = [\n # old html, new html\n (\"bytes.html\", \"remote-data-services.html\"),\n (\"array-overview.html\", \"array.html\"),\n (\"array-ghost.html\", \"array-overlap.html\"),\n (\"dataframe-overview.html\", \"dataframe.html\"),\n (\"dataframe-performance.html\", \"dataframe-best-practices.html\"),\n (\"delayed-overview.html\", \"delayed.html\"),\n (\"educational-resources.html\", \"presentations.html\"),\n (\"scheduler-choice.html\", \"setup.html\"),\n (\"diagnostics.html\", \"diagnostics-local.html\"),\n (\"inspect.html\", \"graphviz.html\"),\n (\"funding.html\", \"https://dask.org/#supported-by\"),\n (\"examples-tutorials.html\", \"https://examples.dask.org\"),\n (\"examples/array-extend.html\", \"https://examples.dask.org\"),\n (\"examples/array-hdf5.html\", \"https://examples.dask.org\"),\n (\"examples/array-numpy.html\", \"https://examples.dask.org\"),\n (\"examples/array-random.html\", \"https://examples.dask.org\"),\n (\"examples/bag-json.html\", \"https://examples.dask.org\"),\n (\"examples/bag-word-count-hdfs.html\", \"https://examples.dask.org\"),\n (\"examples/dataframe-csv.html\", \"https://examples.dask.org\"),\n (\"examples/dataframe-hdf5.html\", \"https://examples.dask.org\"),\n (\"examples/delayed-array.html\", \"https://examples.dask.org\"),\n (\"examples/delayed-custom.html\", \"https://examples.dask.org\"),\n (\"docs.html\", \"index.html\"),\n (\"use-cases.html\", \"https://stories.dask.org\"),\n (\"bag-overview.html\", \"bag.html\"),\n (\"distributed.html\", \"https://distributed.dask.org\"),\n (\"institutional-faq.html\", \"faq.html\"),\n (\"cite.html\", \"faq.html#how-do-I-cite-dask\"),\n (\"remote-data-services.html\", \"how-to/connect-to-remote-data.html\"),\n (\"debugging.html\", \"how-to/debug.html\"),\n (\"setup.html\", 
\"deploying.html\"),\n (\"how-to/deploy-dask-clusters.html\", \"deploying.html\"),\n (\"setup/cli.html\", \"deploying-cli.html\"),\n (\"how-to/deploy-dask/cli.html\", \"deploying-cli.html\"),\n (\"setup/cloud.html\", \"deploying-cloud.html\"),\n (\"how-to/deploy-dask/cloud.html\", \"deploying-cloud.html\"),\n (\"setup/docker.html\", \"hdeploying-docker.html\"),\n (\"how-to/deploy-dask/docker.html\", \"deploying-docker.html\"),\n (\"setup/hpc.html\", \"deploying-hpc.html\"),\n (\"how-to/deploy-dask/hpc.html\", \"deploying-hpc.html\"),\n (\"setup/kubernetes.html\", \"deploying-kubernetes.html\"),\n (\"how-to/deploy-dask/kubernetes.html\", \"deploying-kubernetes.html\"),\n (\"setup/python-advanced.html\", \"deploying-python-advanced.html\"),\n (\"how-to/deploy-dask/python-advanced.html\", \"deploying-python-advanced.html\"),\n (\"setup/single-distributed.html\", \"deploying-python.html\"),\n (\"how-to/deploy-dask/single-distributed.html\", \"deploying-python.html\"),\n (\"setup/single-machine.html\", \"scheduling.html\"),\n (\"how-to/deploy-dask/single-machine.html\", \"scheduling.html\"),\n (\"setup/ssh.html\", \"deploying-ssh.html\"),\n (\"how-to/deploy-dask/ssh.html\", \"deploying-ssh.html\"),\n (\"setup/adaptive.html\", \"how-to/adaptive.html\"),\n (\"setup/custom-startup.html\", \"how-to/customize-initialization.html\"),\n (\"setup/environment.html\", \"how-to/manage-environments.html\"),\n (\"setup/prometheus.html\", \"how-to/setup-prometheus.html\"),\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_redirect_template_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_redirect_template_", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 394, "end_line": 430, "span_ids": ["setup", "copy_legacy_redirects", "impl:54"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "redirect_template = \"\"\"\\\n\n \n \n \n \n\n\"\"\"\n\n# Rate limiting issue for github: https://github.com/sphinx-doc/sphinx/issues/7388\nlinkcheck_ignore = [\n r\"^https?:\\/\\/(?:www\\.)?github.com\\/\",\n r\"^https?:\\/\\/localhost(?:[:\\/].+)?$\",\n]\n\ndoctest_global_setup = \"\"\"\nimport numpy as np\n\"\"\"\n\n\ndef copy_legacy_redirects(app, docname):\n if app.builder.name == \"html\":\n for html_src_path, new in redirect_files:\n # add ../ to old nested paths\n new = f\"{'../' * html_src_path.count('/')}{new}\"\n page = redirect_template.format(new=new)\n target_path = app.outdir + \"/\" + html_src_path\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n with open(target_path, \"w\") as f:\n f.write(page)\n\n\ndef setup(app):\n app.connect(\"build-finished\", copy_legacy_redirects)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, 
"__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_3d_test_field_access_with_shape.assert_eq_x_col1_col": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_block_3d_test_field_access_with_shape.assert_eq_x_col1_col", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 776, "end_line": 994, "span_ids": ["test_operator_dtype_promotion", "test_block_with_mismatched_shape", "test_block_empty_lists", "test_block_tuple", "test_block_invalid_nesting", "test_block_3d", "test_elemwise_on_scalars", "test_field_access", "test_block_no_lists", "test_broadcast_shapes", "test_elemwise_with_ndarrays", "test_operators", "test_elemwise_differently_chunked", "test_field_access_with_shape", "test_elemwise_dtype"], "tokens": 2136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_block_3d():\n a000 = np.ones((2, 2, 2), int) * 1\n\n a100 = np.ones((3, 2, 2), int) * 2\n a010 = np.ones((2, 3, 2), int) * 3\n a001 = np.ones((2, 2, 3), int) * 4\n\n a011 = np.ones((2, 3, 3), int) * 5\n a101 = np.ones((3, 2, 3), int) * 6\n a110 = np.ones((3, 3, 2), int) * 7\n\n a111 = np.ones((3, 3, 3), int) * 8\n\n d000 = da.asarray(a000)\n\n d100 = da.asarray(a100)\n d010 = da.asarray(a010)\n d001 = da.asarray(a001)\n\n d011 = da.asarray(a011)\n d101 = da.asarray(a101)\n d110 = da.asarray(a110)\n\n d111 = da.asarray(a111)\n\n expected = np.block([[[a000, a001], [a010, a011]], [[a100, a101], [a110, a111]]])\n result = da.block([[[d000, d001], [d010, d011]], [[d100, d101], [d110, d111]]])\n\n assert_eq(expected, result)\n\n expected = np.block(\n [\n [[a000, a001[:, :, :0]], [a010[:, :0, :], a011[:, :0, :0]]],\n [[a100[:0, :, :], a101[:0, :, :0]], [a110[:0, :0, :], a111[:0, :0, :0]]],\n ]\n )\n result = da.block(\n [\n [[d000, d001[:, :, :0]], [d010[:, :0, :], d011[:, :0, :0]]],\n [[d100[:0, :, :], d101[:0, :, :0]], [d110[:0, :0, :], d111[:0, :0, :0]]],\n ]\n )\n\n assert result is d000\n assert_eq(expected, result)\n\n\ndef test_block_with_mismatched_shape():\n a = np.array([0, 0])\n b = np.eye(2)\n\n for arrays in [[a, b], [b, a]]:\n with pytest.raises(ValueError):\n da.block(arrays)\n\n\ndef test_block_no_lists():\n assert_eq(da.block(1), np.block(1))\n assert_eq(da.block(np.eye(3)), np.block(np.eye(3)))\n\n\ndef test_block_invalid_nesting():\n for arrays in [\n [1, [2]],\n [1, []],\n [[1], 2],\n [[], 2],\n [[[1], [2]], [[3, 4]], [5]], # missing brackets\n ]:\n with pytest.raises(ValueError) as e:\n da.block(arrays)\n e.match(r\"depths are mismatched\")\n\n\ndef test_block_empty_lists():\n for arrays in [[], [[]], [[1], []]]:\n with pytest.raises(ValueError) as e:\n da.block(arrays)\n e.match(r\"empty\")\n\n\ndef test_block_tuple():\n for arrays in [([1, 2], [3, 4]), [(1, 2), (3, 4)]]:\n with pytest.raises(TypeError) as e:\n da.block(arrays)\n e.match(r\"tuple\")\n\n\ndef test_broadcast_shapes():\n assert () == broadcast_shapes()\n assert (2, 5) == broadcast_shapes((2, 5))\n assert (0, 5) == broadcast_shapes((0, 1), (1, 5))\n 
assert np.allclose(\n (2, np.nan), broadcast_shapes((1, np.nan), (2, 1)), equal_nan=True\n )\n assert np.allclose(\n (2, np.nan), broadcast_shapes((2, 1), (1, np.nan)), equal_nan=True\n )\n assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ())\n assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,))\n assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4))\n\n pytest.raises(ValueError, lambda: broadcast_shapes((3,), (3, 4)))\n pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (2, 3, 1)))\n pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (1, np.nan)))\n\n\ndef test_elemwise_on_scalars():\n x = np.arange(10, dtype=np.int64)\n a = from_array(x, chunks=(5,))\n assert len(a.__dask_keys__()) == 2\n assert_eq(a.sum() ** 2, x.sum() ** 2)\n\n y = np.arange(10, dtype=np.int32)\n b = from_array(y, chunks=(5,))\n result = a.sum() * b\n # Dask 0-d arrays do not behave like numpy scalars for type promotion\n assert result.dtype == np.int64\n assert result.compute().dtype == np.int64\n assert (x.sum() * y).dtype == np.int32\n assert_eq((x.sum() * y).astype(np.int64), result)\n\n\ndef test_elemwise_with_ndarrays():\n x = np.arange(3)\n y = np.arange(12).reshape(4, 3)\n a = from_array(x, chunks=(3,))\n b = from_array(y, chunks=(2, 3))\n\n assert_eq(x + a, 2 * x)\n assert_eq(a + x, 2 * x)\n\n assert_eq(x + b, x + y)\n assert_eq(b + x, x + y)\n assert_eq(a + y, x + y)\n assert_eq(y + a, x + y)\n # Error on shape mismatch\n pytest.raises(ValueError, lambda: a + y.T)\n pytest.raises(ValueError, lambda: a + np.arange(2))\n\n\ndef test_elemwise_differently_chunked():\n x = np.arange(3)\n y = np.arange(12).reshape(4, 3)\n a = from_array(x, chunks=(3,))\n b = from_array(y, chunks=(2, 2))\n\n assert_eq(a + b, x + y)\n assert_eq(b + a, x + y)\n\n\ndef test_elemwise_dtype():\n values = [\n da.from_array(np.ones(5, np.float32), chunks=3),\n da.from_array(np.ones(5, np.int16), chunks=3),\n da.from_array(np.ones(5, np.int64), chunks=3),\n da.from_array(np.ones((), np.float64), chunks=()) * 1e200,\n np.ones(5, np.float32),\n 1,\n 1.0,\n 1e200,\n np.int64(1),\n np.ones((), np.int64),\n ]\n for x in values:\n for y in values:\n assert da.maximum(x, y).dtype == da.result_type(x, y)\n\n\ndef test_operators():\n x = np.arange(10)\n y = np.arange(10).reshape((10, 1))\n a = from_array(x, chunks=(5,))\n b = from_array(y, chunks=(5, 1))\n\n c = a + 1\n assert_eq(c, x + 1)\n\n c = a + b\n assert_eq(c, x + x.reshape((10, 1)))\n\n expr = (3 / a * b) ** 2 > 5\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning) # divide by zero\n assert_eq(expr, (3 / x * y) ** 2 > 5)\n\n c = da.exp(a)\n assert_eq(c, np.exp(x))\n\n assert_eq(abs(-a), a)\n assert_eq(a, +x)\n\n\ndef test_operator_dtype_promotion():\n x = np.arange(10, dtype=np.float32)\n y = np.array([1])\n a = from_array(x, chunks=(5,))\n\n assert_eq(x + 1, a + 1) # still float32\n assert_eq(x + 1e50, a + 1e50) # now float64\n assert_eq(x + y, a + y) # also float64\n\n\ndef test_field_access():\n x = np.array([(1, 1.0), (2, 2.0)], dtype=[(\"a\", \"i4\"), (\"b\", \"f4\")])\n y = from_array(x, chunks=(1,))\n assert_eq(y[\"a\"], x[\"a\"])\n assert_eq(y[[\"b\", \"a\"]], x[[\"b\", \"a\"]])\n assert same_keys(y[[\"b\", \"a\"]], y[[\"b\", \"a\"]])\n\n\ndef test_field_access_with_shape():\n dtype = [(\"col1\", (\"f4\", (3, 2))), (\"col2\", (\"f4\", 3))]\n data = np.ones((100, 50), dtype=dtype)\n x = da.from_array(data, 10)\n assert_eq(x[\"col1\"], data[\"col1\"])\n assert_eq(x[[\"col1\"]], data[[\"col1\"]])\n 
assert_eq(x[\"col2\"], data[\"col2\"])\n assert_eq(x[[\"col1\", \"col2\"]], data[[\"col1\", \"col2\"]])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_test_broadcast_operator.assert_eq_w_d_w_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_matmul_test_broadcast_operator.assert_eq_w_d_w_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 997, "end_line": 1148, "span_ids": ["test_matmul_array_ufunc", "test_broadcast_to_chunks", "test_broadcast_arrays_uneven_chunks", "test_broadcast_operator", "test_matmul", "test_broadcast_to_array", "test_broadcast_to_scalar", "test_broadcast_to", "test_T", "test_broadcast_arrays"], "tokens": 1489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_matmul():\n x = np.random.random((5, 5))\n y = np.random.random((5, 2))\n a = from_array(x, chunks=(1, 5))\n b = from_array(y, chunks=(5, 1))\n assert_eq(operator.matmul(a, b), a.dot(b))\n assert_eq(operator.matmul(a, b), operator.matmul(x, y))\n assert_eq(operator.matmul(a, y), operator.matmul(x, b))\n list_vec = list(range(1, 6))\n assert_eq(operator.matmul(list_vec, b), operator.matmul(list_vec, y))\n assert_eq(operator.matmul(x, list_vec), operator.matmul(a, list_vec))\n z = np.random.random((5, 5, 5))\n c = from_array(z, chunks=(1, 5, 1))\n assert_eq(operator.matmul(a, z), operator.matmul(x, c))\n assert_eq(operator.matmul(z, a), operator.matmul(c, x))\n\n\ndef test_matmul_array_ufunc():\n # regression test for https://github.com/dask/dask/issues/4353\n x = np.random.random((5, 5))\n y = np.random.random((5, 2))\n a = from_array(x, chunks=(1, 5))\n b = from_array(y, chunks=(5, 1))\n result = b.__array_ufunc__(np.matmul, \"__call__\", a, b)\n assert_eq(result, x.dot(y))\n\n\ndef test_T():\n x = np.arange(400).reshape((20, 20))\n a = from_array(x, chunks=(5, 5))\n\n assert_eq(x.T, a.T)\n\n\ndef test_broadcast_to():\n x = np.random.randint(10, size=(5, 1, 6))\n a = from_array(x, chunks=(3, 1, 3))\n\n for shape in [a.shape, (5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:\n xb = np.broadcast_to(x, shape)\n ab = broadcast_to(a, shape)\n\n assert_eq(xb, ab)\n\n if a.shape == ab.shape:\n assert a is ab\n\n pytest.raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))\n pytest.raises(ValueError, lambda: broadcast_to(a, (3,)))\n\n\ndef test_broadcast_to_array():\n x = np.random.randint(10, size=(5, 1, 6))\n\n for shape in [(5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:\n a = np.broadcast_to(x, shape)\n d = broadcast_to(x, shape)\n\n assert_eq(a, d)\n\n\ndef test_broadcast_to_scalar():\n x = 5\n\n for shape in [tuple(), (0,), (2, 3), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:\n a = np.broadcast_to(x, shape)\n d = broadcast_to(x, shape)\n\n assert_eq(a, d)\n\n\ndef test_broadcast_to_chunks():\n x = 
np.random.randint(10, size=(5, 1, 6))\n a = from_array(x, chunks=(3, 1, 3))\n\n for shape, chunks, expected_chunks in [\n ((5, 3, 6), (3, -1, 3), ((3, 2), (3,), (3, 3))),\n ((5, 3, 6), (3, 1, 3), ((3, 2), (1, 1, 1), (3, 3))),\n ((2, 5, 3, 6), (1, 3, 1, 3), ((1, 1), (3, 2), (1, 1, 1), (3, 3))),\n ]:\n xb = np.broadcast_to(x, shape)\n ab = broadcast_to(a, shape, chunks=chunks)\n assert_eq(xb, ab)\n assert ab.chunks == expected_chunks\n\n with pytest.raises(ValueError):\n broadcast_to(a, a.shape, chunks=((2, 3), (1,), (3, 3)))\n with pytest.raises(ValueError):\n broadcast_to(a, a.shape, chunks=((3, 2), (3,), (3, 3)))\n with pytest.raises(ValueError):\n broadcast_to(a, (5, 2, 6), chunks=((3, 2), (3,), (3, 3)))\n\n\ndef test_broadcast_arrays():\n assert np.broadcast_arrays() == da.broadcast_arrays()\n\n a = np.arange(4)\n d_a = da.from_array(a, chunks=tuple(s // 2 for s in a.shape))\n\n a_0 = np.arange(4)[None, :]\n a_1 = np.arange(4)[:, None]\n\n d_a_0 = d_a[None, :]\n d_a_1 = d_a[:, None]\n\n a_r = np.broadcast_arrays(a_0, a_1)\n d_r = da.broadcast_arrays(d_a_0, d_a_1)\n\n assert isinstance(d_r, list)\n assert len(a_r) == len(d_r)\n\n for e_a_r, e_d_r in zip(a_r, d_r):\n assert_eq(e_a_r, e_d_r)\n\n\ndef test_broadcast_arrays_uneven_chunks():\n x = da.ones(30, chunks=(3,))\n y = da.ones(30, chunks=(5,))\n z = np.broadcast_arrays(x, y)\n\n assert_eq(z, z)\n\n x = da.ones((1, 30), chunks=(1, 3))\n y = da.ones(30, chunks=(5,))\n z = np.broadcast_arrays(x, y)\n\n assert_eq(z, z)\n\n\n@pytest.mark.parametrize(\n \"u_shape, v_shape\",\n [\n [tuple(), (2, 3)],\n [(1,), (2, 3)],\n [(1, 1), (2, 3)],\n [(0, 3), (1, 3)],\n [(2, 0), (2, 1)],\n [(1, 0), (2, 1)],\n [(0, 1), (1, 3)],\n ],\n)\ndef test_broadcast_operator(u_shape, v_shape):\n u = np.random.random(u_shape)\n v = np.random.random(v_shape)\n\n d_u = from_array(u, chunks=1)\n d_v = from_array(v, chunks=1)\n\n w = u * v\n d_w = d_u * d_v\n\n assert_eq(w, d_w)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic_test_elemwise_consistent_names.assert_same_keys_da_maxim": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_arithmetic_test_elemwise_consistent_names.assert_same_keys_da_maxim", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2289, "end_line": 2443, "span_ids": ["test_elemwise_consistent_names", "test_arithmetic"], "tokens": 2116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_arithmetic():\n x = np.arange(5).astype(\"f4\") + 2\n y = np.arange(5).astype(\"i8\") + 2\n z = np.arange(5).astype(\"i4\") + 2\n a = da.from_array(x, chunks=(2,))\n b = da.from_array(y, chunks=(2,))\n c = da.from_array(z, chunks=(2,))\n assert_eq(a + b, x + y)\n assert_eq(a * b, x * y)\n assert_eq(a - b, x - y)\n assert_eq(a / b, x / y)\n assert_eq(b & b, y & y)\n 
assert_eq(b | b, y | y)\n assert_eq(b ^ b, y ^ y)\n assert_eq(a // b, x // y)\n assert_eq(a**b, x**y)\n assert_eq(a % b, x % y)\n assert_eq(a > b, x > y)\n assert_eq(a < b, x < y)\n assert_eq(a >= b, x >= y)\n assert_eq(a <= b, x <= y)\n assert_eq(a == b, x == y)\n assert_eq(a != b, x != y)\n\n assert_eq(a + 2, x + 2)\n assert_eq(a * 2, x * 2)\n assert_eq(a - 2, x - 2)\n assert_eq(a / 2, x / 2)\n assert_eq(b & True, y & True)\n assert_eq(b | True, y | True)\n assert_eq(b ^ True, y ^ True)\n assert_eq(a // 2, x // 2)\n assert_eq(a**2, x**2)\n assert_eq(a % 2, x % 2)\n assert_eq(a > 2, x > 2)\n assert_eq(a < 2, x < 2)\n assert_eq(a >= 2, x >= 2)\n assert_eq(a <= 2, x <= 2)\n assert_eq(a == 2, x == 2)\n assert_eq(a != 2, x != 2)\n\n assert_eq(2 + b, 2 + y)\n assert_eq(2 * b, 2 * y)\n assert_eq(2 - b, 2 - y)\n assert_eq(2 / b, 2 / y)\n assert_eq(True & b, True & y)\n assert_eq(True | b, True | y)\n assert_eq(True ^ b, True ^ y)\n assert_eq(2 // b, 2 // y)\n assert_eq(2**b, 2**y)\n assert_eq(2 % b, 2 % y)\n assert_eq(2 > b, 2 > y)\n assert_eq(2 < b, 2 < y)\n assert_eq(2 >= b, 2 >= y)\n assert_eq(2 <= b, 2 <= y)\n assert_eq(2 == b, 2 == y)\n assert_eq(2 != b, 2 != y)\n\n assert_eq(-a, -x)\n assert_eq(abs(a), abs(x))\n assert_eq(~(a == b), ~(x == y))\n assert_eq(~(a == b), ~(x == y))\n\n assert_eq(da.logaddexp(a, b), np.logaddexp(x, y))\n assert_eq(da.logaddexp2(a, b), np.logaddexp2(x, y))\n assert_eq(da.exp(b), np.exp(y))\n assert_eq(da.log(a), np.log(x))\n assert_eq(da.log10(a), np.log10(x))\n assert_eq(da.log1p(a), np.log1p(x))\n assert_eq(da.expm1(b), np.expm1(y))\n assert_eq(da.sqrt(a), np.sqrt(x))\n assert_eq(da.square(a), np.square(x))\n\n assert_eq(da.sin(a), np.sin(x))\n assert_eq(da.cos(b), np.cos(y))\n assert_eq(da.tan(a), np.tan(x))\n assert_eq(da.arcsin(b / 10), np.arcsin(y / 10))\n assert_eq(da.arccos(b / 10), np.arccos(y / 10))\n assert_eq(da.arctan(b / 10), np.arctan(y / 10))\n assert_eq(da.arctan2(b * 10, a), np.arctan2(y * 10, x))\n assert_eq(da.hypot(b, a), np.hypot(y, x))\n assert_eq(da.sinh(a), np.sinh(x))\n assert_eq(da.cosh(b), np.cosh(y))\n assert_eq(da.tanh(a), np.tanh(x))\n assert_eq(da.arcsinh(b * 10), np.arcsinh(y * 10))\n assert_eq(da.arccosh(b * 10), np.arccosh(y * 10))\n assert_eq(da.arctanh(b / 10), np.arctanh(y / 10))\n assert_eq(da.deg2rad(a), np.deg2rad(x))\n assert_eq(da.rad2deg(a), np.rad2deg(x))\n\n assert_eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))\n assert_eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))\n assert_eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))\n assert_eq(da.logical_not(a < 1), np.logical_not(x < 1))\n assert_eq(da.maximum(a, 5 - a), np.maximum(a, 5 - a))\n assert_eq(da.minimum(a, 5 - a), np.minimum(a, 5 - a))\n assert_eq(da.fmax(a, 5 - a), np.fmax(a, 5 - a))\n assert_eq(da.fmin(a, 5 - a), np.fmin(a, 5 - a))\n\n assert_eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))\n assert_eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))\n assert_eq(da.isfinite(a), np.isfinite(x))\n assert_eq(da.isinf(a), np.isinf(x))\n assert_eq(da.isnan(a), np.isnan(x))\n assert_eq(da.signbit(a - 3), np.signbit(x - 3))\n assert_eq(da.copysign(a - 3, b), np.copysign(x - 3, y))\n assert_eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))\n assert_eq(da.ldexp(c, c), np.ldexp(z, z))\n assert_eq(da.fmod(a * 12, b), np.fmod(x * 12, y))\n assert_eq(da.floor(a * 0.5), np.floor(x * 0.5))\n assert_eq(da.ceil(a), np.ceil(x))\n assert_eq(da.trunc(a / 2), np.trunc(x / 2))\n\n assert_eq(da.degrees(b), np.degrees(y))\n 
assert_eq(da.radians(a), np.radians(x))\n\n assert_eq(da.rint(a + 0.3), np.rint(x + 0.3))\n assert_eq(da.fix(a - 2.5), np.fix(x - 2.5))\n\n assert_eq(da.angle(a + 1j), np.angle(x + 1j))\n assert_eq(da.real(a + 1j), np.real(x + 1j))\n assert_eq((a + 1j).real, np.real(x + 1j))\n assert_eq(da.imag(a + 1j), np.imag(x + 1j))\n assert_eq((a + 1j).imag, np.imag(x + 1j))\n assert_eq(da.conj(a + 1j * b), np.conj(x + 1j * y))\n assert_eq((a + 1j * b).conj(), (x + 1j * y).conj())\n\n assert_eq(da.clip(b, 1, 4), np.clip(y, 1, 4))\n assert_eq(b.clip(1, 4), y.clip(1, 4))\n assert_eq(da.fabs(b), np.fabs(y))\n assert_eq(da.sign(b - 2), np.sign(y - 2))\n assert_eq(da.absolute(b - 2), np.absolute(y - 2))\n assert_eq(da.absolute(b - 2 + 1j), np.absolute(y - 2 + 1j))\n\n l1, l2 = da.frexp(a)\n r1, r2 = np.frexp(x)\n assert_eq(l1, r1)\n assert_eq(l2, r2)\n\n l1, l2 = da.modf(a)\n r1, r2 = np.modf(x)\n assert_eq(l1, r1)\n assert_eq(l2, r2)\n\n assert_eq(da.around(a, -1), np.around(x, -1))\n\n\ndef test_elemwise_consistent_names():\n a = da.from_array(np.arange(5, dtype=\"f4\"), chunks=(2,))\n b = da.from_array(np.arange(5, dtype=\"f4\"), chunks=(2,))\n assert same_keys(a + b, a + b)\n assert same_keys(a + 2, a + 2)\n assert same_keys(da.exp(a), da.exp(a))\n assert same_keys(da.exp(a, dtype=\"f8\"), da.exp(a, dtype=\"f8\"))\n assert same_keys(da.maximum(a, b), da.maximum(a, b))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_optimize_test_Array_normalizes_dtype.assert_isinstance_x_dtype": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_optimize_test_Array_normalizes_dtype.assert_isinstance_x_dtype", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2446, "end_line": 2513, "span_ids": ["test_size", "test_optimize", "test_Array_normalizes_dtype", "test_slicing_with_non_ndarrays", "test_itemsize", "test_getter", "test_nbytes"], "tokens": 539}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_optimize():\n x = np.arange(5).astype(\"f4\")\n a = da.from_array(x, chunks=(2,))\n expr = a[1:4] + 1\n result = optimize(expr.dask, expr.__dask_keys__())\n assert isinstance(result, dict)\n assert all(key in result for key in expr.__dask_keys__())\n\n\ndef test_slicing_with_non_ndarrays():\n class ARangeSlice:\n dtype = np.dtype(\"i8\")\n ndim = 1\n\n def __init__(self, start, stop):\n self.start = start\n self.stop = stop\n\n def __array__(self):\n return np.arange(self.start, self.stop)\n\n class ARangeSlicable:\n dtype = np.dtype(\"i8\")\n ndim = 1\n\n def __init__(self, n):\n self.n = n\n\n @property\n def shape(self):\n return (self.n,)\n\n def __getitem__(self, key):\n return ARangeSlice(key[0].start, key[0].stop)\n\n x = da.from_array(ARangeSlicable(10), chunks=(4,))\n\n assert_eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 
1).sum())\n\n\n@pytest.mark.filterwarnings(\"ignore:the matrix subclass\")\ndef test_getter():\n assert type(getter(np.matrix([[1]]), 0)) is np.ndarray\n assert type(getter(np.matrix([[1]]), 0, asarray=False)) is np.matrix\n assert_eq(getter([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))\n\n assert_eq(getter(np.arange(5), (None, slice(None, None))), np.arange(5)[None, :])\n\n\ndef test_size():\n x = da.ones((10, 2), chunks=(3, 1))\n assert x.size == np.array(x).size\n assert isinstance(x.size, int)\n\n\ndef test_nbytes():\n x = da.ones((10, 2), chunks=(3, 1))\n assert x.nbytes == np.array(x).nbytes\n\n\ndef test_itemsize():\n x = da.ones((10, 2), chunks=(3, 1))\n assert x.itemsize == 8\n\n\ndef test_Array_normalizes_dtype():\n x = da.ones((3,), chunks=(1,), dtype=int)\n assert isinstance(x.dtype, np.dtype)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_with_lock_test_asanyarray.assert_da_asanyarray_dx_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_from_array_with_lock_test_asanyarray.assert_da_asanyarray_dx_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2516, "end_line": 2770, "span_ids": ["MyArray.__init__", "test_asarray", "test_from_array_ndarray_onechunk", "MyArray.__getitem__", "test_from_array_ndarray_getitem", "test_asarray_chunks", "test_from_array_scalar", "test_asarray_h5py", "test_from_array_with_lock", "test_from_array_tasks_always_call_getter", "test_from_array_list", "MyArray", "test_asanyarray", "test_asarray_dask_dataframe", "test_from_array_copy", "test_from_array_getitem", "test_from_array_minus_one", "test_from_array_dask_array", "test_from_array_dask_collection_warns", "test_from_array_no_asarray", "test_from_array_inline"], "tokens": 2124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"inline_array\", [True, False])\ndef test_from_array_with_lock(inline_array):\n x = np.arange(10)\n\n class FussyLock(SerializableLock):\n def acquire(self, blocking=True, timeout=-1):\n if self.locked():\n raise RuntimeError(\"I am locked\")\n return super().acquire(blocking, timeout)\n\n lock = FussyLock()\n d = da.from_array(x, chunks=5, lock=lock, inline_array=inline_array)\n\n lock.acquire()\n with pytest.raises(RuntimeError):\n d.compute()\n\n lock.release()\n assert_eq(d, x)\n\n lock = CounterLock()\n e = da.from_array(x, chunks=5, lock=lock, inline_array=inline_array)\n\n assert_eq(e, x)\n # Note: the specific counts for composite arithmetic operations can vary\n # significantly based on the complexity of the computation, whether we are inlining,\n # and optimization fusion settings. 
But for this simple comparison it seems pretty\n # stable.\n assert lock.release_count == 2\n assert lock.acquire_count == 2\n\n\nclass MyArray:\n def __init__(self, x):\n self.x = x\n self.dtype = x.dtype\n self.shape = x.shape\n self.ndim = len(x.shape)\n\n def __getitem__(self, i):\n return self.x[i]\n\n\n@pytest.mark.parametrize(\n \"x,chunks\",\n [\n (np.arange(25).reshape((5, 5)), (5, 5)),\n (np.arange(25).reshape((5, 5)), -1),\n (np.array([[1]]), 1),\n (np.array(1), 1),\n ],\n)\n@pytest.mark.parametrize(\"inline_array\", [True, False])\ndef test_from_array_tasks_always_call_getter(x, chunks, inline_array):\n dx = da.from_array(\n MyArray(x), chunks=chunks, asarray=False, inline_array=inline_array\n )\n assert_eq(x, dx)\n\n\ndef test_from_array_ndarray_onechunk():\n \"\"\"ndarray with a single chunk produces a minimal single key dict\"\"\"\n x = np.array([[1, 2], [3, 4]])\n dx = da.from_array(x, chunks=-1)\n assert_eq(x, dx)\n assert len(dx.dask) == 1\n assert dx.dask[dx.name, 0, 0] is x\n\n\ndef test_from_array_ndarray_getitem():\n \"\"\"For ndarray, don't use getter / getter_nofancy; use the cleaner\n operator.getitem\"\"\"\n x = np.array([[1, 2], [3, 4]])\n dx = da.from_array(x, chunks=(1, 2))\n assert_eq(x, dx)\n assert (dx.dask[dx.name, 0, 0] == np.array([[1, 2]])).all()\n\n\n@pytest.mark.parametrize(\"x\", [[1, 2], (1, 2), memoryview(b\"abc\")])\ndef test_from_array_list(x):\n \"\"\"Lists, tuples, and memoryviews are automatically converted to ndarray\"\"\"\n dx = da.from_array(x, chunks=-1)\n assert_eq(np.array(x), dx)\n assert isinstance(dx.dask[dx.name, 0], np.ndarray)\n\n dx = da.from_array(x, chunks=1)\n assert_eq(np.array(x), dx)\n assert dx.dask[dx.name, 0][0] == x[0]\n\n\n# On MacOS Python 3.9, the order of the np.ScalarType tuple randomly changes across\n# interpreter restarts, thus causing pytest-xdist failures; setting PYTHONHASHSEED does\n# not help\n@pytest.mark.parametrize(\n \"type_\", sorted((t for t in np.ScalarType if t is not memoryview), key=str)\n)\ndef test_from_array_scalar(type_):\n \"\"\"Python and numpy scalars are automatically converted to ndarray\"\"\"\n if type_ == np.datetime64:\n x = np.datetime64(\"2000-01-01\")\n else:\n x = type_(1)\n\n dx = da.from_array(x, chunks=-1)\n assert_eq(np.array(x), dx)\n assert isinstance(\n dx.dask[\n dx.name,\n ],\n np.ndarray,\n )\n\n\n@pytest.mark.parametrize(\"asarray,cls\", [(True, np.ndarray), (False, np.matrix)])\n@pytest.mark.parametrize(\"inline_array\", [True, False])\n@pytest.mark.filterwarnings(\"ignore:the matrix subclass\")\ndef test_from_array_no_asarray(asarray, cls, inline_array):\n def assert_chunks_are_of_type(x):\n chunks = compute_as_if_collection(Array, x.dask, x.__dask_keys__())\n # If it's a tuple of tuples we want to concat, but if it's a tuple\n # of 1d arrays, we just want to iterate directly\n for c in concat(chunks) if isinstance(chunks[0], tuple) else chunks:\n assert type(c) is cls\n\n x = np.matrix(np.arange(100).reshape((10, 10)))\n dx = da.from_array(x, chunks=(5, 5), asarray=asarray, inline_array=inline_array)\n assert_chunks_are_of_type(dx)\n assert_chunks_are_of_type(dx[0:5])\n assert_chunks_are_of_type(dx[0:5][:, 0])\n\n\ndef test_from_array_getitem():\n x = np.arange(10)\n\n def my_getitem(x, ind):\n return x[ind]\n\n y = da.from_array(x, chunks=(5,), getitem=my_getitem)\n\n for k, v in y.dask.items():\n if isinstance(v, tuple):\n assert v[0] is my_getitem\n\n assert_eq(x, y)\n\n\ndef test_from_array_minus_one():\n x = np.arange(10)\n y = da.from_array(x, -1)\n assert 
y.chunks == ((10,),)\n assert_eq(x, y)\n\n\ndef test_from_array_copy():\n # Regression test for https://github.com/dask/dask/issues/3751\n x = np.arange(10)\n y = da.from_array(x, -1)\n assert y.npartitions == 1\n y_c = y.copy()\n assert y is not y_c\n assert y.compute() is not y_c.compute()\n\n\ndef test_from_array_dask_array():\n x = np.array([[1, 2], [3, 4]])\n dx = da.from_array(x, chunks=(1, 2))\n with pytest.raises(ValueError):\n da.from_array(dx)\n\n\ndef test_from_array_dask_collection_warns():\n class CustomCollection(np.ndarray):\n def __dask_graph__(self):\n return {\"bar\": 1}\n\n x = CustomCollection([1, 2, 3])\n with pytest.warns(UserWarning):\n da.from_array(x)\n\n # Ensure da.array warns too\n with pytest.warns(UserWarning):\n da.array(x)\n\n\ndef test_from_array_inline():\n class MyArray(np.ndarray):\n pass\n\n a = np.array([1, 2, 3]).view(MyArray)\n dsk = dict(da.from_array(a, name=\"my-array\", inline_array=False).dask)\n assert dsk[\"original-my-array\"] is a\n\n dsk = dict(da.from_array(a, name=\"my-array\", inline_array=True).dask)\n assert \"original-my-array\" not in dsk\n\n\n@pytest.mark.parametrize(\"asarray\", [da.asarray, da.asanyarray])\ndef test_asarray(asarray):\n assert_eq(asarray([1, 2, 3]), np.asarray([1, 2, 3]))\n\n x = asarray([1, 2, 3])\n assert asarray(x) is x\n\n y = [x[0], 2, x[2]]\n assert_eq(asarray(y), x)\n\n\n@pytest.mark.parametrize(\"asarray\", [da.asarray, da.asanyarray])\ndef test_asarray_dask_dataframe(asarray):\n # https://github.com/dask/dask/issues/3885\n dd = pytest.importorskip(\"dask.dataframe\")\n import pandas as pd\n\n s = dd.from_pandas(pd.Series([1, 2, 3, 4]), 2)\n result = asarray(s)\n expected = s.values\n assert_eq(result, expected)\n\n df = s.to_frame(name=\"s\")\n result = asarray(df)\n expected = df.values\n assert_eq(result, expected)\n\n\n@pytest.mark.parametrize(\"asarray\", [da.asarray, da.asanyarray])\n@pytest.mark.parametrize(\"inline_array\", [True, False])\ndef test_asarray_h5py(asarray, inline_array):\n h5py = pytest.importorskip(\"h5py\")\n\n with tmpfile(\".hdf5\") as fn:\n with h5py.File(fn, mode=\"a\") as f:\n d = f.create_dataset(\"/x\", shape=(2, 2), dtype=float)\n x = asarray(d, inline_array=inline_array)\n\n # Check for the array in the dsk\n dsk = dict(x.dask)\n assert (d in dsk.values()) is not inline_array\n assert not any(isinstance(v, np.ndarray) for v in dsk.values())\n\n\ndef test_asarray_chunks():\n with dask.config.set({\"array.chunk-size\": \"100 B\"}):\n x = np.ones(1000)\n d = da.asarray(x)\n assert d.npartitions > 1\n\n\n@pytest.mark.filterwarnings(\"ignore:the matrix subclass\")\ndef test_asanyarray():\n x = np.matrix([1, 2, 3])\n dx = da.asanyarray(x)\n assert dx.numblocks == (1, 1)\n chunks = compute_as_if_collection(Array, dx.dask, dx.__dask_keys__())\n assert isinstance(chunks[0][0], np.matrix)\n assert da.asanyarray(dx) is dx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_dataframe_test_from_func.assert_same_keys_d_from_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_array_core.py_test_asanyarray_dataframe_test_from_func.assert_same_keys_d_from_", "embedding": null, "metadata": {"file_path": 
"dask/array/tests/test_array_core.py", "file_name": "test_array_core.py", "file_type": "text/x-python", "category": "test", "start_line": 2773, "end_line": 2808, "span_ids": ["test_asanyarray_dataframe", "test_asanyarray_datetime64", "test_from_func"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_asanyarray_dataframe():\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n df = pd.DataFrame({\"x\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n x = np.asanyarray(df)\n dx = da.asanyarray(ddf)\n assert isinstance(dx, da.Array)\n\n assert_eq(x, dx)\n\n x = np.asanyarray(df.x)\n dx = da.asanyarray(ddf.x)\n assert isinstance(dx, da.Array)\n\n assert_eq(x, dx)\n\n\ndef test_asanyarray_datetime64():\n x = np.array([\"2000-01-01\"], dtype=\"datetime64\")\n dx = da.asanyarray(x)\n assert isinstance(dx, da.Array)\n assert_eq(x, dx)\n\n\ndef test_from_func():\n x = np.arange(10)\n f = lambda n: n * x\n d = from_func(f, (10,), x.dtype, kwargs={\"n\": 2})\n\n assert d.shape == x.shape\n assert d.dtype == x.dtype\n assert_eq(d, 2 * x)\n assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={\"n\": 2}))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_reductions.py_warnings_test_arg_reductions.assert_eq_dfunc_a2_0_sp": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_cupy_reductions.py_warnings_test_arg_reductions.assert_eq_dfunc_a2_0_sp", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_cupy_reductions.py", "file_name": "test_cupy_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 47, "span_ids": ["imports", "test_arg_reductions"], "tokens": 420}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport numpy as np\nimport pytest\n\npytestmark = pytest.mark.gpu\n\nimport dask\nimport dask.array as da\nfrom dask.array.numpy_compat import _numpy_120\nfrom dask.array.utils import assert_eq\n\ncupy = pytest.importorskip(\"cupy\")\n\n\n@pytest.mark.skipif(not _numpy_120, reason=\"NEP-35 is not available\")\n@pytest.mark.parametrize(\n [\"dfunc\", \"func\"],\n [\n (da.argmin, np.argmin),\n (da.argmax, np.argmax),\n (da.nanargmin, np.nanargmin),\n (da.nanargmax, np.nanargmax),\n ],\n)\ndef test_arg_reductions(dfunc, func):\n x = cupy.random.random((10, 10, 10))\n a = da.from_array(x, chunks=(3, 4, 5))\n\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n with dask.config.set(split_every=2):\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n 
assert_eq(dfunc(a, 1), func(x, 1))\n assert_eq(dfunc(a, 2), func(x, 2))\n\n pytest.raises(ValueError, lambda: dfunc(a, 3))\n pytest.raises(TypeError, lambda: dfunc(a, (0, 1)))\n\n x2 = cupy.arange(10)\n a2 = da.from_array(x2, chunks=3)\n assert_eq(dfunc(a2), func(x2))\n assert_eq(dfunc(a2, 0), func(x2, 0))\n assert_eq(dfunc(a2, 0, split_every=2), func(x2, 0))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_2d_test_reduction_2d_test.if_split_every_.None_8": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_reduction_2d_test_reduction_2d_test.if_split_every_.None_8", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 180, "span_ids": ["reduction_2d_test"], "tokens": 670}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):\n assert_eq(da_func(darr), np_func(narr))\n assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))\n assert_eq(da_func(darr, axis=()), np_func(narr, axis=()))\n assert_eq(da_func(darr, axis=0), np_func(narr, axis=0))\n assert_eq(da_func(darr, axis=1), np_func(narr, axis=1))\n assert_eq(da_func(darr, axis=-1), np_func(narr, axis=-1))\n assert_eq(da_func(darr, axis=-2), np_func(narr, axis=-2))\n assert_eq(\n da_func(darr, axis=1, keepdims=True), np_func(narr, axis=1, keepdims=True)\n )\n assert_eq(\n da_func(darr, axis=(), keepdims=True), np_func(narr, axis=(), keepdims=True)\n )\n assert_eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))\n\n assert same_keys(da_func(darr, axis=()), da_func(darr, axis=()))\n assert same_keys(da_func(darr, axis=1), da_func(darr, axis=1))\n assert same_keys(da_func(darr, axis=(1, 0)), da_func(darr, axis=(1, 0)))\n\n if use_dtype:\n assert_eq(da_func(darr, dtype=\"f8\"), np_func(narr, dtype=\"f8\"))\n assert_eq(da_func(darr, dtype=\"i8\"), np_func(narr, dtype=\"i8\"))\n\n if split_every:\n a1 = da_func(darr, split_every=4)\n a2 = da_func(darr, split_every={0: 2, 1: 2})\n assert same_keys(a1, a2)\n assert_eq(a1, np_func(narr))\n assert_eq(a2, np_func(narr))\n assert_eq(\n da_func(darr, keepdims=True, split_every=4),\n np_func(narr, keepdims=True),\n )\n assert_eq(da_func(darr, axis=(), split_every=2), np_func(narr, axis=()))\n assert_eq(da_func(darr, axis=0, split_every=2), np_func(narr, axis=0))\n assert_eq(\n da_func(darr, axis=(), keepdims=True, split_every=2),\n np_func(narr, axis=(), keepdims=True),\n )\n assert_eq(\n da_func(darr, axis=0, keepdims=True, split_every=2),\n np_func(narr, axis=0, keepdims=True),\n )\n assert_eq(da_func(darr, axis=1, split_every=2), np_func(narr, axis=1))\n assert_eq(\n da_func(darr, axis=1, keepdims=True, split_every=2),\n np_func(narr, axis=1, keepdims=True),\n )", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nanarg_reductions_test_nanarg_reductions.with_warnings_catch_warni.None_2.dfunc_a_compute_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_nanarg_reductions_test_nanarg_reductions.with_warnings_catch_warni.None_2.dfunc_a_compute_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 253, "end_line": 274, "span_ids": ["test_nanarg_reductions"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n [\"dfunc\", \"func\"], [(da.nanargmin, np.nanargmin), (da.nanargmax, np.nanargmax)]\n)\ndef test_nanarg_reductions(dfunc, func):\n\n x = np.random.random((10, 10, 10))\n x[5] = np.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n assert_eq(dfunc(a), func(x))\n assert_eq(dfunc(a, 0), func(x, 0))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning) # All-NaN slice encountered\n with pytest.raises(ValueError):\n dfunc(a, 1).compute()\n\n with pytest.raises(ValueError):\n dfunc(a, 2).compute()\n\n x[:] = np.nan\n a = da.from_array(x, chunks=(3, 4, 5))\n with pytest.raises(ValueError):\n dfunc(a).compute()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans_test_reductions_2D_nans.with_warnings_catch_warni.None_17": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_reductions.py_test_reductions_2D_nans_test_reductions_2D_nans.with_warnings_catch_warni.None_17", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_reductions.py", "file_name": "test_reductions.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 350, "span_ids": ["test_reductions_2D_nans"], "tokens": 678}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reductions_2D_nans():\n # chunks are a mix of some/all/no NaNs\n x = np.full((4, 4), np.nan)\n x[:2, :2] = np.array([[1, 2], [3, 4]])\n x[2, 2] = 5\n x[3, 3] = 6\n a = da.from_array(x, chunks=(2, 2))\n\n reduction_2d_test(da.sum, a, np.sum, x, False, False)\n reduction_2d_test(da.prod, a, np.prod, x, False, False)\n reduction_2d_test(da.mean, a, np.mean, x, False, False)\n reduction_2d_test(da.var, a, np.var, x, False, False)\n 
reduction_2d_test(da.std, a, np.std, x, False, False)\n reduction_2d_test(da.min, a, np.min, x, False, False)\n reduction_2d_test(da.max, a, np.max, x, False, False)\n reduction_2d_test(da.any, a, np.any, x, False, False)\n reduction_2d_test(da.all, a, np.all, x, False, False)\n\n reduction_2d_test(da.nansum, a, np.nansum, x, False, False)\n reduction_2d_test(da.nanprod, a, np.nanprod, x, False, False)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n reduction_2d_test(da.nanmean, a, np.nanmean, x, False, False)\n reduction_2d_test(da.nanvar, a, np.nanvar, x, False, False)\n reduction_2d_test(da.nanstd, a, np.nanstd, x, False, False)\n reduction_2d_test(da.nanmin, a, np.nanmin, x, False, False)\n reduction_2d_test(da.nanmax, a, np.nanmax, x, False, False)\n\n assert_eq(da.argmax(a), np.argmax(x))\n assert_eq(da.argmin(a), np.argmin(x))\n assert_eq(da.nanargmax(a), np.nanargmax(x))\n assert_eq(da.nanargmin(a), np.nanargmin(x))\n\n assert_eq(da.argmax(a, axis=0), np.argmax(x, axis=0))\n assert_eq(da.argmin(a, axis=0), np.argmin(x, axis=0))\n assert_eq(da.nanargmax(a, axis=0), np.nanargmax(x, axis=0))\n assert_eq(da.nanargmin(a, axis=0), np.nanargmin(x, axis=0))\n\n assert_eq(da.argmax(a, axis=1), np.argmax(x, axis=1))\n assert_eq(da.argmin(a, axis=1), np.argmin(x, axis=1))\n assert_eq(da.nanargmax(a, axis=1), np.nanargmax(x, axis=1))\n assert_eq(da.nanargmin(a, axis=1), np.nanargmin(x, axis=1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_test_einsum.with_warnings_catch_warni.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_routines.py_test_einsum_test_einsum.with_warnings_catch_warni.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_routines.py", "file_name": "test_routines.py", "file_type": "text/x-python", "category": "test", "start_line": 2359, "end_line": 2403, "span_ids": ["test_einsum"], "tokens": 368}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"einsum_signature\",\n [\n \"abc,bad->abcd\",\n \"abcdef,bcdfg->abcdeg\",\n \"ea,fb,abcd,gc,hd->efgh\",\n \"ab,b\",\n \"aa\",\n \"a,a->\",\n \"a,a->a\",\n \"a,a\",\n \"a,b\",\n \"a,b,c\",\n \"a\",\n \"ba,b\",\n \"ba,b->\",\n \"defab,fedbc->defac\",\n \"ab...,bc...->ac...\",\n \"a...a\",\n \"abc...->cba...\",\n \"...ab->...a\",\n \"a...a->a...\",\n # Following 2 from # https://stackoverflow.com/a/19203475/1611416\n \"...abc,...abcd->...d\",\n \"ab...,b->ab...\",\n # https://github.com/dask/dask/pull/3412#discussion_r182413444\n \"aa->a\",\n \"ab,ab,c->c\",\n \"aab,bc->ac\",\n \"aab,bcc->ac\",\n \"fdf,cdd,ccd,afe->ae\",\n \"fff,fae,bef,def->abd\",\n ],\n)\ndef test_einsum(einsum_signature):\n input_sigs = einsum_signature.split(\"->\")[0].replace(\"...\", \"*\").split(\",\")\n\n np_inputs, da_inputs = _numpy_and_dask_inputs(input_sigs)\n\n with warnings.catch_warnings():\n 
warnings.simplefilter(\"ignore\", category=da.PerformanceWarning)\n assert_eq(\n np.einsum(einsum_signature, *np_inputs),\n da.einsum(einsum_signature, *da_inputs),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_warnings_test_measures.assert_isinstance_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/array/tests/test_stats.py_warnings_test_measures.assert_isinstance_result_", "embedding": null, "metadata": {"file_path": "dask/array/tests/test_stats.py", "file_name": "test_stats.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 34, "span_ids": ["imports", "test_measures"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport pytest\n\nscipy = pytest.importorskip(\"scipy\")\nimport numpy as np\n\nimport dask.array as da\nimport dask.array.stats\nfrom dask.array.utils import allclose, assert_eq\nfrom dask.delayed import Delayed\n\n\n@pytest.mark.parametrize(\n \"kind, kwargs\", [(\"skew\", {}), (\"kurtosis\", {}), (\"kurtosis\", {\"fisher\": False})]\n)\n@pytest.mark.parametrize(\"single_dim\", [True, False])\ndef test_measures(kind, kwargs, single_dim):\n np.random.seed(seed=1337)\n if single_dim:\n x = np.random.random(size=(30,))\n else:\n x = np.random.random(size=(30, 2))\n y = da.from_array(x, 3)\n dfunc = getattr(dask.array.stats, kind)\n sfunc = getattr(scipy.stats, kind)\n\n expected = sfunc(x, **kwargs)\n result = dfunc(y, **kwargs)\n if np.isscalar(expected):\n # make it an array to account for possible numeric errors\n expected = np.array(expected)\n assert_eq(result, expected)\n assert isinstance(result, da.Array)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_npartitions_warn_test_read_text.pytest_raises_ValueError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_take_npartitions_warn_test_read_text.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 599, "end_line": 640, "span_ids": ["test_map_is_lazy", "test_from_url", "test_take_npartitions_warn", "test_can_use_dict_to_make_concrete", "test_read_text"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_take_npartitions_warn():\n # Use 
single-threaded scheduler so warnings are properly captured in the\n # same process\n with dask.config.set(scheduler=\"sync\"):\n with pytest.warns(UserWarning):\n b.take(100)\n\n with pytest.warns(UserWarning):\n b.take(7)\n\n with warnings.catch_warnings(record=True) as record:\n b.take(7, npartitions=2)\n b.take(7, warn=False)\n assert not record\n\n\ndef test_map_is_lazy():\n assert isinstance(map(lambda x: x, [1, 2, 3]), Iterator)\n\n\ndef test_can_use_dict_to_make_concrete():\n assert isinstance(dict(b.frequencies()), dict)\n\n\n@pytest.mark.slow\n@pytest.mark.network\n@pytest.mark.skip(reason=\"Hangs\")\ndef test_from_url():\n a = db.from_url([\"http://google.com\", \"http://github.com\"])\n assert a.npartitions == 2\n\n b = db.from_url(\"http://raw.githubusercontent.com/dask/dask/main/README.rst\")\n assert b.npartitions == 1\n assert b\"Dask\\n\" in b.take(10)\n\n\ndef test_read_text():\n with filetexts({\"a1.log\": \"A\\nB\", \"a2.log\": \"C\\nD\"}) as fns:\n assert {line.strip() for line in db.read_text(fns)} == set(\"ABCD\")\n assert {line.strip() for line in db.read_text(\"a*.log\")} == set(\"ABCD\")\n\n pytest.raises(ValueError, lambda: db.read_text(\"non-existent-*-path\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_warn_test_to_textfiles_name_function_warn.with_tmpdir_as_dn_.a_to_textfiles_dn_name_f": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/bag/tests/test_bag.py_test_to_textfiles_name_function_warn_test_to_textfiles_name_function_warn.with_tmpdir_as_dn_.a_to_textfiles_dn_name_f", "embedding": null, "metadata": {"file_path": "dask/bag/tests/test_bag.py", "file_name": "test_bag.py", "file_type": "text/x-python", "category": "test", "start_line": 942, "end_line": 963, "span_ids": ["test_to_textfiles_name_function_warn"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_textfiles_name_function_warn():\n seq = [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ]\n a = db.from_sequence(seq, npartitions=16)\n with tmpdir() as dn:\n a.to_textfiles(dn, name_function=str)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_from___future___import_an_maybe_wrap_pandas.return.x": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_from___future___import_an_maybe_wrap_pandas.return.x", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, 
"span_ids": ["imports", "_bind_property", "maybe_wrap_pandas", "_bind_method"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import annotations\n\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..utils import derived_from\n\n\ndef _bind_method(cls, pd_cls, attr):\n def func(self, *args, **kwargs):\n return self._function_map(attr, *args, **kwargs)\n\n func.__name__ = attr\n func.__qualname__ = f\"{cls.__name__}.{attr}\"\n try:\n func.__wrapped__ = getattr(pd_cls, attr)\n except Exception:\n pass\n setattr(cls, attr, derived_from(pd_cls)(func))\n\n\ndef _bind_property(cls, pd_cls, attr):\n def func(self):\n return self._property_map(attr)\n\n func.__name__ = attr\n func.__qualname__ = f\"{cls.__name__}.{attr}\"\n try:\n func.__wrapped__ = getattr(pd_cls, attr)\n except Exception:\n pass\n setattr(cls, attr, property(derived_from(pd_cls)(func)))\n\n\ndef maybe_wrap_pandas(obj, x):\n if isinstance(x, np.ndarray):\n if isinstance(obj, pd.Series):\n return pd.Series(x, index=obj.index, dtype=x.dtype)\n return pd.Index(x)\n return x", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor_Accessor._property_map.return.self__series_map_partitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_Accessor_Accessor._property_map.return.self__series_map_partitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 95, "span_ids": ["Accessor", "Accessor._delegate_property", "Accessor.__init__", "Accessor._delegate_method", "Accessor.__init_subclass__", "Accessor._property_map"], "tokens": 414}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Accessor:\n \"\"\"\n Base class for pandas Accessor objects cat, dt, and str.\n\n Notes\n -----\n Subclasses should define ``_accessor_name``, ``_accessor_methods``, and\n ``_accessor_properties``.\n \"\"\"\n\n def __init__(self, series):\n from .core import Series\n\n if not isinstance(series, Series):\n raise ValueError(\"Accessor cannot be initialized\")\n\n series_meta = series._meta\n if hasattr(series_meta, \"to_series\"): # is index-like\n series_meta = series_meta.to_series()\n meta = getattr(series_meta, self._accessor_name)\n\n self._meta = meta\n self._series = series\n\n def __init_subclass__(cls, **kwargs):\n \"\"\"Bind all auto-generated methods & properties\"\"\"\n super().__init_subclass__(**kwargs)\n pd_cls = getattr(pd.Series, cls._accessor_name)\n for attr in cls._accessor_methods:\n if not hasattr(cls, attr):\n _bind_method(cls, pd_cls, attr)\n 
for attr in cls._accessor_properties:\n if not hasattr(cls, attr):\n _bind_property(cls, pd_cls, attr)\n\n @staticmethod\n def _delegate_property(obj, accessor, attr):\n out = getattr(getattr(obj, accessor, obj), attr)\n return maybe_wrap_pandas(obj, out)\n\n @staticmethod\n def _delegate_method(obj, accessor, attr, args, kwargs):\n out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)\n return maybe_wrap_pandas(obj, out)\n\n def _property_map(self, attr):\n meta = self._delegate_property(self._series._meta, self._accessor_name, attr)\n token = f\"{self._accessor_name}-{attr}\"\n return self._series.map_partitions(\n self._delegate_property, self._accessor_name, attr, token=token, meta=meta\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_DatetimeAccessor_DatetimeAccessor._accessor_properties._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_DatetimeAccessor_DatetimeAccessor._accessor_properties._", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 116, "end_line": 185, "span_ids": ["DatetimeAccessor"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DatetimeAccessor(Accessor):\n \"\"\"Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n\n >>> s.dt.microsecond # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"dt\"\n\n _accessor_methods = (\n \"asfreq\",\n \"ceil\",\n \"day_name\",\n \"floor\",\n \"isocalendar\",\n \"month_name\",\n \"normalize\",\n \"round\",\n \"strftime\",\n \"to_period\",\n \"to_pydatetime\",\n \"to_pytimedelta\",\n \"to_timestamp\",\n \"total_seconds\",\n \"tz_convert\",\n \"tz_localize\",\n )\n\n _accessor_properties = (\n \"components\",\n \"date\",\n \"day\",\n \"day_of_week\",\n \"day_of_year\",\n \"dayofweek\",\n \"dayofyear\",\n \"days\",\n \"days_in_month\",\n \"daysinmonth\",\n \"end_time\",\n \"freq\",\n \"hour\",\n \"is_leap_year\",\n \"is_month_end\",\n \"is_month_start\",\n \"is_quarter_end\",\n \"is_quarter_start\",\n \"is_year_end\",\n \"is_year_start\",\n \"microsecond\",\n \"microseconds\",\n \"minute\",\n \"month\",\n \"nanosecond\",\n \"nanoseconds\",\n \"quarter\",\n \"qyear\",\n \"second\",\n \"seconds\",\n \"start_time\",\n \"time\",\n \"timetz\",\n \"tz\",\n \"week\",\n \"weekday\",\n \"weekofyear\",\n \"year\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor_StringAccessor._accessor_properties._": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor_StringAccessor._accessor_properties._", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 250, "span_ids": ["StringAccessor"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor(Accessor):\n \"\"\"Accessor object for string properties of the Series values.\n\n Examples\n --------\n\n >>> s.str.lower() # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"str\"\n\n _accessor_methods = (\n \"capitalize\",\n \"casefold\",\n \"center\",\n \"contains\",\n \"count\",\n \"decode\",\n \"encode\",\n \"endswith\",\n \"extract\",\n \"find\",\n \"findall\",\n \"fullmatch\",\n \"get\",\n \"index\",\n \"isalnum\",\n \"isalpha\",\n \"isdecimal\",\n \"isdigit\",\n \"islower\",\n \"isnumeric\",\n \"isspace\",\n \"istitle\",\n \"isupper\",\n \"join\",\n \"len\",\n \"ljust\",\n \"lower\",\n \"lstrip\",\n \"match\",\n \"normalize\",\n \"pad\",\n \"partition\",\n \"repeat\",\n \"replace\",\n \"rfind\",\n \"rindex\",\n \"rjust\",\n \"rpartition\",\n \"rstrip\",\n \"slice\",\n \"slice_replace\",\n \"startswith\",\n \"strip\",\n \"swapcase\",\n \"title\",\n \"translate\",\n \"upper\",\n \"wrap\",\n \"zfill\",\n )\n _accessor_properties = ()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor._split_StringAccessor.rsplit.return.self__split_rsplit_pat": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor._split_StringAccessor.rsplit.return.self__split_rsplit_pat", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 252, "end_line": 277, "span_ids": ["StringAccessor.split", "StringAccessor._split", "StringAccessor.rsplit"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor(Accessor):\n\n def _split(self, method, pat=None, n=-1, expand=False):\n if expand:\n if n == -1:\n raise NotImplementedError(\n \"To use the expand parameter you must specify the number of \"\n \"expected splits with the n= parameter. 
Usually n splits \"\n \"result in n+1 output columns.\"\n )\n else:\n delimiter = \" \" if pat is None else pat\n meta = self._series._meta._constructor(\n [delimiter.join([\"a\"] * (n + 1))],\n index=self._series._meta_nonempty[:1].index,\n )\n meta = getattr(meta.str, method)(n=n, expand=expand, pat=pat)\n else:\n meta = (self._series.name, object)\n return self._function_map(method, pat=pat, n=n, expand=expand, meta=meta)\n\n @derived_from(pd.core.strings.StringMethods)\n def split(self, pat=None, n=-1, expand=False):\n return self._split(\"split\", pat=pat, n=n, expand=expand)\n\n @derived_from(pd.core.strings.StringMethods)\n def rsplit(self, pat=None, n=-1, expand=False):\n return self._split(\"rsplit\", pat=pat, n=n, expand=expand)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.cat_StringAccessor.__getitem__.return.self__series_map_partitio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_StringAccessor.cat_StringAccessor.__getitem__.return.self__series_map_partitio", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 279, "end_line": 311, "span_ids": ["StringAccessor.cat", "StringAccessor.__getitem__", "StringAccessor.extractall"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StringAccessor(Accessor):\n\n @derived_from(pd.core.strings.StringMethods)\n def cat(self, others=None, sep=None, na_rep=None):\n from .core import Index, Series\n\n if others is None:\n\n def str_cat_none(x):\n\n if isinstance(x, (Series, Index)):\n x = x.compute()\n\n return x.str.cat(sep=sep, na_rep=na_rep)\n\n return self._series.reduction(chunk=str_cat_none, aggregate=str_cat_none)\n\n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n others = [others]\n elif not all(isinstance(a, valid_types) for a in others):\n raise TypeError(\"others must be Series/Index\")\n\n return self._series.map_partitions(\n str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta\n )\n\n @derived_from(pd.core.strings.StringMethods)\n def extractall(self, pat, flags=0):\n return self._series.map_partitions(\n str_extractall, pat, flags, token=\"str-extractall\"\n )\n\n def __getitem__(self, index):\n return self._series.map_partitions(str_get, index, meta=self._series._meta)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_str_extractall_CachedAccessor.__init__.self._accessor.accessor": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_str_extractall_CachedAccessor.__init__.self._accessor.accessor", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 314, "end_line": 345, "span_ids": ["CachedAccessor", "str_get", "str_extractall", "str_cat", "CachedAccessor.__init__"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def str_extractall(series, pat, flags):\n return series.str.extractall(pat, flags=flags)\n\n\ndef str_get(series, index):\n \"\"\"Implements series.str[index]\"\"\"\n return series.str[index]\n\n\ndef str_cat(self, *others, **kwargs):\n return self.str.cat(others=others, **kwargs)\n\n\n# Ported from pandas\n# https://github.com/pandas-dev/pandas/blob/master/pandas/core/accessor.py\nclass CachedAccessor:\n \"\"\"\n Custom property-like object (descriptor) for caching accessors.\n\n Parameters\n ----------\n name : str\n The namespace this will be accessed under, e.g. ``df.foo``\n accessor : cls\n The class with the extension methods. The class' __init__ method\n should expect one of a ``Series``, ``DataFrame`` or ``Index`` as\n the single argument ``data``\n \"\"\"\n\n def __init__(self, name, accessor):\n self._name = name\n self._accessor = accessor", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_CachedAccessor.__get___CachedAccessor.__get__.return.accessor_obj": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_CachedAccessor.__get___CachedAccessor.__get__.return.accessor_obj", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 347, "end_line": 357, "span_ids": ["CachedAccessor.__get__"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CachedAccessor:\n\n def __get__(self, obj, cls):\n if obj is None:\n # we're accessing the attribute of the class, i.e., Dataset.geo\n return self._accessor\n accessor_obj = self._accessor(obj)\n # Replace the property with the accessor object. 
Inspired by:\n # http://www.pydanny.com/cached-property.html\n # We need to use object.__setattr__ because we overwrite __setattr__ on\n # NDFrame\n object.__setattr__(obj, self._name, accessor_obj)\n return accessor_obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py__register_accessor__register_accessor.return.decorator": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py__register_accessor__register_accessor.return.decorator", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 360, "end_line": 374, "span_ids": ["_register_accessor"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _register_accessor(name, cls):\n def decorator(accessor):\n if hasattr(cls, name):\n warnings.warn(\n \"registration of accessor {!r} under name {!r} for type \"\n \"{!r} is overriding a preexisting attribute with the same \"\n \"name.\".format(accessor, name, cls),\n UserWarning,\n stacklevel=2,\n )\n setattr(cls, name, CachedAccessor(name, accessor))\n cls._accessors.add(name)\n return accessor\n\n return decorator", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_register_dataframe_accessor_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/accessor.py_register_dataframe_accessor_", "embedding": null, "metadata": {"file_path": "dask/dataframe/accessor.py", "file_name": "accessor.py", "file_type": "text/x-python", "category": "implementation", "start_line": 377, "end_line": 408, "span_ids": ["register_index_accessor", "register_series_accessor", "register_dataframe_accessor"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def register_dataframe_accessor(name):\n \"\"\"\n Register a custom accessor on :class:`dask.dataframe.DataFrame`.\n\n See :func:`pandas.api.extensions.register_dataframe_accessor` for more.\n \"\"\"\n from dask.dataframe import DataFrame\n\n return _register_accessor(name, DataFrame)\n\n\ndef register_series_accessor(name):\n \"\"\"\n Register a custom accessor on :class:`dask.dataframe.Series`.\n\n See :func:`pandas.api.extensions.register_series_accessor` for more.\n \"\"\"\n from dask.dataframe import Series\n\n return _register_accessor(name, Series)\n\n\ndef register_index_accessor(name):\n \"\"\"\n 
Register a custom accessor on :class:`dask.dataframe.Index`.\n\n See :func:`pandas.api.extensions.register_index_accessor` for more.\n \"\"\"\n from dask.dataframe import Index\n\n return _register_accessor(name, Index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._create_dd_meta_ArrowDatasetEngine._create_dd_meta.if_categories_.meta.clear_known_categories_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._create_dd_meta_ArrowDatasetEngine._create_dd_meta.if_categories_.meta.clear_known_categories_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1035, "end_line": 1120, "span_ids": ["ArrowDatasetEngine._create_dd_meta"], "tokens": 705}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _create_dd_meta(cls, dataset_info):\n \"\"\"Use parquet schema and hive-partition information\n (stored in dataset_info) to construct DataFrame metadata.\n\n This method is used by both arrow engines.\n \"\"\"\n\n # Collect necessary information from dataset_info\n schema = dataset_info[\"schema\"]\n index = dataset_info[\"index\"]\n categories = dataset_info[\"categories\"]\n partition_obj = dataset_info[\"partitions\"]\n partitions = dataset_info[\"partition_names\"]\n physical_column_names = dataset_info.get(\"physical_schema\", schema).names\n columns = None\n\n # Set index and column names using\n # pandas metadata (when available)\n pandas_metadata = _get_pandas_metadata(schema)\n if pandas_metadata:\n (\n index_names,\n column_names,\n storage_name_mapping,\n column_index_names,\n ) = _parse_pandas_metadata(pandas_metadata)\n if categories is None:\n categories = []\n for col in pandas_metadata[\"columns\"]:\n if (col[\"pandas_type\"] == \"categorical\") and (\n col[\"name\"] not in categories\n ):\n categories.append(col[\"name\"])\n else:\n # No pandas metadata implies no index, unless selected by the user\n index_names = []\n column_names = physical_column_names\n storage_name_mapping = {k: k for k in column_names}\n column_index_names = [None]\n if index is None and index_names:\n # Pandas metadata has provided the index name for us\n index = index_names\n\n # Ensure that there is no overlap between partition columns\n # and explicit column storage\n if partitions:\n _partitions = [p for p in partitions if p not in physical_column_names]\n if not _partitions:\n partitions = []\n dataset_info[\"partitions\"] = None\n dataset_info[\"partition_keys\"] = {}\n dataset_info[\"partition_names\"] = partitions\n elif len(_partitions) != len(partitions):\n raise ValueError(\n \"No partition-columns should be written in the \\n\"\n \"file unless they are ALL written in the file.\\n\"\n \"physical columns: {} | 
partitions: {}\".format(\n physical_column_names, partitions\n )\n )\n\n column_names, index_names = _normalize_index_columns(\n columns, column_names + partitions, index, index_names\n )\n\n all_columns = index_names + column_names\n\n # Check that categories are included in columns\n if categories and not set(categories).intersection(all_columns):\n raise ValueError(\n \"categories not in available columns.\\n\"\n \"categories: {} | columns: {}\".format(categories, list(all_columns))\n )\n\n dtypes = _get_pyarrow_dtypes(schema, categories)\n dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}\n\n index_cols = index or ()\n meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)\n if categories:\n # Make sure all categories are set to \"unknown\".\n # Cannot include index names in the `cols` argument.\n meta = clear_known_categories(\n meta, cols=[c for c in categories if c not in meta.index.names]\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._create_dd_meta.if_partition_obj__ArrowDatasetEngine._create_dd_meta.return.meta": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/parquet/arrow.py_ArrowDatasetEngine._create_dd_meta.if_partition_obj__ArrowDatasetEngine._create_dd_meta.return.meta", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/parquet/arrow.py", "file_name": "arrow.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1122, "end_line": 1146, "span_ids": ["ArrowDatasetEngine._create_dd_meta"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ArrowDatasetEngine(Engine):\n\n #\n # Public Class Methods\n\n @classmethod\n def _create_dd_meta(cls, dataset_info):\n # ... 
other code\n\n if partition_obj:\n\n for partition in partition_obj:\n if isinstance(index, list) and partition.name == index[0]:\n # Index from directory structure\n meta.index = pd.CategoricalIndex(\n [], categories=partition.keys, name=index[0]\n )\n elif partition.name == meta.index.name:\n # Index created from a categorical column\n meta.index = pd.CategoricalIndex(\n [], categories=partition.keys, name=meta.index.name\n )\n elif partition.name in meta.columns:\n meta[partition.name] = pd.Series(\n pd.Categorical(categories=partition.keys, values=[]),\n index=meta.index,\n )\n\n # Update `dataset_info` and return `meta`\n dataset_info[\"index\"] = index\n dataset_info[\"index_cols\"] = index_cols\n dataset_info[\"categories\"] = categories\n\n return meta", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_gzip_tsv_text2._": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_gzip_tsv_text2._", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 80, "span_ids": ["parse_filename", "imports", "normalize_text", "impl:7"], "tokens": 435}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import gzip\nimport os\nimport warnings\nfrom io import BytesIO\nfrom unittest import mock\n\nimport pytest\n\npd = pytest.importorskip(\"pandas\")\ndd = pytest.importorskip(\"dask.dataframe\")\n\nfrom fsspec.compression import compr\nfrom tlz import partition_all, valmap\n\nimport dask\nfrom dask.base import compute_as_if_collection\nfrom dask.bytes.core import read_bytes\nfrom dask.bytes.utils import compress\nfrom dask.core import flatten\nfrom dask.dataframe._compat import tm\nfrom dask.dataframe.io.csv import (\n _infer_block_size,\n auto_blocksize,\n block_mask,\n pandas_read_text,\n text_blocks_to_pandas,\n)\nfrom dask.dataframe.optimize import optimize_dataframe_getitem\nfrom dask.dataframe.utils import assert_eq, has_known_categories\nfrom dask.layers import DataFrameIOLayer\nfrom dask.utils import filetext, filetexts, tmpdir, tmpfile\nfrom dask.utils_test import hlg_layer\n\n# List of available compression format for test_read_csv_compression\ncompression_fmts = [fmt for fmt in compr] + [None]\n\n\ndef normalize_text(s):\n return \"\\n\".join(map(str.strip, s.strip().split(\"\\n\")))\n\n\ndef parse_filename(path):\n return os.path.split(path)[1]\n\n\ncsv_text = \"\"\"\nname,amount\nAlice,100\nBob,-200\nCharlie,300\nDennis,400\nEdith,-500\nFrank,600\nAlice,200\nFrank,-200\nBob,600\nAlice,400\nFrank,200\nAlice,300\nEdith,600\n\"\"\".strip()\n\ntsv_text = csv_text.replace(\",\", \"\\t\")\n\ntsv_text2 = \"\"\"\nname amount\nAlice 100\nBob -200\nCharlie 300\nDennis 400\nEdith -500\nFrank 600\nAlice 200\nFrank -200\nBob 600\nAlice 400\nFrank 200\nAlice 300\nEdith 600\n\"\"\".strip()", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_timeseries_tsv_units_row.csv_units_row_replace_b_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_timeseries_tsv_units_row.csv_units_row_replace_b_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 135, "span_ids": ["impl:15", "impl:7"], "tokens": 745}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "timeseries = \"\"\"\nDate,Open,High,Low,Close,Volume,Adj Close\n2015-08-28,198.50,199.839996,197.919998,199.240005,143298900,199.240005\n2015-08-27,197.020004,199.419998,195.210007,199.160004,266244700,199.160004\n2015-08-26,192.080002,194.789993,188.369995,194.679993,328058100,194.679993\n2015-08-25,195.429993,195.449997,186.919998,187.229996,353966700,187.229996\n2015-08-24,197.630005,197.630005,182.399994,189.550003,478672400,189.550003\n2015-08-21,201.729996,203.940002,197.520004,197.630005,328271500,197.630005\n2015-08-20,206.509995,208.289993,203.899994,204.009995,185865600,204.009995\n2015-08-19,209.089996,210.009995,207.350006,208.279999,167316300,208.279999\n2015-08-18,210.259995,210.679993,209.699997,209.929993,70043800,209.929993\n\"\"\".strip()\n\ncsv_files = {\n \"2014-01-01.csv\": (\n b\"name,amount,id\\n\" b\"Alice,100,1\\n\" b\"Bob,200,2\\n\" b\"Charlie,300,3\\n\"\n ),\n \"2014-01-02.csv\": b\"name,amount,id\\n\",\n \"2014-01-03.csv\": (\n b\"name,amount,id\\n\" b\"Dennis,400,4\\n\" b\"Edith,500,5\\n\" b\"Frank,600,6\\n\"\n ),\n}\n\ntsv_files = {k: v.replace(b\",\", b\"\\t\") for (k, v) in csv_files.items()}\n\nfwf_files = {\n \"2014-01-01.csv\": (\n b\" name amount id\\n\"\n b\" Alice 100 1\\n\"\n b\" Bob 200 2\\n\"\n b\" Charlie 300 3\\n\"\n ),\n \"2014-01-02.csv\": b\" name amount id\\n\",\n \"2014-01-03.csv\": (\n b\" name amount id\\n\"\n b\" Dennis 400 4\\n\"\n b\" Edith 500 5\\n\"\n b\" Frank 600 6\\n\"\n ),\n}\n\nexpected = pd.concat([pd.read_csv(BytesIO(csv_files[k])) for k in sorted(csv_files)])\n\ncomment_header = b\"\"\"# some header lines\n# that may be present\n# in a data file\n# before any data\"\"\"\n\ncomment_footer = b\"\"\"# some footer lines\n# that may be present\n# at the end of the file\"\"\"\n\ncsv_units_row = b\"str, int, int\\n\"\ntsv_units_row = csv_units_row.replace(b\",\", b\"\\t\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_and_table_test_pandas_read_text_with_header.assert_df_id_sum_1_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_csv_and_table_test_pandas_read_text_with_header.assert_df_id_sum_1_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 179, "span_ids": ["test_pandas_read_text_dtype_coercion", "test_pandas_read_text_with_header", "test_pandas_read_text_kwargs", "test_pandas_read_text", "impl:15"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "csv_and_table = pytest.mark.parametrize(\n \"reader,files\",\n [\n (pd.read_csv, csv_files),\n (pd.read_table, tsv_files),\n (pd.read_fwf, fwf_files),\n ],\n)\n\n\n@csv_and_table\ndef test_pandas_read_text(reader, files):\n b = files[\"2014-01-01.csv\"]\n df = pandas_read_text(reader, b, b\"\", {})\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n assert len(df) == 3\n assert df.id.sum() == 1 + 2 + 3\n\n\n@csv_and_table\ndef test_pandas_read_text_kwargs(reader, files):\n b = files[\"2014-01-01.csv\"]\n df = pandas_read_text(reader, b, b\"\", {\"usecols\": [\"name\", \"id\"]})\n assert list(df.columns) == [\"name\", \"id\"]\n\n\n@csv_and_table\ndef test_pandas_read_text_dtype_coercion(reader, files):\n b = files[\"2014-01-01.csv\"]\n df = pandas_read_text(reader, b, b\"\", {}, {\"amount\": \"float\"})\n assert df.amount.dtype == \"float\"\n\n\n@csv_and_table\ndef test_pandas_read_text_with_header(reader, files):\n b = files[\"2014-01-01.csv\"]\n header, b = b.split(b\"\\n\", 1)\n header = header + b\"\\n\"\n df = pandas_read_text(reader, b, header, {})\n assert list(df.columns) == [\"name\", \"amount\", \"id\"]\n assert len(df) == 3\n assert df.id.sum() == 1 + 2 + 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_getitem_optimization_after_filter_test_getitem_optimization_after_filter.with_filetext_timeseries_.assert_eq_expect_ddf_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_csv.py_test_getitem_optimization_after_filter_test_getitem_optimization_after_filter.with_filetext_timeseries_.assert_eq_expect_ddf_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_csv.py", "file_name": "test_csv.py", "file_type": "text/x-python", "category": "test", "start_line": 1714, "end_line": 1725, "span_ids": ["test_getitem_optimization_after_filter"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_optimization_after_filter():\n with filetext(timeseries) as fn:\n expect = pd.read_csv(fn)\n expect = expect[expect[\"High\"] 
> 205.0][[\"Low\"]]\n ddf = dd.read_csv(fn)\n ddf = ddf[ddf[\"High\"] > 205.0][[\"Low\"]]\n\n dsk = optimize_dataframe_getitem(ddf.dask, keys=[ddf._name])\n subgraph_rd = hlg_layer(dsk, \"read-csv\")\n assert isinstance(subgraph_rd, DataFrameIOLayer)\n assert set(subgraph_rd.columns) == {\"High\", \"Low\"}\n assert_eq(expect, ddf)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_fmt_warns_test_to_fmt_warns.with_tmpdir_as_dn_.a_to_csv_fn_name_functio": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_hdf.py_test_to_fmt_warns_test_to_fmt_warns.with_tmpdir_as_dn_.a_to_csv_fn_name_functio", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "file_type": "text/x-python", "category": "test", "start_line": 564, "end_line": 618, "span_ids": ["test_to_fmt_warns"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_to_fmt_warns():\n pytest.importorskip(\"tables\")\n df16 = pd.DataFrame(\n {\n \"x\": [\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n ],\n \"y\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],\n },\n index=[\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n 13.0,\n 14.0,\n 15.0,\n 16.0,\n ],\n )\n a = dd.from_pandas(df16, 16)\n\n # testing warning when breaking order\n with tmpfile(\"h5\") as fn:\n with pytest.warns(\n UserWarning, match=\"To preserve order between partitions name_function\"\n ):\n a.to_hdf(fn, \"/data*\", name_function=str)\n\n with tmpdir() as dn:\n fn = os.path.join(dn, \"data_*.csv\")\n a.to_csv(fn, name_function=str)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_after_filter_test_getitem_optimization_after_filter.assert_eq_df2_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_after_filter_test_getitem_optimization_after_filter.assert_eq_df2_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2507, "end_line": 2520, "span_ids": ["test_getitem_optimization_after_filter"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_optimization_after_filter(tmpdir, engine):\n df = pd.DataFrame({\"a\": [1, 2, 3] * 5, \"b\": range(15), \"c\": range(15)})\n dd.from_pandas(df, npartitions=3).to_parquet(tmpdir, engine=engine)\n ddf = dd.read_parquet(tmpdir, engine=engine)\n\n df2 = df[df[\"b\"] > 10][[\"a\"]]\n ddf2 = ddf[ddf[\"b\"] > 10][[\"a\"]]\n\n dsk = optimize_dataframe_getitem(ddf2.dask, keys=[ddf2._name])\n subgraph_rd = hlg_layer(dsk, \"read-parquet\")\n assert isinstance(subgraph_rd, DataFrameIOLayer)\n assert set(subgraph_rd.columns) == {\"a\", \"b\"}\n\n assert_eq(df2, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_after_filter_complex_test_getitem_optimization_after_filter_complex.assert_eq_df2_ddf2_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_getitem_optimization_after_filter_complex_test_getitem_optimization_after_filter_complex.assert_eq_df2_ddf2_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 2523, "end_line": 2541, "span_ids": ["test_getitem_optimization_after_filter_complex"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getitem_optimization_after_filter_complex(tmpdir, engine):\n df = pd.DataFrame({\"a\": [1, 2, 3] * 5, \"b\": range(15), \"c\": range(15)})\n dd.from_pandas(df, npartitions=3).to_parquet(tmpdir, engine=engine)\n ddf = dd.read_parquet(tmpdir, engine=engine)\n\n df2 = df[[\"b\"]]\n df2 = df2.assign(d=1)\n df2 = df[df2[\"d\"] == 1][[\"b\"]]\n\n ddf2 = ddf[[\"b\"]]\n ddf2 = ddf2.assign(d=1)\n ddf2 = ddf[ddf2[\"d\"] == 1][[\"b\"]]\n\n dsk = optimize_dataframe_getitem(ddf2.dask, keys=[ddf2._name])\n subgraph_rd = hlg_layer(dsk, \"read-parquet\")\n assert isinstance(subgraph_rd, DataFrameIOLayer)\n assert set(subgraph_rd.columns) == {\"b\"}\n\n assert_eq(df2, ddf2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_with_partition_test_custom_filename_with_partition.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_custom_filename_with_partition_test_custom_filename_with_partition.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3950, "end_line": 3984, "span_ids": ["test_custom_filename_with_partition"], 
"tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_custom_filename_with_partition(tmpdir, engine):\n fn = str(tmpdir)\n pdf = pd.DataFrame(\n {\n \"first_name\": [\"frank\", \"li\", \"marcela\", \"luis\"],\n \"country\": [\"canada\", \"china\", \"venezuela\", \"venezuela\"],\n },\n )\n df = dd.from_pandas(pdf, npartitions=4)\n df.to_parquet(\n fn,\n partition_on=[\"country\"],\n name_function=lambda x: f\"{x}-cool.parquet\",\n write_index=False,\n )\n\n for _, dirs, files in os.walk(fn):\n for dir in dirs:\n assert dir in (\n \"country=canada\",\n \"country=china\",\n \"country=venezuela\",\n )\n for file in files:\n assert file in (\n \"0-cool.parquet\",\n \"1-cool.parquet\",\n \"2-cool.parquet\",\n \"_common_metadata\",\n \"_metadata\",\n )\n actual = dd.read_parquet(fn, engine=engine, index=False)\n assert_eq(\n pdf, actual, check_index=False, check_dtype=False, check_categorical=False\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_partitioned_pyarrow_dataset_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/io/tests/test_parquet.py_test_roundtrip_partitioned_pyarrow_dataset_", "embedding": null, "metadata": {"file_path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "file_type": "text/x-python", "category": "test", "start_line": 3987, "end_line": 4031, "span_ids": ["test_roundtrip_partitioned_pyarrow_dataset"], "tokens": 463}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@PYARROW_MARK\n@pytest.mark.skipif(\n pa_version < parse_version(\"5.0\"),\n reason=\"pyarrow write_dataset was added in version 5.0\",\n)\ndef test_roundtrip_partitioned_pyarrow_dataset(tmpdir, engine):\n # See: https://github.com/dask/dask/issues/8650\n\n import pyarrow.parquet as pq\n from pyarrow.dataset import HivePartitioning, write_dataset\n\n # Sample data\n df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [\"a\", \"b\"]})\n\n # Write partitioned dataset with dask\n dask_path = tmpdir.mkdir(\"foo-dask\")\n ddf = dd.from_pandas(df, npartitions=2)\n ddf.to_parquet(dask_path, engine=engine, partition_on=[\"col1\"], write_index=False)\n\n # Write partitioned dataset with pyarrow\n pa_path = tmpdir.mkdir(\"foo-pyarrow\")\n table = pa.Table.from_pandas(df)\n write_dataset(\n data=table,\n base_dir=pa_path,\n basename_template=\"part.{i}.parquet\",\n format=\"parquet\",\n partitioning=HivePartitioning(pa.schema([(\"col1\", pa.int32())])),\n )\n\n # Define simple function to ensure results should\n # be comparable (same column and row order)\n def _prep(x):\n return x.sort_values(\"col2\")[[\"col1\", \"col2\"]]\n\n # Check that reading 
dask-written data is the same for pyarrow and dask\n df_read_dask = dd.read_parquet(dask_path, engine=engine)\n df_read_pa = pq.read_table(dask_path).to_pandas()\n assert_eq(_prep(df_read_dask), _prep(df_read_pa), check_index=False)\n\n # Check that reading pyarrow-written data is the same for pyarrow and dask\n df_read_dask = dd.read_parquet(pa_path, engine=engine)\n df_read_pa = pq.read_table(pa_path).to_pandas()\n assert_eq(_prep(df_read_dask), _prep(df_read_pa), check_index=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_dataframe_getitem_optimize_dataframe_getitem.dependencies.dsk_dependencies_copy_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_dataframe_getitem_optimize_dataframe_getitem.dependencies.dsk_dependencies_copy_", "embedding": null, "metadata": {"file_path": "dask/dataframe/optimize.py", "file_name": "optimize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 49, "end_line": 85, "span_ids": ["optimize_dataframe_getitem"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize_dataframe_getitem(dsk, keys):\n # This optimization looks for all `DataFrameIOLayer` instances,\n # and calls `project_columns` on any IO layers that precede\n # a (qualified) `getitem` operation.\n\n from ..layers import DataFrameIOLayer\n\n # Construct a list containg the names of all\n # DataFrameIOLayer layers in the graph\n io_layers = [k for k, v in dsk.layers.items() if isinstance(v, DataFrameIOLayer)]\n\n def _is_selection(layer):\n # Utility to check if layer is a getitem selection\n\n # Must be Blockwise\n if not isinstance(layer, Blockwise):\n return False\n\n # Callable must be `getitem`\n if layer.dsk[layer.output][0] != operator.getitem:\n return False\n\n return True\n\n def _kind(layer):\n # Utility to check type of getitem selection\n\n # Selection is second indice\n key, ind = layer.indices[1]\n if ind is None:\n if isinstance(key, (tuple, str, list, np.ndarray)) or np.isscalar(key):\n return \"column-selection\"\n return \"row-selection\"\n\n # Loop over each DataFrameIOLayer layer\n layers = dsk.layers.copy()\n dependencies = dsk.dependencies.copy()\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_dataframe_getitem.for_io_layer_name_in_io_l_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/optimize.py_optimize_dataframe_getitem.for_io_layer_name_in_io_l_", "embedding": null, "metadata": {"file_path": "dask/dataframe/optimize.py", "file_name": "optimize.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 232, "span_ids": ["optimize_dataframe_getitem"], "tokens": 1312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def optimize_dataframe_getitem(dsk, keys):\n # ... other code\n for io_layer_name in io_layers:\n columns = set()\n\n # Bail on the optimization if the IO layer is in the\n # requested keys, because we cannot change the name\n # anymore. These keys are structured like\n # [('getitem-', 0), ...] so we check for the\n # first item of the tuple.\n # (See https://github.com/dask/dask/issues/5893)\n if any(\n layers[io_layer_name].name == x[0] for x in keys if isinstance(x, tuple)\n ):\n continue\n\n # Inspect dependents of the current IO layer\n deps = dsk.dependents[io_layer_name]\n\n # This optimization currently supports two variations\n # of `io_layer_name` dependents (`deps`):\n #\n # - CASE A\n # - 1 Dependent: A column-based getitem layer\n # - This corresponds to the simple case that a\n # column selection directly follows the IO layer\n #\n # - CASE B\n # - >1 Dependents: An arbitrary number of column-\n # based getitem layers, and a single row selection\n # - Usually corresponds to a filter operation that\n # may (or may not) precede a column selection\n # - This pattern is typical in Dask-SQL SELECT\n # queries that include a WHERE statement\n\n # Bail if dependent layer type(s) do not agree with\n # case A or case B\n\n if not all(_is_selection(dsk.layers[k]) for k in deps) or {\n _kind(dsk.layers[k]) for k in deps\n } not in (\n {\"column-selection\"},\n {\"column-selection\", \"row-selection\"},\n ):\n continue\n\n # Split the column- and row-selection layers.\n # For case A, we will simply use information\n # from col_select_layers to perform column\n # projection in the root IO layer. 
For case B,\n # these layers are not the final column selection\n # (they are only part of a filtering operation).\n row_select_layers = {k for k in deps if _kind(dsk.layers[k]) == \"row-selection\"}\n col_select_layers = deps - row_select_layers\n\n # Can only handle single row-selection dependent (case B)\n if len(row_select_layers) > 1:\n continue\n\n # Define utility to walk the dependency graph\n # and check that the graph terminates with\n # the `success` key\n def _walk_deps(dependents, key, success):\n if key == success:\n return True\n deps = dependents[key]\n if deps:\n return all(_walk_deps(dependents, dep, success) for dep in deps)\n else:\n return False\n\n # If this is not case A, we now need to check if\n # we are dealing with case B (and should bail on\n # the optimization if not).\n #\n # For case B, we should be able to start at\n # col_select_layer, and follow the graph to\n # row_select_layer. The subgraph between these\n # layers must depend ONLY on col_select_layer,\n # and be consumed ONLY by row_select_layer.\n # If these conditions are met, then a column-\n # selection layer directly following\n # row_select_layer can be used for projection.\n if row_select_layers:\n\n # Before walking the subgraph, check that there\n # is a column-selection layer directly following\n # row_select_layer. Otherwise, we can bail now.\n row_select_layer = row_select_layers.pop()\n if len(dsk.dependents[row_select_layer]) != 1:\n continue # Too many/few row_select_layer dependents\n _layer = dsk.layers[list(dsk.dependents[row_select_layer])[0]]\n if _is_selection(_layer) and _kind(_layer) == \"column-selection\":\n # Include this column selection in our list of columns\n selection = _layer.indices[1][0]\n columns |= set(\n selection if isinstance(selection, list) else [selection]\n )\n else:\n continue # row_select_layer dependent not column selection\n\n # Walk the subgraph to check that all dependencies flow\n # from col_select_layers to the same col_select_layer\n if not all(\n _walk_deps(dsk.dependents, col_select_layer, col_select_layer)\n for col_select_layer in col_select_layers\n ):\n continue\n\n # Update columns with selections in col_select_layers\n for col_select_layer in col_select_layers:\n selection = dsk.layers[col_select_layer].indices[1][0]\n columns |= set(selection if isinstance(selection, list) else [selection])\n\n # If we got here, column projection is supported.\n # Add deps to update_blocks\n update_blocks = {dep: dsk.layers[dep] for dep in deps}\n\n # Project columns and update blocks\n old = layers[io_layer_name]\n new = old.project_columns(columns)\n if new.name != old.name:\n columns = list(columns)\n assert len(update_blocks)\n for block_key, block in update_blocks.items():\n # (('read-parquet-old', (.,)), ( ... )) ->\n # (('read-parquet-new', (.,)), ( ... 
))\n new_indices = ((new.name, block.indices[0][1]), block.indices[1])\n numblocks = {new.name: block.numblocks[old.name]}\n new_block = Blockwise(\n block.output,\n block.output_indices,\n block.dsk,\n new_indices,\n numblocks,\n block.concatenate,\n block.new_axes,\n )\n layers[block_key] = new_block\n dependencies[block_key] = {new.name}\n dependencies[new.name] = dependencies.pop(io_layer_name)\n\n layers[new.name] = new\n if new.name != old.name:\n del layers[old.name]\n\n new_hlg = HighLevelGraph(layers, dependencies)\n return new_hlg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_none_test_str_accessor_split_noexpand.assert_call_ds_n_1_expa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_cat_none_test_str_accessor_split_noexpand.assert_call_ds_n_1_expa", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 217, "end_line": 237, "span_ids": ["test_str_accessor_cat_none", "test_str_accessor_split_noexpand"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_str_accessor_cat_none():\n s = pd.Series([\"a\", \"a\", \"b\", \"b\", \"c\", np.nan], name=\"foo\")\n ds = dd.from_pandas(s, npartitions=2)\n\n assert_eq(ds.str.cat(), s.str.cat())\n assert_eq(ds.str.cat(na_rep=\"-\"), s.str.cat(na_rep=\"-\"))\n assert_eq(ds.str.cat(sep=\"_\", na_rep=\"-\"), s.str.cat(sep=\"_\", na_rep=\"-\"))\n\n\n@pytest.mark.parametrize(\"method\", [\"split\", \"rsplit\"])\ndef test_str_accessor_split_noexpand(method):\n def call(obj, *args, **kwargs):\n return getattr(obj.str, method)(*args, **kwargs)\n\n s = pd.Series([\"a b c d\", \"aa bb cc dd\", \"aaa bbb ccc dddd\"], name=\"foo\")\n ds = dd.from_pandas(s, npartitions=2)\n\n for n in [1, 2, 3]:\n assert_eq(call(s, n=n, expand=False), call(ds, n=n, expand=False))\n\n assert call(ds, n=1, expand=False).name == \"foo\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_split_expand_test_str_accessor_split_expand.None_2.assert_eq_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_split_expand_test_str_accessor_split_expand.None_2.assert_eq_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 240, "end_line": 264, "span_ids": ["test_str_accessor_split_expand"], "tokens": 259}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"split\", \"rsplit\"])\ndef test_str_accessor_split_expand(method):\n def call(obj, *args, **kwargs):\n return getattr(obj.str, method)(*args, **kwargs)\n\n s = pd.Series(\n [\"a b c d\", \"aa bb cc dd\", \"aaa bbb ccc dddd\"], index=[\"row1\", \"row2\", \"row3\"]\n )\n ds = dd.from_pandas(s, npartitions=2)\n\n for n in [1, 2, 3]:\n assert_eq(call(s, n=n, expand=True), call(ds, n=n, expand=True))\n\n with pytest.raises(NotImplementedError) as info:\n call(ds, expand=True)\n\n assert \"n=\" in str(info.value)\n\n s = pd.Series([\"a,bcd,zz,f\", \"aabb,ccdd,z,kk\", \"aaabbb,cccdddd,l,pp\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n for n in [1, 2, 3]:\n assert_eq(\n call(s, pat=\",\", n=n, expand=True), call(ds, pat=\",\", n=n, expand=True)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_split_expand_more_columns_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_accessors.py_test_str_accessor_split_expand_more_columns_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_accessors.py", "file_name": "test_accessors.py", "file_type": "text/x-python", "category": "test", "start_line": 267, "end_line": 284, "span_ids": ["test_string_nullable_types", "test_str_accessor_split_expand_more_columns"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(reason=\"Need to pad columns\")\ndef test_str_accessor_split_expand_more_columns():\n s = pd.Series([\"a b c d\", \"aa\", \"aaa bbb ccc dddd\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n assert_eq(s.str.split(n=3, expand=True), ds.str.split(n=3, expand=True))\n\n s = pd.Series([\"a b c\", \"aa bb cc\", \"aaa bbb ccc\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n ds.str.split(n=10, expand=True).compute()\n\n\ndef test_string_nullable_types(df_ddf):\n df, ddf = df_ddf\n assert_eq(ddf.string_col.str.count(\"A\"), df.string_col.str.count(\"A\"))\n assert_eq(ddf.string_col.str.isalpha(), df.string_col.str.isalpha())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in__test_frame_series_arithmetic_methods.for_l_r_el_er_in_.assert_eq_l_rmod_r_fill_": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_arithmetics_reduction.py_test_frame_series_arithmetic_methods.for_l_r_el_er_in__test_frame_series_arithmetic_methods.for_l_r_el_er_in_.assert_eq_l_rmod_r_fill_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_arithmetics_reduction.py", "file_name": "test_arithmetics_reduction.py", "file_type": "text/x-python", "category": "test", "start_line": 581, "end_line": 613, "span_ids": ["test_frame_series_arithmetic_methods"], "tokens": 655}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\n PANDAS_VERSION == \"1.0.2\",\n reason=\"https://github.com/pandas-dev/pandas/issues/32685\",\n)\ndef test_frame_series_arithmetic_methods():\n # ... other code\n\n for l, r, el, er in [\n (ddf1, ddf2, pdf1, pdf2),\n (ds1, ds2, ps1, ps2),\n (ddf1.repartition([\"a\", \"f\", \"j\"]), ddf2, pdf1, pdf2),\n (ds1.repartition([\"a\", \"b\", \"f\", \"j\"]), ds2, ps1, ps2),\n (ddf1, ddf2.repartition([\"a\", \"k\"]), pdf1, pdf2),\n (ds1, ds2.repartition([\"a\", \"b\", \"d\", \"h\", \"k\"]), ps1, ps2),\n (ddf1, 3, pdf1, 3),\n (ds1, 3, ps1, 3),\n (ddf1, s, pdf1, 4),\n (ds1, s, ps1, 4),\n ]:\n # l, r may be repartitioned, test whether repartition keeps original data\n assert_eq(l, el)\n assert_eq(r, er)\n\n assert_eq(l.add(r, fill_value=0), el.add(er, fill_value=0))\n assert_eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))\n assert_eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))\n assert_eq(l.div(r, fill_value=0), el.div(er, fill_value=0))\n assert_eq(l.divide(r, fill_value=0), el.divide(er, fill_value=0))\n assert_eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))\n assert_eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))\n assert_eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))\n assert_eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))\n\n assert_eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))\n assert_eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))\n assert_eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))\n assert_eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))\n assert_eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))\n assert_eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))\n assert_eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_test_corr.pytest_raises_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_test_corr.pytest_raises_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 2951, "end_line": 3158, "span_ids": ["test_apply", "test_add_suffix", "test_apply_warns", "test_corr", "test_abs", "test_cov", "test_round", "test_applymap", "test_apply_warns_with_invalid_meta", "test_add_prefix"], "tokens": 2106}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n func = lambda row: row[\"x\"] + row[\"y\"]\n assert_eq(\n ddf.x.apply(lambda x: x + 1, meta=(\"x\", int)), df.x.apply(lambda x: x + 1)\n )\n\n # specify meta\n assert_eq(\n ddf.apply(lambda xy: xy[0] + xy[1], axis=1, meta=(None, int)),\n df.apply(lambda xy: xy[0] + xy[1], axis=1),\n )\n assert_eq(\n ddf.apply(lambda xy: xy[0] + xy[1], axis=\"columns\", meta=(None, int)),\n df.apply(lambda xy: xy[0] + xy[1], axis=\"columns\"),\n )\n\n # inference\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n assert_eq(\n ddf.apply(lambda xy: xy[0] + xy[1], axis=1),\n df.apply(lambda xy: xy[0] + xy[1], axis=1),\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n assert_eq(ddf.apply(lambda xy: xy, axis=1), df.apply(lambda xy: xy, axis=1))\n\n # specify meta\n func = lambda x: pd.Series([x, x])\n assert_eq(ddf.x.apply(func, meta=[(0, int), (1, int)]), df.x.apply(func))\n # inference\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n assert_eq(ddf.x.apply(func), df.x.apply(func))\n\n # axis=0\n with pytest.raises(NotImplementedError):\n ddf.apply(lambda xy: xy, axis=0)\n\n with pytest.raises(NotImplementedError):\n ddf.apply(lambda xy: xy, axis=\"index\")\n\n\ndef test_apply_warns():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n func = lambda row: row[\"x\"] + row[\"y\"]\n\n with pytest.warns(UserWarning) as w:\n ddf.apply(func, axis=1)\n assert len(w) == 1\n\n with warnings.catch_warnings(record=True) as record:\n ddf.apply(func, axis=1, meta=(None, int))\n assert not record\n\n with pytest.warns(UserWarning) as w:\n ddf.apply(lambda x: x, axis=1)\n assert len(w) == 1\n assert \"'x'\" in str(w[0].message)\n assert \"int64\" in str(w[0].message)\n\n\ndef test_apply_warns_with_invalid_meta():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n func = lambda row: row[\"x\"] + row[\"y\"]\n\n with pytest.warns(FutureWarning, match=\"Meta is not 
valid\"):\n ddf.apply(func, axis=1, meta=int)\n\n\ndef test_applymap():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.applymap(lambda x: x + 1), df.applymap(lambda x: x + 1))\n\n assert_eq(ddf.applymap(lambda x: (x, x)), df.applymap(lambda x: (x, x)))\n\n\ndef test_add_prefix():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.add_prefix(\"abc\"), df.add_prefix(\"abc\"))\n assert_eq(ddf.x.add_prefix(\"abc\"), df.x.add_prefix(\"abc\"))\n\n\ndef test_add_suffix():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5], \"y\": [4, 5, 6, 7, 8]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.add_suffix(\"abc\"), df.add_suffix(\"abc\"))\n assert_eq(ddf.x.add_suffix(\"abc\"), df.x.add_suffix(\"abc\"))\n\n\ndef test_abs():\n df = pd.DataFrame(\n {\n \"A\": [1, -2, 3, -4, 5],\n \"B\": [-6.0, -7, -8, -9, 10],\n \"C\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.A.abs(), df.A.abs())\n assert_eq(ddf[[\"A\", \"B\"]].abs(), df[[\"A\", \"B\"]].abs())\n pytest.raises(ValueError, lambda: ddf.C.abs())\n pytest.raises(TypeError, lambda: ddf.abs())\n\n\ndef test_round():\n df = pd.DataFrame({\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.234, 2.234, 3.234]})\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.round(), df.round())\n assert_eq(ddf.round(2), df.round(2))\n\n\ndef test_cov():\n # DataFrame\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=6)\n\n res = ddf.cov()\n res2 = ddf.cov(split_every=2)\n res3 = ddf.cov(10)\n res4 = ddf.cov(10, split_every=2)\n sol = df.cov()\n sol2 = df.cov(10)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert_eq(res3, sol2)\n assert_eq(res4, sol2)\n assert res._name == ddf.cov()._name\n assert res._name != res2._name\n assert res3._name != res4._name\n assert res._name != res3._name\n\n # Series\n a = df.A\n b = df.B\n da = dd.from_pandas(a, npartitions=6)\n db = dd.from_pandas(b, npartitions=7)\n\n res = da.cov(db)\n res2 = da.cov(db, split_every=2)\n res3 = da.cov(db, 10)\n res4 = da.cov(db, 10, split_every=2)\n sol = a.cov(b)\n sol2 = a.cov(b, 10)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert_eq(res3, sol2)\n assert_eq(res4, sol2)\n assert res._name == da.cov(db)._name\n assert res._name != res2._name\n assert res3._name != res4._name\n assert res._name != res3._name\n\n\ndef test_corr():\n # DataFrame\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=6)\n\n res = ddf.corr()\n res2 = ddf.corr(split_every=2)\n res3 = ddf.corr(min_periods=10)\n res4 = ddf.corr(min_periods=10, split_every=2)\n sol = df.corr()\n sol2 = df.corr(min_periods=10)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert_eq(res3, sol2)\n assert_eq(res4, sol2)\n assert res._name == ddf.corr()._name\n assert res._name != res2._name\n assert res3._name != res4._name\n assert res._name != res3._name\n\n pytest.raises(NotImplementedError, lambda: ddf.corr(method=\"spearman\"))\n\n # Series\n a = df.A\n b = df.B\n da = dd.from_pandas(a, npartitions=6)\n db = dd.from_pandas(b, npartitions=7)\n\n res = da.corr(db)\n res2 = da.corr(db, split_every=2)\n res3 = da.corr(db, min_periods=10)\n res4 = da.corr(db, min_periods=10, split_every=2)\n sol = da.corr(db)\n sol2 = da.corr(db, min_periods=10)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert_eq(res3, sol2)\n assert_eq(res4, sol2)\n assert res._name == 
da.corr(db)._name\n assert res._name != res2._name\n assert res3._name != res4._name\n assert res._name != res3._name\n\n pytest.raises(NotImplementedError, lambda: da.corr(db, method=\"spearman\"))\n pytest.raises(TypeError, lambda: da.corr(ddf))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_same_name_test_autocorr.pytest_raises_TypeError_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_corr_same_name_test_autocorr.pytest_raises_TypeError_", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3161, "end_line": 3233, "span_ids": ["test_cov_corr_stable", "test_autocorr", "test_cov_corr_meta", "test_cov_corr_mixed", "test_corr_same_name"], "tokens": 861}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_corr_same_name():\n # Series with same names (see https://github.com/dask/dask/issues/4906)\n\n df = _compat.makeMissingDataframe()\n ddf = dd.from_pandas(df, npartitions=6)\n\n result = ddf.A.corr(ddf.B.rename(\"A\"))\n expected = ddf.A.corr(ddf.B)\n assert_eq(result, expected)\n\n # test with split_every\n result2 = ddf.A.corr(ddf.B.rename(\"A\"), split_every=2)\n assert_eq(result2, expected)\n\n\ndef test_cov_corr_meta():\n df = pd.DataFrame(\n {\n \"a\": np.array([1, 2, 3]),\n \"b\": np.array([1.0, 2.0, 3.0], dtype=\"f4\"),\n \"c\": np.array([1.0, 2.0, 3.0]),\n },\n index=pd.Index([1, 2, 3], name=\"myindex\"),\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(ddf.corr(), df.corr())\n assert_eq(ddf.cov(), df.cov())\n assert ddf.a.cov(ddf.b)._meta.dtype == \"f8\"\n assert ddf.a.corr(ddf.b)._meta.dtype == \"f8\"\n\n\n@pytest.mark.slow\ndef test_cov_corr_stable():\n df = pd.DataFrame(np.random.uniform(-1, 1, (20000000, 2)), columns=[\"a\", \"b\"])\n ddf = dd.from_pandas(df, npartitions=50)\n assert_eq(ddf.cov(split_every=8), df.cov())\n assert_eq(ddf.corr(split_every=8), df.corr())\n\n\ndef test_cov_corr_mixed():\n size = 1000\n d = {\n \"dates\": pd.date_range(\"2015-01-01\", periods=size, freq=\"1T\"),\n \"unique_id\": np.arange(0, size),\n \"ints\": np.random.randint(0, size, size=size),\n \"floats\": np.random.randn(size),\n \"bools\": np.random.choice([0, 1], size=size),\n \"int_nans\": np.random.choice([0, 1, np.nan], size=size),\n \"float_nans\": np.random.choice([0.0, 1.0, np.nan], size=size),\n \"constant\": 1,\n \"int_categorical\": np.random.choice([10, 20, 30, 40, 50], size=size),\n \"categorical_binary\": np.random.choice([\"a\", \"b\"], size=size),\n \"categorical_nans\": np.random.choice([\"a\", \"b\", \"c\"], size=size),\n }\n df = pd.DataFrame(d)\n df[\"hardbools\"] = df[\"bools\"] == 1\n df[\"categorical_nans\"] = df[\"categorical_nans\"].replace(\"c\", np.nan)\n df[\"categorical_binary\"] = df[\"categorical_binary\"].astype(\"category\")\n df[\"unique_id\"] = 
df[\"unique_id\"].astype(str)\n\n ddf = dd.from_pandas(df, npartitions=20)\n assert_eq(ddf.corr(split_every=4), df.corr(), check_divisions=False)\n assert_eq(ddf.cov(split_every=4), df.cov(), check_divisions=False)\n\n\ndef test_autocorr():\n x = pd.Series(np.random.random(100))\n dx = dd.from_pandas(x, npartitions=10)\n assert_eq(dx.autocorr(2), x.autocorr(2))\n assert_eq(dx.autocorr(0), x.autocorr(0))\n assert_eq(dx.autocorr(-2), x.autocorr(-2))\n assert_eq(dx.autocorr(2, split_every=3), x.autocorr(2))\n pytest.raises(TypeError, lambda: dx.autocorr(1.5))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_infer_columns_test_astype.assert_eq_a_x_astype_floa": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_apply_infer_columns_test_astype.assert_eq_a_x_astype_floa", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 3236, "end_line": 3455, "span_ids": ["test_reset_index", "test_series_iter", "test_dataframe_itertuples", "test_contains_series_raises_deprecated_warning_preserves_behavior", "test_apply_infer_columns", "test_astype", "test_index_time_properties", "test_dataframe_items", "test_series_iteritems", "test_dataframe_itertuples_with_index_false", "test_dataframe_itertuples_with_name_none", "test_nlargest_nsmallest", "test_dataframe_iterrows", "test_dataframe_compute_forward_kwargs"], "tokens": 2055}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apply_infer_columns():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n def return_df(x):\n # will create new DataFrame which columns is ['sum', 'mean']\n return pd.Series([x.sum(), x.mean()], index=[\"sum\", \"mean\"])\n\n # DataFrame to completely different DataFrame\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n result = ddf.apply(return_df, axis=1)\n assert isinstance(result, dd.DataFrame)\n tm.assert_index_equal(result.columns, pd.Index([\"sum\", \"mean\"]))\n assert_eq(result, df.apply(return_df, axis=1))\n\n # DataFrame to Series\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n result = ddf.apply(lambda x: 1, axis=1)\n assert isinstance(result, dd.Series)\n assert result.name is None\n assert_eq(result, df.apply(lambda x: 1, axis=1))\n\n def return_df2(x):\n return pd.Series([x * 2, x * 3], index=[\"x2\", \"x3\"])\n\n # Series to completely different DataFrame\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n result = ddf.x.apply(return_df2)\n assert isinstance(result, dd.DataFrame)\n tm.assert_index_equal(result.columns, pd.Index([\"x2\", \"x3\"]))\n assert_eq(result, df.x.apply(return_df2))\n\n # Series to Series\n with warnings.catch_warnings():\n 
warnings.simplefilter(\"ignore\", UserWarning)\n result = ddf.x.apply(lambda x: 1)\n assert isinstance(result, dd.Series)\n assert result.name == \"x\"\n assert_eq(result, df.x.apply(lambda x: 1))\n\n\ndef test_index_time_properties():\n i = _compat.makeTimeSeries()\n a = dd.from_pandas(i, npartitions=3)\n\n assert \"day\" in dir(a.index)\n # returns a numpy array in pandas, but a Index in dask\n assert_eq(a.index.day, pd.Index(i.index.day))\n assert_eq(a.index.month, pd.Index(i.index.month))\n\n\ndef test_nlargest_nsmallest():\n from string import ascii_lowercase\n\n df = pd.DataFrame(\n {\n \"a\": np.random.permutation(20),\n \"b\": list(ascii_lowercase[:20]),\n \"c\": np.random.permutation(20).astype(\"float64\"),\n }\n )\n ddf = dd.from_pandas(df, npartitions=3)\n\n for m in [\"nlargest\", \"nsmallest\"]:\n f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)\n\n res = f(ddf, 5, \"a\")\n res2 = f(ddf, 5, \"a\", split_every=2)\n sol = f(df, 5, \"a\")\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = f(ddf, 5, [\"a\", \"c\"])\n res2 = f(ddf, 5, [\"a\", \"c\"], split_every=2)\n sol = f(df, 5, [\"a\", \"c\"])\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n res = f(ddf.a, 5)\n res2 = f(ddf.a, 5, split_every=2)\n sol = f(df.a, 5)\n assert_eq(res, sol)\n assert_eq(res2, sol)\n assert res._name != res2._name\n\n\ndef test_reset_index():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n sol = df.reset_index()\n res = ddf.reset_index()\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)\n\n sol = df.reset_index(drop=True)\n res = ddf.reset_index(drop=True)\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)\n\n sol = df.x.reset_index()\n res = ddf.x.reset_index()\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)\n\n sol = df.x.reset_index(drop=True)\n res = ddf.x.reset_index(drop=True)\n assert all(d is None for d in res.divisions)\n assert_eq(res, sol, check_index=False)\n\n\ndef test_dataframe_compute_forward_kwargs():\n x = dd.from_pandas(pd.DataFrame({\"a\": range(10)}), npartitions=2).a.sum()\n x.compute(bogus_keyword=10)\n\n\ndef test_contains_series_raises_deprecated_warning_preserves_behavior():\n s = pd.Series([\"a\", \"b\", \"c\", \"d\"])\n ds = dd.from_pandas(s, npartitions=2)\n\n with pytest.warns(\n FutureWarning,\n match=\"Using the ``in`` operator to test for membership in Series is deprecated\",\n ):\n output = \"a\" in ds\n assert output\n\n with pytest.warns(\n FutureWarning,\n match=\"Using the ``in`` operator to test for membership in Series is deprecated\",\n ):\n output = 0 in ds\n assert not output\n\n\ndef test_series_iteritems():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n # `iteritems` was deprecated starting in `pandas=1.5.0`\n with _check_warning(\n PANDAS_GT_150, FutureWarning, message=\"iteritems is deprecated\"\n ):\n pd_items = df[\"x\"].iteritems()\n with _check_warning(\n PANDAS_GT_150, FutureWarning, message=\"iteritems is deprecated\"\n ):\n dd_items = ddf[\"x\"].iteritems()\n for (a, b) in zip(pd_items, dd_items):\n assert a == b\n\n\ndef test_series_iter():\n s = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ds = dd.from_pandas(s, npartitions=2)\n for (a, b) in zip(s[\"x\"], ds[\"x\"]):\n assert a == b\n\n\ndef test_dataframe_iterrows():\n df = pd.DataFrame({\"x\": [1, 2, 
3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.iterrows(), ddf.iterrows()):\n tm.assert_series_equal(a[1], b[1])\n\n\ndef test_dataframe_itertuples():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.itertuples(), ddf.itertuples()):\n assert a == b\n\n\n@pytest.mark.parametrize(\n \"columns\",\n [\n (\"x\", \"y\"),\n (\"x\", \"x\"),\n pd.MultiIndex.from_tuples([(\"x\", 1), (\"x\", 2)], names=(\"letter\", \"number\")),\n ],\n)\ndef test_dataframe_items(columns):\n df = pd.DataFrame([[1, 10], [2, 20], [3, 30], [4, 40]], columns=columns)\n ddf = dd.from_pandas(df, npartitions=2)\n for (a, b) in zip(df.items(), ddf.items()):\n assert a[0] == b[0] # column name\n assert_eq(a[1], b[1].compute()) # column values\n\n\ndef test_dataframe_itertuples_with_index_false():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.itertuples(index=False), ddf.itertuples(index=False)):\n assert a == b\n\n\ndef test_dataframe_itertuples_with_name_none():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4], \"y\": [10, 20, 30, 40]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n for (a, b) in zip(df.itertuples(name=None), ddf.itertuples(name=None)):\n assert a == b\n assert type(a) is type(b)\n\n\ndef test_astype():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, None], \"y\": [10, 20, 30, 40]}, index=[10, 20, 30, 40]\n )\n a = dd.from_pandas(df, 2)\n\n assert_eq(a.astype(float), df.astype(float))\n assert_eq(a.x.astype(float), df.x.astype(float))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_same_test_meta_nonempty_uses_meta_value_if_provided.with_warnings_catch_warni.assert_eq_expected_actua": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_boundary_slice_same_test_meta_nonempty_uses_meta_value_if_provided.with_warnings_catch_warni.assert_eq_expected_actua", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4339, "end_line": 4537, "span_ids": ["test_better_errors_object_reductions", "test_mixed_dask_array_operations", "test_meta_nonempty_uses_meta_value_if_provided", "test_mixed_dask_array_operations_errors", "test_mixed_dask_array_multi_dimensional", "test_map_partition_sparse", "test_bool", "test_meta_raises", "test_sample_empty_partitions", "test_cumulative_multiple_columns", "test_map_partition_array", "test_boundary_slice_same", "test_coerce"], "tokens": 2011}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"index, left, right\",\n [\n (range(10), 0, 9),\n (range(10), -1, None),\n (range(10), None, 10),\n ([-1, 0, 2, 1], None, None),\n ([-1, 0, 2, 1], -1, None),\n ([-1, 0, 
2, 1], None, 2),\n ([-1, 0, 2, 1], -2, 3),\n (pd.date_range(\"2017\", periods=10), None, None),\n (pd.date_range(\"2017\", periods=10), pd.Timestamp(\"2017\"), None),\n (pd.date_range(\"2017\", periods=10), None, pd.Timestamp(\"2017-01-10\")),\n (pd.date_range(\"2017\", periods=10), pd.Timestamp(\"2016\"), None),\n (pd.date_range(\"2017\", periods=10), None, pd.Timestamp(\"2018\")),\n ],\n)\ndef test_boundary_slice_same(index, left, right):\n df = pd.DataFrame({\"A\": range(len(index))}, index=index)\n result = methods.boundary_slice(df, left, right)\n tm.assert_frame_equal(result, df)\n\n\ndef test_better_errors_object_reductions():\n # GH2452\n s = pd.Series([\"a\", \"b\", \"c\", \"d\"])\n ds = dd.from_pandas(s, npartitions=2)\n with pytest.raises(ValueError) as err:\n ds.mean()\n assert str(err.value) == \"`mean` not supported with object series\"\n\n\ndef test_sample_empty_partitions():\n @dask.delayed\n def make_df(n):\n return pd.DataFrame(np.zeros((n, 4)), columns=list(\"abcd\"))\n\n ddf = dd.from_delayed([make_df(0), make_df(100), make_df(0)])\n ddf2 = ddf.sample(frac=0.2)\n # smoke test sample on empty partitions\n res = ddf2.compute()\n assert res.dtypes.equals(ddf2.dtypes)\n\n\ndef test_coerce():\n df = pd.DataFrame(np.arange(100).reshape((10, 10)))\n ddf = dd.from_pandas(df, npartitions=2)\n funcs = (int, float, complex)\n for d, t in product(funcs, (ddf, ddf[0])):\n pytest.raises(TypeError, lambda: t(d))\n\n\ndef test_bool():\n df = pd.DataFrame(np.arange(100).reshape((10, 10)))\n ddf = dd.from_pandas(df, npartitions=2)\n conditions = [ddf, ddf[0], ddf == ddf, ddf[0] == ddf[0]]\n for cond in conditions:\n with pytest.raises(ValueError):\n bool(cond)\n\n\ndef test_cumulative_multiple_columns():\n # GH 3037\n df = pd.DataFrame(np.random.randn(100, 5), columns=list(\"abcde\"))\n ddf = dd.from_pandas(df, 5)\n\n for d in [ddf, df]:\n for c in df.columns:\n d[c + \"cs\"] = d[c].cumsum()\n d[c + \"cmin\"] = d[c].cummin()\n d[c + \"cmax\"] = d[c].cummax()\n d[c + \"cp\"] = d[c].cumprod()\n\n assert_eq(ddf, df)\n\n\n@pytest.mark.parametrize(\"func\", [np.asarray, M.to_records])\ndef test_map_partition_array(func):\n from dask.array.utils import assert_eq\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5], \"y\": [6.0, 7.0, 8.0, 9.0, 10.0]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n for pre in [lambda a: a, lambda a: a.x, lambda a: a.y, lambda a: a.index]:\n\n try:\n expected = func(pre(df))\n except Exception:\n continue\n x = pre(ddf).map_partitions(func)\n assert_eq(x, expected, check_type=False) # TODO: make check_type pass\n\n assert isinstance(x, da.Array)\n assert x.chunks[0] == (np.nan, np.nan)\n\n\ndef test_map_partition_sparse():\n sparse = pytest.importorskip(\"sparse\")\n # Avoid searchsorted failure.\n pytest.importorskip(\"numba\", minversion=\"0.40.0\")\n\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5], \"y\": [6.0, 7.0, 8.0, 9.0, 10.0]},\n index=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n def f(d):\n return sparse.COO(np.array(d))\n\n for pre in [lambda a: a, lambda a: a.x]:\n expected = f(pre(df))\n result = pre(ddf).map_partitions(f)\n assert isinstance(result, da.Array)\n computed = result.compute()\n assert (computed.data == expected.data).all()\n assert (computed.coords == expected.coords).all()\n\n\ndef test_mixed_dask_array_operations():\n df = pd.DataFrame({\"x\": [1, 2, 3]}, index=[4, 5, 6])\n ddf = dd.from_pandas(df, npartitions=2)\n\n assert_eq(df.x + df.x.values, 
ddf.x + ddf.x.values)\n assert_eq(df.x.values + df.x, ddf.x.values + ddf.x)\n\n assert_eq(df.x + df.index.values, ddf.x + ddf.index.values)\n assert_eq(df.index.values + df.x, ddf.index.values + ddf.x)\n\n assert_eq(df.x + df.x.values.sum(), ddf.x + ddf.x.values.sum())\n\n\ndef test_mixed_dask_array_operations_errors():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4, 5]}, index=[4, 5, 6, 7, 8])\n ddf = dd.from_pandas(df, npartitions=2)\n\n x = da.arange(5, chunks=((1, 4),))\n x._chunks = ((np.nan, np.nan),)\n\n with pytest.raises(ValueError):\n (ddf.x + x).compute()\n\n x = da.arange(5, chunks=((2, 2, 1),))\n with pytest.raises(ValueError) as info:\n ddf.x + x\n\n assert \"add\" in str(info.value)\n\n\ndef test_mixed_dask_array_multi_dimensional():\n df = pd.DataFrame(\n {\"x\": [1, 2, 3, 4, 5], \"y\": [5.0, 6.0, 7.0, 8.0, 9.0]}, columns=[\"x\", \"y\"]\n )\n ddf = dd.from_pandas(df, npartitions=2)\n\n x = (df.values + 1).astype(float)\n dx = (ddf.values + 1).astype(float)\n\n assert_eq(ddf + dx + 1, df + x + 1)\n assert_eq(ddf + dx.rechunk((None, 1)) + 1, df + x + 1)\n assert_eq(ddf[[\"y\", \"x\"]] + dx + 1, df[[\"y\", \"x\"]] + x + 1)\n\n\ndef test_meta_raises():\n # Raise when we use a user defined function\n s = pd.Series([\"abcd\", \"abcd\"])\n ds = dd.from_pandas(s, npartitions=2)\n try:\n ds.map(lambda x: x[3])\n except ValueError as e:\n assert \"meta=\" in str(e)\n\n # But not otherwise\n df = pd.DataFrame({\"a\": [\"x\", \"y\", \"y\"], \"b\": [\"x\", \"y\", \"z\"], \"c\": [1, 2, 3]})\n ddf = dd.from_pandas(df, npartitions=1)\n\n with pytest.raises(Exception) as info:\n ddf.a + ddf.c\n\n assert \"meta=\" not in str(info.value)\n\n\ndef test_meta_nonempty_uses_meta_value_if_provided():\n # https://github.com/dask/dask/issues/6958\n base = pd.Series([1, 2, 3], dtype=\"datetime64[ns]\")\n offsets = pd.Series([pd.offsets.DateOffset(years=o) for o in range(3)])\n dask_base = dd.from_pandas(base, npartitions=1)\n dask_offsets = dd.from_pandas(offsets, npartitions=1)\n dask_offsets._meta = offsets.head()\n\n with warnings.catch_warnings(): # not vectorized performance warning\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"ignore\", UserWarning)\n expected = base + offsets\n actual = dask_base + dask_offsets\n assert_eq(expected, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dask_dataframe_holds_scipy_sparse_containers_test_partitions_indexer.None_2": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_dataframe.py_test_dask_dataframe_holds_scipy_sparse_containers_test_partitions_indexer.None_2", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "file_type": "text/x-python", "category": "test", "start_line": 4540, "end_line": 4579, "span_ids": ["test_map_partitions_delays_large_inputs", "test_partitions_indexer", "test_dask_dataframe_holds_scipy_sparse_containers"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dask_dataframe_holds_scipy_sparse_containers():\n sparse = pytest.importorskip(\"scipy.sparse\")\n da = pytest.importorskip(\"dask.array\")\n x = da.random.random((1000, 10), chunks=(100, 10))\n x[x < 0.9] = 0\n df = dd.from_dask_array(x)\n y = df.map_partitions(sparse.csr_matrix)\n\n assert isinstance(y, da.Array)\n\n vs = y.to_delayed().flatten().tolist()\n values = dask.compute(*vs, scheduler=\"single-threaded\")\n assert all(isinstance(v, sparse.csr_matrix) for v in values)\n\n\ndef test_map_partitions_delays_large_inputs():\n df = pd.DataFrame({\"x\": [1, 2, 3, 4]})\n ddf = dd.from_pandas(df, npartitions=2)\n\n big = np.ones(1000000)\n\n b = ddf.map_partitions(lambda x, y: x, y=big)\n assert any(big is v for v in b.dask.values())\n\n a = ddf.map_partitions(lambda x, y: x, big)\n assert any(big is v for v in a.dask.values())\n\n\ndef test_partitions_indexer():\n df = pd.DataFrame({\"x\": range(10)})\n ddf = dd.from_pandas(df, npartitions=5)\n\n assert_eq(ddf.partitions[0], ddf.get_partition(0))\n assert_eq(ddf.partitions[3], ddf.get_partition(3))\n assert_eq(ddf.partitions[-1], ddf.get_partition(4))\n\n assert ddf.partitions[:3].npartitions == 3\n assert ddf.x.partitions[:3].npartitions == 3\n\n assert ddf.x.partitions[::2].compute().tolist() == [0, 1, 4, 5, 8, 9]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_on_index_test_groupby_on_index.with_dask_config_set_sche.with_warnings_catch_warni.None_5": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_groupby.py_test_groupby_on_index_test_groupby_on_index.with_dask_config_set_sche.with_warnings_catch_warni.None_5", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "file_type": "text/x-python", "category": "test", "start_line": 271, "end_line": 314, "span_ids": ["test_groupby_on_index"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"scheduler\", [\"sync\", \"threads\"])\ndef test_groupby_on_index(scheduler):\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9], \"b\": [4, 5, 6, 3, 2, 1, 0, 0, 0]},\n index=[0, 1, 3, 5, 6, 8, 9, 9, 9],\n )\n ddf = dd.from_pandas(pdf, npartitions=3)\n\n ddf2 = ddf.set_index(\"a\")\n pdf2 = pdf.set_index(\"a\")\n assert_eq(ddf.groupby(\"a\").b.mean(), ddf2.groupby(ddf2.index).b.mean())\n\n def func(df):\n return df.assign(b=df.b - df.b.mean())\n\n def func2(df):\n return df[[\"b\"]] - df[[\"b\"]].mean()\n\n def func3(df):\n return df.mean()\n\n with dask.config.set(scheduler=scheduler):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n assert_eq(ddf.groupby(\"a\").apply(func), pdf.groupby(\"a\").apply(func))\n\n assert_eq(\n ddf.groupby(\"a\").apply(func).set_index(\"a\"),\n 
pdf.groupby(\"a\").apply(func).set_index(\"a\"),\n )\n\n assert_eq(\n pdf2.groupby(pdf2.index).apply(func2),\n ddf2.groupby(ddf2.index).apply(func2),\n )\n\n assert_eq(\n ddf2.b.groupby(\"a\").apply(func3), pdf2.b.groupby(\"a\").apply(func3)\n )\n\n assert_eq(\n ddf2.b.groupby(ddf2.index).apply(func3),\n pdf2.b.groupby(pdf2.index).apply(func3),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_test_concat_unknown_divisions.assert_not_record": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_unknown_divisions_test_concat_unknown_divisions.assert_not_record", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1586, "end_line": 1603, "span_ids": ["test_concat_unknown_divisions"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_unknown_divisions():\n a = pd.Series([1, 2, 3, 4])\n b = pd.Series([4, 3, 2, 1])\n aa = dd.from_pandas(a, npartitions=2, sort=False)\n bb = dd.from_pandas(b, npartitions=2, sort=False)\n\n assert not aa.known_divisions\n\n with pytest.warns(UserWarning):\n assert_eq(pd.concat([a, b], axis=1), dd.concat([aa, bb], axis=1))\n\n cc = dd.from_pandas(b, npartitions=1, sort=False)\n with pytest.raises(ValueError):\n dd.concat([aa, cc], axis=1)\n\n with warnings.catch_warnings(record=True) as record:\n dd.concat([aa, bb], axis=1, ignore_unknown_divisions=True)\n assert not record", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5_test_concat5.None_1.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat5_test_concat5.None_1.None_3", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1793, "end_line": 1883, "span_ids": ["test_concat5"], "tokens": 768}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat5():\n pdf1 = pd.DataFrame(\n np.random.randn(7, 5), columns=list(\"ABCDE\"), index=list(\"abcdefg\")\n )\n pdf2 = pd.DataFrame(\n np.random.randn(7, 6), columns=list(\"FGHIJK\"), index=list(\"abcdefg\")\n )\n pdf3 = pd.DataFrame(\n np.random.randn(7, 
6), columns=list(\"FGHIJK\"), index=list(\"cdefghi\")\n )\n pdf4 = pd.DataFrame(\n np.random.randn(7, 5), columns=list(\"FGHAB\"), index=list(\"cdefghi\")\n )\n pdf5 = pd.DataFrame(\n np.random.randn(7, 5), columns=list(\"FGHAB\"), index=list(\"fklmnop\")\n )\n\n ddf1 = dd.from_pandas(pdf1, 2)\n ddf2 = dd.from_pandas(pdf2, 3)\n ddf3 = dd.from_pandas(pdf3, 2)\n ddf4 = dd.from_pandas(pdf4, 2)\n ddf5 = dd.from_pandas(pdf5, 3)\n\n cases = [\n [ddf1, ddf2],\n [ddf1, ddf3],\n [ddf1, ddf4],\n [ddf1, ddf5],\n [ddf3, ddf4],\n [ddf3, ddf5],\n [ddf5, ddf1, ddf4],\n [ddf5, ddf3],\n [ddf1.A, ddf4.A],\n [ddf2.F, ddf3.F],\n [ddf4.A, ddf5.A],\n [ddf1.A, ddf4.F],\n [ddf2.F, ddf3.H],\n [ddf4.A, ddf5.B],\n [ddf1, ddf4.A],\n [ddf3.F, ddf2],\n [ddf5, ddf1.A, ddf2],\n ]\n\n for case in cases:\n pdcase = [c.compute() for c in case]\n\n assert_eq(\n dd.concat(case, interleave_partitions=True),\n pd.concat(pdcase, sort=False),\n )\n\n assert_eq(\n dd.concat(case, join=\"inner\", interleave_partitions=True),\n pd.concat(pdcase, join=\"inner\"),\n )\n\n assert_eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))\n\n assert_eq(\n dd.concat(case, axis=1, join=\"inner\"),\n pd.concat(pdcase, axis=1, join=\"inner\"),\n )\n\n # Dask + pandas\n cases = [\n [ddf1, pdf2],\n [ddf1, pdf3],\n [pdf1, ddf4],\n [pdf1.A, ddf4.A],\n [ddf2.F, pdf3.F],\n [ddf1, pdf4.A],\n [ddf3.F, pdf2],\n [ddf2, pdf1, ddf3.F],\n ]\n\n for case in cases:\n pdcase = [c.compute() if isinstance(c, _Frame) else c for c in case]\n\n assert_eq(dd.concat(case, interleave_partitions=True), pd.concat(pdcase))\n\n assert_eq(\n dd.concat(case, join=\"inner\", interleave_partitions=True),\n pd.concat(pdcase, join=\"inner\"),\n )\n\n assert_eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))\n\n assert_eq(\n dd.concat(case, axis=1, join=\"inner\"),\n pd.concat(pdcase, axis=1, join=\"inner\"),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_datetimeindex_check_append_with_warning.return.result": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_concat_datetimeindex_check_append_with_warning.return.result", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 1984, "end_line": 2020, "span_ids": ["check_append_with_warning", "test_concat_datetimeindex"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_concat_datetimeindex():\n # https://github.com/dask/dask/issues/2932\n b2 = pd.DataFrame(\n {\"x\": [\"a\"]},\n index=pd.DatetimeIndex([\"2015-03-24 00:00:16\"], dtype=\"datetime64[ns]\"),\n )\n b3 = pd.DataFrame(\n {\"x\": [\"c\"]},\n index=pd.DatetimeIndex([\"2015-03-29 00:00:44\"], dtype=\"datetime64[ns]\"),\n )\n\n b2[\"x\"] = b2.x.astype(\"category\").cat.set_categories([\"a\", \"c\"])\n b3[\"x\"] = b3.x.astype(\"category\").cat.set_categories([\"a\", 
\"c\"])\n\n db2 = dd.from_pandas(b2, 1)\n db3 = dd.from_pandas(b3, 1)\n\n result = concat([b2.iloc[:0], b3.iloc[:0]])\n assert result.index.dtype == \"M8[ns]\"\n\n result = dd.concat([db2, db3])\n expected = pd.concat([b2, b3])\n assert_eq(result, expected)\n\n\ndef check_append_with_warning(dask_obj, dask_append, pandas_obj, pandas_append):\n if PANDAS_GT_140:\n with pytest.warns(FutureWarning, match=\"append method is deprecated\"):\n expected = pandas_obj.append(pandas_append)\n result = dask_obj.append(dask_append)\n assert_eq(result, expected)\n else:\n expected = pandas_obj.append(pandas_append)\n result = dask_obj.append(dask_append)\n assert_eq(result, expected)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_outer_empty_test_dtype_equality_warning.assert_not_record": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_multi.py_test_merge_outer_empty_test_dtype_equality_warning.assert_not_record", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_multi.py", "file_name": "test_multi.py", "file_type": "text/x-python", "category": "test", "start_line": 2188, "end_line": 2213, "span_ids": ["test_merge_outer_empty", "test_dtype_equality_warning"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_merge_outer_empty():\n # Issue #5470 bug reproducer\n k_clusters = 3\n df = pd.DataFrame(\n {\"user\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\"], \"cluster\": [1, 1, 2, 2, 3, 3]}\n )\n df = dd.from_pandas(df, npartitions=10)\n empty_df = dd.from_pandas(pd.DataFrame(), npartitions=10)\n\n for x in range(0, k_clusters + 1):\n assert_eq(\n dd.merge(empty_df, df[df.cluster == x], how=\"outer\"),\n df[df.cluster == x],\n check_index=False,\n check_divisions=False,\n )\n\n\ndef test_dtype_equality_warning():\n # https://github.com/dask/dask/issues/5437\n df1 = pd.DataFrame({\"a\": np.array([1, 2], dtype=np.dtype(np.int64))})\n df2 = pd.DataFrame({\"a\": np.array([1, 2], dtype=np.dtype(np.longlong))})\n\n with warnings.catch_warnings(record=True) as record:\n dd.multi.warn_dtype_mismatch(df1, df2, \"a\", \"a\")\n assert not record", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_warnings_test_get_dummies.tm_assert_index_equal_res": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/dataframe/tests/test_reshape.py_warnings_test_get_dummies.tm_assert_index_equal_res", "embedding": null, "metadata": {"file_path": "dask/dataframe/tests/test_reshape.py", "file_name": "test_reshape.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 29, "span_ids": 
["imports", "test_get_dummies"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom packaging.version import parse as parse_version\n\nimport dask.dataframe as dd\nfrom dask.dataframe._compat import PANDAS_VERSION, tm\nfrom dask.dataframe.utils import assert_eq, make_meta\n\n\n@pytest.mark.parametrize(\n \"data\",\n [\n pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype=\"category\"),\n pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4], categories=[4, 3, 2, 1])),\n pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 4, 3, 2, 1], \"b\": pd.Categorical(list(\"abcdabcd\"))}\n ),\n ],\n)\ndef test_get_dummies(data):\n exp = pd.get_dummies(data)\n\n ddata = dd.from_pandas(data, 2)\n res = dd.get_dummies(ddata)\n assert_eq(res, exp)\n tm.assert_index_equal(res.columns, exp.columns)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_plot_test_profiler_plot.assert_not_record": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_profiler_plot_test_profiler_plot.assert_not_record", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 218, "end_line": 243, "span_ids": ["test_profiler_plot"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\ndef test_profiler_plot():\n with prof:\n get(dsk, \"e\")\n p = prof.visualize(\n width=500,\n height=300,\n tools=\"hover\",\n title=\"Not the default\",\n show=False,\n save=False,\n )\n if BOKEH_VERSION().major < 3:\n assert p.plot_width == 500\n assert p.plot_height == 300\n else:\n assert p.width == 500\n assert p.height == 300\n assert len(p.tools) == 1\n assert isinstance(p.tools[0], bokeh.models.HoverTool)\n assert p.title.text == \"Not the default\"\n # Test empty, checking for errors\n prof.clear()\n with warnings.catch_warnings(record=True) as record:\n prof.visualize(show=False, save=False)\n assert not record", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_test_cache_profiler_plot.assert_not_record": {"__data__": {"id_": 
"/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/diagnostics/tests/test_profiler.py_test_cache_profiler_plot_test_cache_profiler_plot.assert_not_record", "embedding": null, "metadata": {"file_path": "dask/diagnostics/tests/test_profiler.py", "file_name": "test_profiler.py", "file_type": "text/x-python", "category": "test", "start_line": 285, "end_line": 311, "span_ids": ["test_cache_profiler_plot"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not bokeh\")\ndef test_cache_profiler_plot():\n with CacheProfiler(metric_name=\"non-standard\") as cprof:\n get(dsk, \"e\")\n p = cprof.visualize(\n width=500,\n height=300,\n tools=\"hover\",\n title=\"Not the default\",\n show=False,\n save=False,\n )\n if BOKEH_VERSION().major < 3:\n assert p.plot_width == 500\n assert p.plot_height == 300\n else:\n assert p.width == 500\n assert p.height == 300\n assert len(p.tools) == 1\n assert isinstance(p.tools[0], bokeh.models.HoverTool)\n assert p.title.text == \"Not the default\"\n assert p.axis[1].axis_label == \"Cache Size (non-standard)\"\n # Test empty, checking for errors\n cprof.clear()\n with warnings.catch_warnings(record=True) as record:\n cprof.visualize(show=False, save=False)\n assert not record", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dense_sparse_array_test_tokenize_dense_sparse_array.None_3": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_tokenize_dense_sparse_array_test_tokenize_dense_sparse_array.None_3", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 484, "end_line": 508, "span_ids": ["test_tokenize_dense_sparse_array"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"not sp\")\n@pytest.mark.parametrize(\"cls_name\", (\"dia\", \"bsr\", \"coo\", \"csc\", \"csr\", \"dok\", \"lil\"))\ndef test_tokenize_dense_sparse_array(cls_name):\n rng = np.random.RandomState(1234)\n\n a = sp.rand(10, 10000, random_state=rng).asformat(cls_name)\n b = a.copy()\n\n assert tokenize(a) == tokenize(b)\n\n # modifying the data values\n if hasattr(b, \"data\"):\n b.data[:10] = 1\n elif cls_name == \"dok\":\n b[3, 3] = 1\n else:\n raise ValueError\n\n assert tokenize(a) != tokenize(b)\n\n # modifying the data indices\n b = a.copy().asformat(\"coo\")\n b.row[:10] = np.arange(10)\n b = b.asformat(cls_name)\n assert tokenize(a) != tokenize(b)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_as_if_collection_low_level_task_graph_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_base.py_test_compute_as_if_collection_low_level_task_graph_", "embedding": null, "metadata": {"file_path": "dask/tests/test_base.py", "file_name": "test_base.py", "file_type": "text/x-python", "category": "test", "start_line": 1497, "end_line": 1521, "span_ids": ["test_compute_as_if_collection_low_level_task_graph"], "tokens": 221}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_compute_as_if_collection_low_level_task_graph():\n # See https://github.com/dask/dask/pull/7969\n da = pytest.importorskip(\"dask.array\")\n x = da.arange(10)\n\n # Boolean flag to ensure MyDaskArray.__dask_optimize__ is called\n optimized = False\n\n class MyDaskArray(da.Array):\n \"\"\"Dask Array subclass with validation logic in __dask_optimize__\"\"\"\n\n @classmethod\n def __dask_optimize__(cls, dsk, keys, **kwargs):\n # Ensure `compute_as_if_collection` don't convert to a low-level task graph\n assert type(dsk) is HighLevelGraph\n nonlocal optimized\n optimized = True\n return super().__dask_optimize__(dsk, keys, **kwargs)\n\n result = compute_as_if_collection(\n MyDaskArray, x.__dask_graph__(), x.__dask_keys__()\n )[0]\n assert optimized\n da.utils.assert_eq(x, result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_no_filename_test_dot_graph_no_filename.assert_isinstance_result_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_no_filename_test_dot_graph_no_filename.assert_isinstance_result_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 203, "end_line": 230, "span_ids": ["test_dot_graph_no_filename"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"format,typ\",\n [\n pytest.param(\"png\", Image, marks=ipython_not_installed_mark),\n pytest.param(\n \"jpeg\",\n Image,\n marks=pytest.mark.xfail(\n reason=\"jpeg not always supported in dot\", strict=False\n ),\n ),\n (\"dot\", type(None)),\n (\"pdf\", type(None)),\n pytest.param(\"svg\", SVG, marks=ipython_not_installed_mark),\n ],\n)\n@pytest.mark.xfail(\n sys.platform == \"win32\",\n reason=\"graphviz/pango on conda-forge currently broken for windows\",\n strict=False,\n)\ndef test_dot_graph_no_filename(tmpdir, 
format, typ):\n before = tmpdir.listdir()\n result = dot_graph(dsk, filename=None, format=format)\n # We shouldn't write any files if filename is None.\n after = tmpdir.listdir()\n assert before == after\n assert isinstance(result, typ)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_defaults_test_dot_graph_defaults.try_.finally_.ensure_not_exists_target_": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/dask/tests/test_dot.py_test_dot_graph_defaults_test_dot_graph_defaults.try_.finally_.ensure_not_exists_target_", "embedding": null, "metadata": {"file_path": "dask/tests/test_dot.py", "file_name": "test_dot.py", "file_type": "text/x-python", "category": "test", "start_line": 233, "end_line": 251, "span_ids": ["test_dot_graph_defaults"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@ipython_not_installed_mark\n@pytest.mark.xfail(\n sys.platform == \"win32\",\n reason=\"graphviz/pango on conda-forge currently broken for windows\",\n strict=False,\n)\ndef test_dot_graph_defaults():\n # Test with default args.\n default_name = \"mydask\"\n default_format = \"png\"\n target = \".\".join([default_name, default_format])\n\n ensure_not_exists(target)\n try:\n result = dot_graph(dsk)\n assert os.path.isfile(target)\n assert isinstance(result, Image)\n finally:\n ensure_not_exists(target)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py____Options_for_HTML_out": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py____Options_for_HTML_out", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 111, "span_ids": ["docstring:26", "docstring"], "tokens": 796}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n# dask documentation build configuration file, created by\n# sphinx-quickstart on Sun Jan 4 08:58:22 2015.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nfrom __future__ import annotations\n\nimport os\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nimport sys\n\nimport sphinx_autosummary_accessors\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"../../\"))\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autosummary\",\n \"sphinx_autosummary_accessors\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.viewcode\",\n \"numpydoc\",\n \"sphinx_click.ext\",\n \"dask_sphinx_theme.ext.dask_config_sphinx_ext\",\n \"sphinx_tabs.tabs\",\n \"sphinx_remove_toctrees\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n \"IPython.sphinxext.ipython_directive\",\n]\n\nnumpydoc_show_class_members = False\n\nsphinx_tabs_disable_tab_closing = True\n\n# Remove individual API pages from sphinx toctree to prevent long build times.\n# See https://github.com/dask/dask/issues/8227.\nremove_from_toctrees = [\"generated/*\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\", sphinx_autosummary_accessors.templates_path]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Dask\"\ncopyright = \"2014-2018, Anaconda, Inc. and contributors\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns: list[str] = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n# add_module_names = True\npygments_style = \"default\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_html_theme_None_124": {"__data__": {"id_": "/home/jiayipan/code/24FA/temp/ml-01/moatless-tools/t/repos/swe-train_dask__dask/docs/source/conf.py_html_theme_None_124", "embedding": null, "metadata": {"file_path": "docs/source/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 113, "end_line": 240, "span_ids": ["docstring:65", "docstring:103", "docstring:26"], "tokens": 868}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "html_theme = \"dask_sphinx_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\"logo_only\": True}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = \"images/dask_horizontal_white_no_pad.svg\"\n\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. 
Default is True.\n# html_show_sphinx = True\nhtmlhelp_basename = \"daskdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements: dict[str, str] = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\nlatex_documents = [\n (master_doc, \"dask.tex\", \"dask Documentation\", \"Dask Development Team\", \"manual\")\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\nman_pages = [(master_doc, \"dask\", \"dask Documentation\", [\"Dask Development Team\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}}} \ No newline at end of file